path
stringlengths 7
265
| concatenated_notebook
stringlengths 46
17M
|
---|---|
Daily/Get Permutation.ipynb
|
###Markdown
Get Permutations
###Code
def get_permutations(digits, mapping):
digit = digits[0]
if len(digits) == 1:
return mapping[digit]
result = []
for char in mapping[digit]:
for perm in get_permutations(digits[1:], mapping):
result.append(char + perm)
return result
###Output
_____no_output_____
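###Markdown
A quick usage sketch of the function above. The digit-to-letters mapping below is a hypothetical example (the original notebook does not show the mapping it was used with).
###Code
# hypothetical phone-keypad style mapping, for illustration only
keypad = {'2': 'abc', '3': 'def'}
print(get_permutations('23', keypad))
# expected: ['ad', 'ae', 'af', 'bd', 'be', 'bf', 'cd', 'ce', 'cf']
###Output
_____no_output_____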
|
python/ml-02-train_on_absolute_values.ipynb
|
###Markdown
feature selection
###Code
if not 'lsp' in era5:
lsp = era5['tp']-era5['cp']
lsp.name = 'lsp'
else:
lsp = era5['lsp']
reltop = era5['z'].sel(level=500) - era5['z'].sel(level=850)
reltop.name = 'reltop'
q_mean = era5['q'].mean('level')
q_mean.name = 'q_mean'
era5 = xr.merge([era5['cp'], lsp, reltop, q_mean])
###Output
_____no_output_____
###Markdown
Interpolation step
###Code
era5 = era5.interp(latitude=glofas.latitude, longitude=glofas.longitude)
###Output
_____no_output_____
###Markdown
time subset
###Code
era5 = era5.isel(time=slice(0*365,3*365))
glofas = glofas.isel(time=slice(0*365,3*365))
if len(era5.time) < 3000:
era5 = era5.load()
glofas = glofas.load()
krems = dict(latitude=48.403, longitude=15.615)
surrounding = dict(latitude=slice(krems['latitude']+1,
krems['latitude']-1),
longitude=slice(krems['longitude']-1,
krems['longitude']+1))
# select data of interest
dis = glofas.interp(krems)
y = dis #.diff('time', 1) # forecast time difference of discharge
X = era5.sel(surrounding).mean(['latitude', 'longitude'])
X
def add_shifted_predictors(ds, shifts, variables='all'):
"""Adds additional variables to an array which are shifted in time.
Parameters
----------
ds : xr.Dataset
shifts : list of integers
variables : str or list
"""
if variables == 'all':
variables = ds.data_vars
for var in variables:
for i in shifts:
if i == 0: continue # makes no sense to shift by zero
newvar = var+'-'+str(i)
ds[newvar] = ds[var].shift(time=i)
return ds
shifts = range(1,11)
notshift_vars = ['swvl1', 'swvl2']
shift_vars = [v for v in X.data_vars if not v in notshift_vars]
Xs = add_shifted_predictors(X, shifts, variables=shift_vars)
Xar = Xs.to_array(dim='features')
yar = y.to_array()
yar = yar.rename({'variable': 'features'})
yar = yar.drop(['latitude', 'longitude'])
Xy = xr.concat([Xar, yar], dim='features')
Xyt = Xy.dropna('time', how='any') # drop them as we cannot train on nan values
Xy.shape
assert len(Xyt.time) > 1
predictand = 'dis'
predictors = [v for v in Xyt.coords['features'].values if v != predictand]
Xda = Xyt.loc[predictors]
yda = Xyt.loc[predictand]
predictors
time = yda.time
Xda = Xda.chunk(dict(time=-1, features=-1)).data.T
yda = yda.data.squeeze()
Xda
yda
import joblib
from sklearn.pipeline import Pipeline
from dask_ml.preprocessing import StandardScaler
from dask_ml.decomposition import PCA
#from dask_ml.xgboost import XGBRegressor
from dask_ml.linear_model import LogisticRegression
from dask_ml.linear_model import LinearRegression
model_kws = dict(n_jobs=-1, max_iter=10000, verbose=True)
pipe = Pipeline([('scaler', StandardScaler()),
#('pca', PCA(n_components=6)),
('model', LinearRegression(**model_kws)),],
verbose=True)
pipe
Xda = Xda.persist()
with ProgressBar():
pipe.fit(Xda, yda)
###Output
[Pipeline] ............ (step 1 of 2) Processing scaler, total= 0.5s
[Pipeline] ............. (step 2 of 2) Processing model, total= 1.1s
###Markdown
Test it on the same data
###Code
def add_time(vector, time, name=None):
"""Converts arrays to xarrays with a time coordinate."""
return xr.DataArray(vector, dims=('time'), coords={'time': time}, name=name)
with ProgressBar():
ytest = pipe.predict(Xda)
ytest = add_time(ytest, time, name='dis-forecast')
fig, ax = plt.subplots(figsize=(24,5))
Xyt.loc[predictand].to_pandas().plot(ax=ax, label='dis-reanalysis')
ytest.to_pandas().plot(ax=ax, label='dis-forecast')
plt.legend()
ax.set_ylabel('river discharge [m$^3$/s]')
###Output
_____no_output_____
|
Python/2. Python Basics (cont.)/3. Pandas/Notebooks/Pandas - Books, Problem Sets, Lessons, and Solutions/Additional Notebooks/Basic Data-analysis-with-Pandas.ipynb
|
###Markdown
Data Analysis with Pandas. Data can often be stored in multiple file formats: - Excel spreadsheets (`.xlsx`); - Comma separated files (`.csv`); - Json (`.json`); - ... Similarly you might want to store data in any of the above formats. This is where the [Pandas](http://pandas.pydata.org/) library can be useful:> "... easy-to-use data structures and data analysis tools for the Python programming language."In this section we will see how to: - Read in data files; - Query those data files; - Write to data files. Reading in data files. Consider the file [goldbach.xlsx](data/goldbach.xlsx) which contains rows of data confirming the [Goldbach conjecture](https://en.wikipedia.org/wiki/Goldbach's_conjecture):> Every even integer greater than 2 can be expressed as the sum of two primes. The data is made up of 3 columns: $N$, $a$ and $b$, and there is a row for every possible expression of $N = a + b$ for $a \leq b$ prime. Note that this data was made using [Appendix A](A1 - Appendix creating Goldbach data.ipynb). Let us start by importing Pandas and reading in the data file. To do this you will need to know the path to the file on your computer:
###Code
import pandas as pd
df = pd.read_excel("data/goldbach.xlsx")
###Output
_____no_output_____
###Markdown
This reads in the excel file as a Pandas data frame. Let us take a look at the first few rows of the data frame (the `head`):
###Code
df.head()
###Output
_____no_output_____
###Markdown
Let us look at the last few rows (the `tail`):
###Code
df.tail()
###Output
_____no_output_____
###Markdown
Querying our data. A quick way of getting a summary of the data we have is with `.describe()`:
###Code
df.describe()
###Output
_____no_output_____
###Markdown
Although that is not terribly useful for this particular data set, it is more useful in the case of statistical analysis. Let us take a closer look at a specific number ($N=322$) and the ways it can be written as the sum of two primes.
###Code
df[df['N'] == 322]
###Output
_____no_output_____
###Markdown
To briefly explain what is happening there: `df['N'] == 322` returns a series of `True` and `False` values identifying the positions in our data where $N=322$. We then pass that series to the data frame to select those rows: `df[df['N'] == 322]`. Let us find out how many decompositions exist for each number in our data frame. We will do this using the `value_counts()` statement:
###Code
s = df['N'].value_counts()
s
###Output
_____no_output_____
###Markdown
The above is a series (ordered by count); let us rename the count variable and create a new dataframe:
###Code
df = pd.DataFrame(s.rename('counts'))
df.head()
###Output
_____no_output_____
###Markdown
We have created a dataframe with an index given by `N`. Let us create a new variable which is the normalised count:
###Code
df["normalised_counts"] = df['counts'] / df.index
df.head()
###Output
_____no_output_____
###Markdown
We could now look a bit closer at this using `describe`:
###Code
df.describe()
###Output
_____no_output_____
###Markdown
We can also directly plot our data, but just as when we used Sympy we need to tell Jupyter to display the plots in the notebook:
###Code
%matplotlib inline
###Output
_____no_output_____
###Markdown
Here is a quick histogram of the counts:
###Code
df['counts'].hist();
###Output
_____no_output_____
###Markdown
The normalised counts:
###Code
df['normalised_counts'].hist();
###Output
_____no_output_____
###Markdown
Writing our data to files. Finally let us write this data to a 'comma separated value' (`.csv`) file in case we want to look at it later:
###Code
df.to_csv("data/goldbach_counts.csv")
###Output
_____no_output_____
|
Big-Data-Clusters/CU2/Public/content/install/sop054-install-azdata.ipynb
|
###Markdown
SOP054 - Install azdata CLI (using pip). Steps: Common functions - define helper functions used in this notebook.
###Code
# Define `run` function for transient fault handling, hyperlinked suggestions, and scrolling updates on Windows
import sys
import os
import re
import json
import platform
import shlex
import shutil
import datetime
from subprocess import Popen, PIPE
from IPython.display import Markdown
retry_hints = {} # Output in stderr known to be transient, therefore automatically retry
error_hints = {} # Output in stderr where a known SOP/TSG exists which will be HINTed for further help
install_hint = {} # The SOP to help install the executable if it cannot be found
first_run = True
rules = None
debug_logging = False
def run(cmd, return_output=False, no_output=False, retry_count=0):
"""Run shell command, stream stdout, print stderr and optionally return output
NOTES:
1. Commands that need this kind of ' quoting on Windows e.g.:
kubectl get nodes -o jsonpath={.items[?(@.metadata.annotations.pv-candidate=='data-pool')].metadata.name}
Need to actually pass in as '"':
kubectl get nodes -o jsonpath={.items[?(@.metadata.annotations.pv-candidate=='"'data-pool'"')].metadata.name}
The ' quote approach, although correct when pasting into Windows cmd, will hang at the line:
`iter(p.stdout.readline, b'')`
The shlex.split call does the right thing for each platform, just use the '"' pattern for a '
"""
MAX_RETRIES = 5
output = ""
retry = False
global first_run
global rules
if first_run:
first_run = False
rules = load_rules()
# When running `azdata sql query` on Windows, replace any \n in """ strings, with " ", otherwise we see:
#
# ('HY090', '[HY090] [Microsoft][ODBC Driver Manager] Invalid string or buffer length (0) (SQLExecDirectW)')
#
if platform.system() == "Windows" and cmd.startswith("azdata sql query"):
cmd = cmd.replace("\n", " ")
# shlex.split is required on bash and for Windows paths with spaces
#
cmd_actual = shlex.split(cmd)
# Store this (i.e. kubectl, python etc.) to support binary context aware error_hints and retries
#
user_provided_exe_name = cmd_actual[0].lower()
# When running python, use the python in the ADS sandbox ({sys.executable})
#
if cmd.startswith("python "):
cmd_actual[0] = cmd_actual[0].replace("python", sys.executable)
# On Mac, when ADS is not launched from terminal, LC_ALL may not be set, which causes pip installs to fail
# with:
#
# UnicodeDecodeError: 'ascii' codec can't decode byte 0xc5 in position 4969: ordinal not in range(128)
#
# Setting it to a default value of "en_US.UTF-8" enables pip install to complete
#
if platform.system() == "Darwin" and "LC_ALL" not in os.environ:
os.environ["LC_ALL"] = "en_US.UTF-8"
# When running `kubectl`, if AZDATA_OPENSHIFT is set, use `oc`
#
if cmd.startswith("kubectl ") and "AZDATA_OPENSHIFT" in os.environ:
cmd_actual[0] = cmd_actual[0].replace("kubectl", "oc")
    # To aid supportability, determine which binary file will actually be executed on the machine
#
which_binary = None
# Special case for CURL on Windows. The version of CURL in Windows System32 does not work to
# get JWT tokens, it returns "(56) Failure when receiving data from the peer". If another instance
# of CURL exists on the machine use that one. (Unfortunately the curl.exe in System32 is almost
# always the first curl.exe in the path, and it can't be uninstalled from System32, so here we
# look for the 2nd installation of CURL in the path)
if platform.system() == "Windows" and cmd.startswith("curl "):
path = os.getenv('PATH')
for p in path.split(os.path.pathsep):
p = os.path.join(p, "curl.exe")
if os.path.exists(p) and os.access(p, os.X_OK):
if p.lower().find("system32") == -1:
cmd_actual[0] = p
which_binary = p
break
# Find the path based location (shutil.which) of the executable that will be run (and display it to aid supportability), this
# seems to be required for .msi installs of azdata.cmd/az.cmd. (otherwise Popen returns FileNotFound)
#
# NOTE: Bash needs cmd to be the list of the space separated values hence shlex.split.
#
if which_binary == None:
which_binary = shutil.which(cmd_actual[0])
if which_binary == None:
if user_provided_exe_name in install_hint and install_hint[user_provided_exe_name] is not None:
display(Markdown(f'HINT: Use [{install_hint[user_provided_exe_name][0]}]({install_hint[user_provided_exe_name][1]}) to resolve this issue.'))
raise FileNotFoundError(f"Executable '{cmd_actual[0]}' not found in path (where/which)")
else:
cmd_actual[0] = which_binary
start_time = datetime.datetime.now().replace(microsecond=0)
print(f"START: {cmd} @ {start_time} ({datetime.datetime.utcnow().replace(microsecond=0)} UTC)")
print(f" using: {which_binary} ({platform.system()} {platform.release()} on {platform.machine()})")
print(f" cwd: {os.getcwd()}")
# Command-line tools such as CURL and AZDATA HDFS commands output
# scrolling progress bars, which causes Jupyter to hang forever, to
# workaround this, use no_output=True
#
    # Work around an infinite hang when a notebook generates a non-zero return code, break out, and do not wait
#
wait = True
try:
if no_output:
p = Popen(cmd_actual)
else:
p = Popen(cmd_actual, stdout=PIPE, stderr=PIPE, bufsize=1)
with p.stdout:
for line in iter(p.stdout.readline, b''):
line = line.decode()
if return_output:
output = output + line
else:
if cmd.startswith("azdata notebook run"): # Hyperlink the .ipynb file
regex = re.compile(' "(.*)"\: "(.*)"')
match = regex.match(line)
if match:
if match.group(1).find("HTML") != -1:
display(Markdown(f' - "{match.group(1)}": "{match.group(2)}"'))
else:
display(Markdown(f' - "{match.group(1)}": "[{match.group(2)}]({match.group(2)})"'))
wait = False
break # otherwise infinite hang, have not worked out why yet.
else:
print(line, end='')
if rules is not None:
apply_expert_rules(line)
if wait:
p.wait()
except FileNotFoundError as e:
if install_hint is not None:
display(Markdown(f'HINT: Use {install_hint} to resolve this issue.'))
raise FileNotFoundError(f"Executable '{cmd_actual[0]}' not found in path (where/which)") from e
exit_code_workaround = 0 # WORKAROUND: azdata hangs on exception from notebook on p.wait()
if not no_output:
for line in iter(p.stderr.readline, b''):
try:
line_decoded = line.decode()
except UnicodeDecodeError:
# NOTE: Sometimes we get characters back that cannot be decoded(), e.g.
#
# \xa0
#
# For example see this in the response from `az group create`:
#
# ERROR: Get Token request returned http error: 400 and server
# response: {"error":"invalid_grant",# "error_description":"AADSTS700082:
# The refresh token has expired due to inactivity.\xa0The token was
# issued on 2018-10-25T23:35:11.9832872Z
#
# which generates the exception:
#
# UnicodeDecodeError: 'utf-8' codec can't decode byte 0xa0 in position 179: invalid start byte
#
print("WARNING: Unable to decode stderr line, printing raw bytes:")
print(line)
line_decoded = ""
pass
else:
# azdata emits a single empty line to stderr when doing an hdfs cp, don't
# print this empty "ERR:" as it confuses.
#
if line_decoded == "":
continue
print(f"STDERR: {line_decoded}", end='')
if line_decoded.startswith("An exception has occurred") or line_decoded.startswith("ERROR: An error occurred while executing the following cell"):
exit_code_workaround = 1
# inject HINTs to next TSG/SOP based on output in stderr
#
if user_provided_exe_name in error_hints:
for error_hint in error_hints[user_provided_exe_name]:
if line_decoded.find(error_hint[0]) != -1:
display(Markdown(f'HINT: Use [{error_hint[1]}]({error_hint[2]}) to resolve this issue.'))
# apply expert rules (to run follow-on notebooks), based on output
#
if rules is not None:
apply_expert_rules(line_decoded)
# Verify if a transient error, if so automatically retry (recursive)
#
if user_provided_exe_name in retry_hints:
for retry_hint in retry_hints[user_provided_exe_name]:
if line_decoded.find(retry_hint) != -1:
if retry_count < MAX_RETRIES:
print(f"RETRY: {retry_count} (due to: {retry_hint})")
retry_count = retry_count + 1
output = run(cmd, return_output=return_output, retry_count=retry_count)
if return_output:
return output
else:
return
elapsed = datetime.datetime.now().replace(microsecond=0) - start_time
# WORKAROUND: We avoid infinite hang above in the `azdata notebook run` failure case, by inferring success (from stdout output), so
# don't wait here, if success known above
#
if wait:
if p.returncode != 0:
raise SystemExit(f'Shell command:\n\n\t{cmd} ({elapsed}s elapsed)\n\nreturned non-zero exit code: {str(p.returncode)}.\n')
else:
if exit_code_workaround !=0 :
raise SystemExit(f'Shell command:\n\n\t{cmd} ({elapsed}s elapsed)\n\nreturned non-zero exit code: {str(exit_code_workaround)}.\n')
print(f'\nSUCCESS: {elapsed}s elapsed.\n')
if return_output:
return output
def load_json(filename):
"""Load a json file from disk and return the contents"""
with open(filename, encoding="utf8") as json_file:
return json.load(json_file)
def load_rules():
"""Load any 'expert rules' from the metadata of this notebook (.ipynb) that should be applied to the stderr of the running executable"""
try:
# Load this notebook as json to get access to the expert rules in the notebook metadata.
#
j = load_json("sop054-install-azdata.ipynb")
except:
pass # If the user has renamed the book, we can't load ourself. NOTE: Is there a way in Jupyter, to know your own filename?
else:
if "metadata" in j and \
"azdata" in j["metadata"] and \
"expert" in j["metadata"]["azdata"] and \
"rules" in j["metadata"]["azdata"]["expert"]:
rules = j["metadata"]["azdata"]["expert"]["rules"]
rules.sort() # Sort rules, so they run in priority order (the [0] element). Lowest value first.
# print (f"EXPERT: There are {len(rules)} rules to evaluate.")
return rules
def apply_expert_rules(line):
"""Determine if the stderr line passed in, matches the regular expressions for any of the 'expert rules', if so
inject a 'HINT' to the follow-on SOP/TSG to run"""
global rules
for rule in rules:
# rules that have 9 elements are the injected (output) rules (the ones we want). Rules
# with only 8 elements are the source (input) rules, which are not expanded (i.e. TSG029,
# not ../repair/tsg029-nb-name.ipynb)
if len(rule) == 9:
notebook = rule[1]
cell_type = rule[2]
output_type = rule[3] # i.e. stream or error
output_type_name = rule[4] # i.e. ename or name
output_type_value = rule[5] # i.e. SystemExit or stdout
details_name = rule[6] # i.e. evalue or text
expression = rule[7].replace("\\*", "*") # Something escaped *, and put a \ in front of it!
if debug_logging:
print(f"EXPERT: If rule '{expression}' satisfied', run '{notebook}'.")
if re.match(expression, line, re.DOTALL):
if debug_logging:
print("EXPERT: MATCH: name = value: '{0}' = '{1}' matched expression '{2}', therefore HINT '{4}'".format(output_type_name, output_type_value, expression, notebook))
match_found = True
display(Markdown(f'HINT: Use [{notebook}]({notebook}) to resolve this issue.'))
print('Common functions defined successfully.')
# Hints for binary (transient fault) retry, (known) error and install guide
#
retry_hints = {'python': [], 'azdata': ['Endpoint sql-server-master does not exist', 'Endpoint livy does not exist', 'Failed to get state for cluster', 'Endpoint webhdfs does not exist', 'Adaptive Server is unavailable or does not exist', 'Error: Address already in use']}
error_hints = {'python': [['Library not loaded: /usr/local/opt/unixodbc', 'SOP008 - Backup HDFS files to Azure Data Lake Store Gen2 with distcp', '../backup-restore/sop008-distcp-backup-to-adl-gen2.ipynb'], ['WARNING: You are using pip version', 'SOP040 - Upgrade pip in ADS Python sandbox', '../install/sop040-upgrade-pip.ipynb']], 'azdata': [['azdata login', 'SOP028 - azdata login', '../common/sop028-azdata-login.ipynb'], ['The token is expired', 'SOP028 - azdata login', '../common/sop028-azdata-login.ipynb'], ['Reason: Unauthorized', 'SOP028 - azdata login', '../common/sop028-azdata-login.ipynb'], ['Max retries exceeded with url: /api/v1/bdc/endpoints', 'SOP028 - azdata login', '../common/sop028-azdata-login.ipynb'], ['Look at the controller logs for more details', 'TSG027 - Observe cluster deployment', '../diagnose/tsg027-observe-bdc-create.ipynb'], ['provided port is already allocated', 'TSG062 - Get tail of all previous container logs for pods in BDC namespace', '../log-files/tsg062-tail-bdc-previous-container-logs.ipynb'], ['Create cluster failed since the existing namespace', 'SOP061 - Delete a big data cluster', '../install/sop061-delete-bdc.ipynb'], ['Failed to complete kube config setup', 'TSG067 - Failed to complete kube config setup', '../repair/tsg067-failed-to-complete-kube-config-setup.ipynb'], ['Error processing command: "ApiError', 'TSG110 - Azdata returns ApiError', '../repair/tsg110-azdata-returns-apierror.ipynb'], ['Error processing command: "ControllerError', 'TSG036 - Controller logs', '../log-analyzers/tsg036-get-controller-logs.ipynb'], ['ERROR: 500', 'TSG046 - Knox gateway logs', '../log-analyzers/tsg046-get-knox-logs.ipynb'], ['Data source name not found and no default driver specified', 'SOP069 - Install ODBC for SQL Server', '../install/sop069-install-odbc-driver-for-sql-server.ipynb'], ["Can't open lib 'ODBC Driver 17 for SQL Server", 'SOP069 - Install ODBC for SQL Server', '../install/sop069-install-odbc-driver-for-sql-server.ipynb'], ['Control plane upgrade failed. Failed to upgrade controller.', 'TSG108 - View the controller upgrade config map', '../diagnose/tsg108-controller-failed-to-upgrade.ipynb']]}
install_hint = {'python': [], 'azdata': ['SOP063 - Install azdata CLI (using package manager)', '../install/sop063-packman-install-azdata.ipynb']}
###Output
_____no_output_____
###Markdown
Install azdata CLI
###Code
run(f'python --version')
run(f'python -m pip install -r https://aka.ms/azdata')
###Output
_____no_output_____
###Markdown
Display azdata version
###Code
run("azdata --version")
###Output
_____no_output_____
###Markdown
Related (SOP063, SOP054)
###Code
print('Notebook execution complete.')
###Output
_____no_output_____
|
notebooks/__work_in_progress/models_inprogress/geotherm_melt.ipynb
|
###Markdown
A geotherm with radiogenic heating and partial melt
###Code
import underworld as uw
import numpy as np
import glucifer
from underworld import function as fn
import os
from mpi4py import MPI
## All numbers in SI units ##
# model dimensions [km]
length = 270e3
height = 75e3
# desired resolution
lRes = 5e3
hRes = 1e3
(resI, resJ) = (int(length/lRes),int(height/hRes) )
print(resI, resJ)
mymesh = uw.mesh.FeMesh_Cartesian(elementType='Q1/dQ0',
elementRes = (resI,resJ),
minCoord = (0.0,-1*height),
maxCoord = (length, 15e3))
velocityField = uw.mesh.MeshVariable(mymesh, nodeDofCount=2)
pressureField = uw.mesh.MeshVariable(mymesh.subMesh, nodeDofCount=1)
temperatureField = uw.mesh.MeshVariable(mymesh, nodeDofCount=1)
meltField = uw.mesh.MeshVariable(mymesh, nodeDofCount=1)
# initialise to be sure
velocityField.data[:] = [0.0,0.0]
temperatureField.data[:] = 0.0
# create the swarm
myswarm = uw.swarm.Swarm( mesh=mymesh, particleEscape=True )
swarmLayout = uw.swarm.layouts.GlobalSpaceFillerLayout( swarm=myswarm, particlesPerCell=20 )
# create swarm variables - very cool!
materialVariable = myswarm.add_variable( dataType="int" , count=1 )
old_melt = myswarm.add_variable( dataType="double", count=1 )
# populate swarm - should be after add_variables()
myswarm.populate_using_layout( layout=swarmLayout )
worldParams = {
'gravity' : 9.8,
'cp' : 1e3,
'diffusivity' : 9.1e-7, # [m^2 s^-1]
'spreadingV' : 3.0441400304414e-10, # m*s-1 ~ 0.96 cm/year
'thermalExp' : 2.8e-5 # [K^-1]
}
meltParams = {
'lhf' : 200, # kJ/kg the latent heat of fusion
'viscosityChange': 1e3,
'densityChange' : 0.13
}
airParams = {
'index' : 0,
'density' : 0., # kg m^-3
'depth' : 5.0e3, # km
'radiogenic' : 0.0, # W m^-3
}
stickyAirParams = {
'index' : 1,
'density' : 1.2, # kg m^-3
'depth' : 0.0e3, # km
'radiogenic' : 0.0, # W m^-3
}
crustParams = {
'index' : 2,
'density' : 2720.0, # kg m^-3
'depth' : -60e3, # km
'radiogenic' : 7.67e-7 # W m^-3
}
mantleParams = {
'index' : 3,
'density' : 3370.0, # kg m^-3
'depth' : -75e3, # km
'radiogenic' : 0.0e-6, # W m^-3
}
worldParams['basalheatFlow'] = -0.022/( mantleParams['density'] * worldParams['cp']) # W m^-3/(rho*cp)
# define material geometry using python loop - slow but explicit
for index in range( len(myswarm.particleCoordinates.data) ):
coord = myswarm.particleCoordinates.data[index][:]
if coord[1] > airParams['depth']:
materialVariable.data[index] = airParams['index']
elif coord[1] > stickyAirParams['depth']:
materialVariable.data[index] = stickyAirParams['index']
elif coord[1] > crustParams['depth']:
materialVariable.data[index] = crustParams['index']
elif coord[1] > mantleParams['depth']:
materialVariable.data[index] = mantleParams['index']
matfig = glucifer.Figure()
matfig.append( glucifer.objects.Points(myswarm, materialVariable) )
matfig.show()
# define special wall boundary index sets
iWalls = mymesh.specialSets['MinI_VertexSet'] + mymesh.specialSets['MaxI_VertexSet']
jWalls = mymesh.specialSets['MinJ_VertexSet'] + mymesh.specialSets['MaxJ_VertexSet']
topWall = mymesh.specialSets['MaxJ_VertexSet']
bottomWall = mymesh.specialSets['MinJ_VertexSet']
# go through local nodes and find which should be considered air
airNodes = []
for n_i in range(mymesh.nodesLocal):
ycoord = mymesh.data[n_i][1]
if ycoord > 0.0:
airNodes.append(n_i)
airSet = uw.mesh.FeMesh_IndexSet(mymesh, topologicalIndex=0, size=mymesh.nodesGlobal, fromObject=airNodes)
# density units [kg m^-3]
densityMap = { airParams['index'] : airParams['density'],
stickyAirParams['index'] : stickyAirParams['density'],
crustParams['index'] : crustParams['density'] + worldParams['thermalExp'] * (temperatureField-293.15) ,
mantleParams['index'] : mantleParams['density'] + worldParams['thermalExp'] * (temperatureField-293.15) }
densityFn = fn.branching.map( fn_key = materialVariable, mapping = densityMap )
# heating functions for the materials [ W m^-3 / (rho * cp )]
radiogenicMap = { airParams['index'] : 0.0,
stickyAirParams['index'] : 0.0,
crustParams['index'] : crustParams['radiogenic']/(densityFn*worldParams['cp']),
mantleParams['index'] : mantleParams['radiogenic']/(densityFn*worldParams['cp']) }
radiogenicFn = fn.branching.map( fn_key = materialVariable, mapping = radiogenicMap )
# heat boundary condition
temperatureField.data[airSet.data] = 293.15
heatFlux = fn.misc.constant(worldParams['basalheatFlow'])
tempDirichletBCs = uw.conditions.DirichletCondition( variable = temperatureField,
indexSetsPerDof = ( airSet ) )
tempNeumannBCs = uw.conditions.NeumannCondition( fn_flux=heatFlux, variable=temperatureField,
indexSetsPerDof = (bottomWall) )
heatSS = uw.systems.SteadyStateHeat(temperatureField,
fn_diffusivity=worldParams['diffusivity'],
fn_heating=radiogenicFn,
conditions=[tempDirichletBCs, tempNeumannBCs])
heatSSSolver = uw.systems.Solver(heatSS)
heatSSSolver.solve(nonLinearIterate=True)
tfig = glucifer.Figure()
tfig.append( glucifer.objects.Surface(mymesh, temperatureField))
tfig.show()
rhofig = glucifer.Figure()
rhofig.append( glucifer.objects.Points(myswarm, densityFn, valueRange=[0.1,3400], logScale=True) )
rhofig.show()
from math import fabs
class Node(object):
def __init__(self, mesh, index):
self.index = index
self.x = mesh.data[index][0]
self.y = mesh.data[index][1]
class Element(object):
def __init__(self, mesh, index):
self.index = index
self.botLeftNode = Node(mesh, mesh.data_elementNodes[index][0])
self.botRightNode = Node(mesh, mesh.data_elementNodes[index][1])
self.topLeftNode = Node(mesh, mesh.data_elementNodes[index][2])
self.topRightNode = Node(mesh, mesh.data_elementNodes[index][3])
if index < mesh.elementsGlobal - mesh.elementRes[0]:
self.above = index + mesh.elementRes[0]
else:
self.above = None
if index > (mesh.elementRes[0] - 1):
self.below = index - mesh.elementRes[0]
else:
self.below = None
self.dX = fabs(self.botRightNode.x - self.botLeftNode.x)
self.dY = fabs(self.topLeftNode.y - self.botRightNode.y)
self.volume = self.dX * self.dY
def lithoPressure(mesh, lithoPress, Density, gravity):
# Go through the mesh elements starting from the top right corner
# !! Order matters !!
for index in range(mesh.elementsGlobal - 1, -1, -1):
elem = Element(mesh, index)
pressure = 0.
above = elem.above
if above is not None: # Get Pressure from above elements
pressure = lithoPress.data[above]
elemAbove = Element(mesh, above)
pressure += (gravity * elemAbove.dY / 4.0 * (Density.data[elemAbove.botLeftNode.index] +
Density.data[elemAbove.botRightNode.index]))
pressure += (gravity * elem.dY / 4.0 * (Density.data[elem.topLeftNode.index] +
Density.data[elem.topRightNode.index]))
lithoPress.data[index] = pressure
return lithoPress.data
DensityVar = uw.mesh.MeshVariable(mymesh, nodeDofCount=1)
projectorDensity = uw.utils.MeshVariable_Projection( DensityVar, densityFn, type=0 )
projectorDensity.solve()
pressureField.data[:] = lithoPressure(mymesh, pressureField, DensityVar, 9.81)
pfig = glucifer.Figure()
pfig.append( glucifer.objects.Surface(mymesh, pressureField,logScale=False) )
pfig.show()
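# evalMelt (below): melt-fraction parameterisation based on a pressure-dependent solidus (T_s)
# and liquidus (T_l). T_ss is the temperature normalised to the super-solidus range [-0.5, 0.5];
# the cubic expression maps it to a melt fraction between 0 and 1, and the function returns 0
# outside that range (melt is only evaluated between the solidus and the liquidus).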
def evalMelt( pressure, temperature ):
T_s = np.polynomial.polynomial.polyval(pressure, [1063,-1.2e-7,1.2e-16])
T_l = np.polynomial.polynomial.polyval(pressure, [1563.0,-1.2e-7,1.2e-16])
T_ss = ( temperature - 0.5*(T_s+T_l) ) / (T_l-T_s)
return np.where( (-0.5<T_ss) & (T_ss<0.5),
0.5 + T_ss + ( T_ss*T_ss -0.25 )*( 0.4256 + 2.988 * T_ss ),
0.0 )
# test the melt function but rendering it on the mesh
meltField.data[:] = evalMelt (pressureField.evaluate(mymesh), temperatureField.evaluate(mymesh) )
mfig = glucifer.Figure()
mfig.append( glucifer.objects.Surface(mymesh, meltField) )
mfig.show()
# make a geotherm along the MinI_VertexSet
wallNodes = mymesh.specialSets['MinI_VertexSet']
# get data: coords, temperature, pressure, mesh
ycoords = mymesh.data[wallNodes.data][:,1].reshape(-1,1)/1e3
temps = temperatureField.data[wallNodes.data]
pressure = pressureField.evaluate(wallNodes)
melt = meltField.data[wallNodes.data]
# use numpy to evaluate solidus and liquidus
T_s = np.polynomial.polynomial.polyval(pressure, [1063,-1.2e-7,1.2e-16])
T_l = np.polynomial.polynomial.polyval(pressure, [1563.0,-1.2e-7,1.2e-16])
%matplotlib inline
import matplotlib.pyplot as pyplot
import matplotlib.pylab as pylab
pylab.rcParams[ 'figure.figsize'] = 12, 6
pyplot.plot(temps,ycoords, 'o', color = 'black', label='geotherm')
pyplot.plot(T_s,ycoords, 'o', color = 'blue', label='solidus')
pyplot.plot(T_l,ycoords, 'o', color = 'red', label='liquidus')
pyplot.ylabel('depth km')
pyplot.xlabel('Temperature K')
pyplot.legend()
pyplot.show()
# example of what would be required for a restart of the above simulation
# velocityField = uw.mesh.MeshVariable(mymesh, nodeDofCount=3 )
# tDot = uw.mesh.MeshVariable(mymesh, nodeDofCount=1 )
# old_melt.data[:] = evalMelt(-1.0*pressureField.evaluate(myswarm), temperatureField.evaluate(myswarm))
# dF_dt = fn.misc.constant(0.0) # important for redefining later
# dynamicHeating = meltParams['lhf']/worldParams['cp']*dF_dt
# temperatureField.load('./path/to/saved_tempurature.h5')
# heatEq = uw.systems.AdvectionDiffusion(temperatureField, tDot, velocityField,
# fn_diffusivity=worldParams['diffusivity'],
# fn_sourceTerm=radiogenicFn,# + temperatureField*dynamicHeating,
# conditions=[tempDirichletBCs, tempNeumannBCs])
# # setup the melting viscosity modifier as a function
# change = 1.0-(1.0-meltParams['viscosityChange'])/(0.15-0.3)*(meltField-0.15)
# meltViscosityFn = fn.branching.conditional( [ ( meltField < 0.15, 1.0 ),
# ( meltField > 0.3 , 1e-3),
# ( True, change )] )
###Output
_____no_output_____
|
01 - Introduction to Scikit-learn.ipynb
|
###Markdown
Get some data to play with
###Code
from sklearn.datasets import load_digits
digits = load_digits()
digits.keys()
digits.images.shape
print(digits.images[0])
import matplotlib.pyplot as plt
%matplotlib notebook
plt.matshow(digits.images[0], cmap=plt.cm.Greys)
digits.data.shape
digits.target.shape
digits.target
###Output
_____no_output_____
###Markdown
**Data is always a numpy array (or sparse matrix) of shape (n_samples, n_features)** Split the data to get going
###Code
from sklearn.cross_validation import train_test_split
X_train, X_test, y_train, y_test = train_test_split(digits.data,
digits.target)
###Output
_____no_output_____
###Markdown
Really Simple API. 0) Import your model class
###Code
from sklearn.svm import LinearSVC
###Output
_____no_output_____
###Markdown
1) Instantiate an object and set the parameters
###Code
svm = LinearSVC(C=0.1)
###Output
_____no_output_____
###Markdown
2) Fit the model
###Code
svm.fit(X_train, y_train)
###Output
_____no_output_____
###Markdown
3) Apply / evaluate
###Code
print(svm.predict(X_train))
print(y_train)
svm.score(X_train, y_train)
svm.score(X_test, y_test)
###Output
_____no_output_____
###Markdown
And again---------
###Code
from sklearn.ensemble import RandomForestClassifier
rf = RandomForestClassifier(n_estimators=50)
rf.fit(X_train, y_train)
rf.score(X_test, y_test)
#%load from github
import numpy as np
import pylab as pl
from matplotlib.colors import ListedColormap
from sklearn.cross_validation import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.datasets import make_moons, make_circles, make_classification
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.lda import LDA
from sklearn.qda import QDA
h = .02 # step size in the mesh
names = ["Nearest Neighbors", "Linear SVM", "RBF SVM", "Decision Tree",
"Random Forest", "AdaBoost", "Naive Bayes", "LDA", "QDA"]
classifiers = [
KNeighborsClassifier(3),
SVC(kernel="linear", C=0.025),
SVC(gamma=2, C=1),
DecisionTreeClassifier(max_depth=5),
RandomForestClassifier(max_depth=5, n_estimators=10, max_features=1),
AdaBoostClassifier(),
GaussianNB(),
LDA(),
QDA()]
X, y = make_classification(n_features=2, n_redundant=0, n_informative=2,
random_state=1, n_clusters_per_class=1)
rng = np.random.RandomState(2)
X += 2 * rng.uniform(size=X.shape)
linearly_separable = (X, y)
datasets = [make_moons(noise=0.3, random_state=0),
make_circles(noise=0.2, factor=0.5, random_state=1),
linearly_separable
]
figure = pl.figure(figsize=(27, 9))
i = 1
# iterate over datasets
for ds in datasets:
# preprocess dataset, split into training and test part
X, y = ds
X = StandardScaler().fit_transform(X)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.4)
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
# just plot the dataset first
cm = pl.cm.RdBu
cm_bright = ListedColormap(['#FF0000', '#0000FF'])
ax = pl.subplot(len(datasets), len(classifiers) + 1, i)
# Plot the training points
ax.scatter(X_train[:, 0], X_train[:, 1], c=y_train, cmap=cm_bright)
# and testing points
ax.scatter(X_test[:, 0], X_test[:, 1], c=y_test, cmap=cm_bright, alpha=0.6)
ax.set_xlim(xx.min(), xx.max())
ax.set_ylim(yy.min(), yy.max())
ax.set_xticks(())
ax.set_yticks(())
i += 1
# iterate over classifiers
for name, clf in zip(names, classifiers):
ax = pl.subplot(len(datasets), len(classifiers) + 1, i)
clf.fit(X_train, y_train)
score = clf.score(X_test, y_test)
# Plot the decision boundary. For that, we will assign a color to each
        # point in the mesh [x_min, x_max]x[y_min, y_max].
if hasattr(clf, "decision_function"):
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
else:
Z = clf.predict_proba(np.c_[xx.ravel(), yy.ravel()])[:, 1]
# Put the result into a color plot
Z = Z.reshape(xx.shape)
ax.contourf(xx, yy, Z, cmap=cm, alpha=.8)
# Plot also the training points
ax.scatter(X_train[:, 0], X_train[:, 1], c=y_train, cmap=cm_bright)
# and testing points
ax.scatter(X_test[:, 0], X_test[:, 1], c=y_test, cmap=cm_bright,
alpha=0.6)
ax.set_xlim(xx.min(), xx.max())
ax.set_ylim(yy.min(), yy.max())
ax.set_xticks(())
ax.set_yticks(())
ax.set_title(name)
ax.text(xx.max() - .3, yy.min() + .3, ('%.2f' % score).lstrip('0'),
size=15, horizontalalignment='right')
i += 1
figure.subplots_adjust(left=.02, right=.98)
###Output
_____no_output_____
|
excercise1A.ipynb
|
###Markdown
Exercise 1A. All our code for exercise 1A. Import dependencies
###Code
from typing import Callable
import numpy as np
import matplotlib.pyplot as plt
###Output
_____no_output_____
###Markdown
Create the function $f(x) = \frac{1}{x} - \log(x) + \log(2)$
###Code
def f(x):
result = 1 / x - np.log(x) + np.log(2)
return result
###Output
_____no_output_____
###Markdown
1.A.1 Plotting the function. A plot of the function $f(x)$ on the interval $[0.1, 20]$.
###Code
def plot():
x = 0.1
i = 0
qq = np.empty(199)
qr = np.empty(199)
while x <= 20:
qq[i] = f(x)
qr[i] = x
x += 0.1
i += 1
plt.plot(qr, qq)
plt.grid(linestyle=":", linewidth=1.5)
plt.show()
plot()
###Output
_____no_output_____
###Markdown
1.A.1 False Position Method. Below this paragraph is our implementation of the False Position Method. We choose the left boundary $a = 1$ and the right boundary $b = 5$. We determine the new endpoint using: $c = \frac{a \cdot f(b) - b \cdot f(a)}{f(b) - f(a)}$. Then we check if $f(c)$ is the root of the function, by taking the absolute value of $f(c)$ and checking if it is smaller than the acceptable error. If $c$ is not the root, we check whether $f(c)$ has the same sign as $f(a)$; if so, we make $c$ the new left boundary, and if the sign of $f(c)$ is the same as that of $f(b)$, we make $c$ the new right boundary. We repeat this process until we reach the specified maximum number of iterations, the width of the interval becomes smaller than the specified minimum width, or $f(c)$ is within the acceptable error. We then print the last $c$ which was calculated as the root of the function.
###Code
def false_position(
left_boundry: float,
right_boundry: float,
max_iterations: int,
minimum_width: float,
acceptable_error: float,
function: Callable[[float], float],
):
iterations = 0
left_boundry_array = np.empty(max_iterations + 1)
right_boundry_array = np.empty(max_iterations + 1)
left_boundry_array[0] = left_boundry
right_boundry_array[0] = right_boundry
stop_reason: str = ""
center: float = 0.0
for i in range(max_iterations):
iterations = i + 1
center = (
            left_boundry * function(right_boundry) - right_boundry * function(left_boundry)
) / (function(right_boundry) - function(left_boundry))
if abs(function(center)) < acceptable_error:
stop_reason = "within acceptable error"
break
if np.sign(function(center)) == np.sign(function(left_boundry)):
left_boundry = center
else:
right_boundry = center
if right_boundry - left_boundry < minimum_width:
stop_reason = "minimum width reached"
break
left_boundry_array[iterations] = left_boundry
right_boundry_array[iterations] = right_boundry
root = center
if iterations == max_iterations:
stop_reason = "maximum iterations reached"
print(
f"{root=}\n{stop_reason=}\n{left_boundry=}\n{right_boundry=}\n{iterations=}"
)
return root
def false_position_main():
left_boundry = 1
right_boundry = 5
max_iterations = 50
minimum_width = 0.00000001
acceptable_error = 0.00000001
function = f
false_position(left_boundry, right_boundry, max_iterations, minimum_width, acceptable_error, function)
false_position_main()
###Output
root=2.8430598827496514
stop_reason='within acceptable error'
left_boundry=1
right_boundry=2.8430598945320167
iterations=26
###Markdown
1.A.2 Bisection Method. Below this paragraph is our implementation of the Bisection Method. We choose the left boundary $a = 1$ and the right boundary $b = 5$. We determine the new endpoint using: $c = a + \frac{b - a}{2}$. Then we check if $f(c)$ is the root of the function, by taking the absolute value of $f(c)$ and checking if it is smaller than the acceptable error. If $c$ is not the root, we check whether $f(c)$ has the same sign as $f(a)$; if so, we make $c$ the new left boundary, and if the sign of $f(c)$ is the same as that of $f(b)$, we make $c$ the new right boundary. We repeat this process until we reach the specified maximum number of iterations, the width of the interval becomes smaller than the specified minimum width, or $f(c)$ is within the acceptable error. We then print the last $c$ which was calculated as the root of the function.
###Code
def bisection_method(
left_boundry: float,
right_boundry: float,
max_iterations: int,
minimum_width: float,
acceptable_error: float,
function: Callable[[float], float],
):
iterations = 0
left_boundry_array = np.empty(max_iterations + 1)
right_boundry_array = np.empty(max_iterations + 1)
left_boundry_array[0] = left_boundry
right_boundry_array[0] = right_boundry
stop_reason: str = ""
center: float = 0.0
for i in range(max_iterations):
iterations = i + 1
center = left_boundry + (right_boundry - left_boundry) / 2
if abs(function(center)) < acceptable_error:
stop_reason = "within acceptable error"
break
if np.sign(function(center)) == np.sign(function(left_boundry)):
left_boundry = center
else:
right_boundry = center
left_boundry_array[iterations] = left_boundry
right_boundry_array[iterations] = right_boundry
if right_boundry - left_boundry < minimum_width:
stop_reason = "width interval reached"
break
root = center
if iterations == max_iterations:
stop_reason = "max iterations reached"
print(
f"{root=}\n{stop_reason=}\n{left_boundry=}\n{right_boundry=}\n{iterations=}"
)
return root
def bisection_method_main():
left_boundry = 1
right_boundry = 5
max_iterations = 50
minimum_width = 0.00000001
acceptable_error = 0.00000001
function = f
bisection_method(left_boundry, right_boundry, max_iterations, minimum_width, acceptable_error, function)
bisection_method_main()
###Output
root=2.843059867620468
stop_reason='within acceptable error'
left_boundry=2.8430598378181458
right_boundry=2.8430598974227905
iterations=27
###Markdown
1.A.3 Modified False Position. Below is our implementation of the Modified False Position method, again with left boundary $a = 1$ and right boundary $b = 5$. Whenever the same boundary has been retained for two consecutive iterations, a down-weighted interpolation step is taken instead of the standard false position step, which avoids the slow one-sided convergence of the plain method.
###Code
def bisection_part(
left_boundry: float,
right_boundry: float,
function: Callable[[float], float],
):
if np.sign(function(left_boundry)) == np.sign(function(right_boundry)):
center = (
left_boundry * function(right_boundry)
- 2 * right_boundry * function(left_boundry)
) / (function(right_boundry) - 2 * function(left_boundry))
else:
center = (
2 * left_boundry * function(right_boundry)
- right_boundry * function(left_boundry)
) / (2 * function(right_boundry) - function(left_boundry))
return center
def false_position_part(
left_boundry: float,
right_boundry: float,
function: Callable[[float], float],
):
center = (left_boundry * function(right_boundry) - right_boundry * function(left_boundry)) / (function(right_boundry) - function(left_boundry))
return center
def modified_false_position(
left_boundry: float,
right_boundry: float,
max_iterations: int,
minimum_width: float,
acceptable_error: float,
function: Callable[[float], float],
):
iteration = 0
counter_left_boundry_used = 0
counter_right_boundry_used = 0
left_boundry_array = np.empty(max_iterations + 1)
right_boundry_array = np.empty(max_iterations + 1)
left_boundry_array[0] = left_boundry
right_boundry_array[0] = right_boundry
stop_reason: str = ""
max_iterations = 50
minimum_width = 0.000001
acceptable_error = 0.00000001
center = 0.0
for i in range(max_iterations):
iteration = i + 1
if counter_left_boundry_used == 2 or counter_right_boundry_used == 2:
center = bisection_part(left_boundry, right_boundry, function)
else:
center = false_position_part(left_boundry, right_boundry, function)
if abs(function(center)) < acceptable_error:
stop_reason = "within acceptable error"
break
if np.sign(function(center)) == np.sign(function(left_boundry)):
left_boundry = center
counter_right_boundry_used += 1
counter_left_boundry_used = 0
else:
right_boundry = center
counter_left_boundry_used += 1
counter_right_boundry_used = 0
left_boundry_array[iteration] = left_boundry
right_boundry_array[iteration] = right_boundry
if right_boundry - left_boundry < minimum_width:
stop_reason = "minimum width reached"
if iteration == max_iterations:
stop_reason = "max iterations reached"
root = center
print(
f"{root=}\n{stop_reason=}\n{left_boundry=}\n{right_boundry=}\n{iteration=}"
)
return root
def modified_false_position_main():
left_boundry = 1
right_boundry = 5
max_iterations = 50
minimum_width = 0.00000001
acceptable_error = 0.00000001
function = f
modified_false_position(left_boundry, right_boundry, max_iterations, minimum_width, acceptable_error, function)
modified_false_position_main()
###Output
root=2.8430598917613343
stop_reason='within acceptable error'
left_boundry=1
right_boundry=2.843059913210903
iteration=22
|
LSDS122_Mac_Scheffer_Assignment.ipynb
|
###Markdown
###Code
!wget https://s3.amazonaws.com/instacart-datasets/instacart_online_grocery_shopping_2017_05_01.tar.gz
!tar --gunzip --extract --verbose --file=instacart_online_grocery_shopping_2017_05_01.tar.gz
%cd instacart_2017_05_01
from IPython.display import display, Image
url = 'https://cdn-images-1.medium.com/max/1600/1*vYGFQCafJtGBBX5mbl0xyw.png'
example = Image(url=url, width=600)
display(example)
!ls -lh
import pandas as pd
aisles = pd.read_csv('aisles.csv')
departments = pd.read_csv('departments.csv')
order_products_prior = pd.read_csv('order_products__prior.csv')
order_products_train = pd.read_csv('order_products__train.csv')
orders = pd.read_csv('orders.csv')
products = pd.read_csv('products.csv')
list(order_products_prior)
list(order_products_train)
len(orders)
order_products = pd.concat([order_products_prior, order_products_train])
order_products.shape
condition = (orders.user_id==1) & (orders.order_number <= 2)
cols = ['user_id', # 206209
'order_id', # 3421083 unique order ids
'order_number', # 100 unique order numbers
'order_dow', # 0-6
'order_hour_of_day'] # 0 - 23
subset = orders.loc[condition,cols]
subset.shape
cols = ['order_id', 'add_to_cart_order', 'product_id']
merged = pd.merge(subset,order_products[cols], how='inner', on='order_id')
merged.shape
subset.shape
merged.shape
display(example)
list(products)
cols = ['product_id', 'product_name']
final = pd.merge(merged,products[cols], how='inner', on='product_id')
final.sort_values(by=['order_number','add_to_cart_order'])
list(products), list(orders)
cols = ['order_id', 'add_to_cart_order', 'product_id']
df_close = pd.merge(orders, order_products[cols])
df_close.shape
cols = ['product_id', 'product_name']
df = pd.merge(df_close, products[cols])
df.shape
list(df.product_id.value_counts(sort=True,ascending=False).index[:10])
top_10 = []
for val in list(df.product_id.value_counts(sort=True,ascending=False).index[:10]):
top_10.append(df.product_name[df.product_id == val])
list(top_10[0])[0]
for i in range(10):
print(list(top_10[i])[0])
# much easier
list(df.product_name.value_counts(sort=True,ascending=False).iloc[:10])
df.product_name.value_counts(sort=True,ascending=False).index[:10]
dict_products = dict(df.product_name.value_counts())
pop_products = {}
for key,val in dict_products.items():
if dict_products[key] > 2900:
pop_products[key] = val
min(pop_products.values())
pop_df = pd.DataFrame.from_dict(pop_products, orient='index')
pop_df['product_name'] = pop_df.index
pop_df = pop_df.rename(columns={0: 'total_popularity'})
pop_df = pop_df.reset_index(drop=True)
pop_df.columns
df.shape
df = pd.merge(df, pop_df, how='inner', on='product_name')
df.head()
pivot = df.pivot_table(index='product_name', values='order_hour_of_day')
pivot.index
pivot['product_name'] = pivot.index
pivot.columns
# notice i did it wrong haha, the most popular products are probably around the mean order_hour_of_day for all products
pivot.describe()
len(df.user_id.unique())
list(df.user_id.unique())[50]
df[df.user_id == 1256].order_hour_of_day.mean()
import numpy as np
def mac_mean(series):
return np.mean(series)
cust_df = {'cust_mean_order_hour': [df[df.user_id == uid].order_hour_of_day.apply(mac_mean) for uid in df.user_id.unique()]}
###Output
_____no_output_____
|
ThinkingLikeAScientist/NotebooksWSolutions/p_values_vs_samples.ipynb
|
###Markdown
Danger of P-Value Mining. There are many forms of **p-value mining**. One temptation is to continue to collect data until the sample appears large enough to give a significant result. In this exercise you will explore the perils of this approach. To get started, execute the code in the cell below to import the required packages.
###Code
import numpy as np
import numpy.random as nr
import pandas as pd
## matplotlib with display of graphs inline
import matplotlib.pyplot as plt
import seaborn as sns
import statsmodels.api as sm  # needed for sm.OLS used in the cells below
%matplotlib inline
###Output
_____no_output_____
###Markdown
The code in the cell below simulates two independently distributed random variables (no correlation) with a sample size of 700 each. Execute this code.
###Code
nr.seed(560)
covariance = np.array([[10.0,0.0],[0.0,5.0]])
uncorrelated_data = nr.multivariate_normal(mean=[0.0,0.0], cov=covariance, size=700)
uncorrelated_data
###Output
_____no_output_____
###Markdown
You can test the relationship between these two variables with a linear model. Execute the code in the cell below to compute a linear model of one variable regressed on the other and then display the summary.
###Code
uncorrelated_OLS_model = sm.OLS(uncorrelated_data[:,0],uncorrelated_data[:,1]).fit()
print(uncorrelated_OLS_model.summary())
###Output
OLS Regression Results
=======================================================================================
Dep. Variable: y R-squared (uncentered): 0.002
Model: OLS Adj. R-squared (uncentered): 0.001
Method: Least Squares F-statistic: 1.607
Date: Fri, 03 Jan 2020 Prob (F-statistic): 0.205
Time: 14:21:28 Log-Likelihood: -1804.3
No. Observations: 700 AIC: 3611.
Df Residuals: 699 BIC: 3615.
Df Model: 1
Covariance Type: nonrobust
==============================================================================
coef std err t P>|t| [0.025 0.975]
------------------------------------------------------------------------------
x1 -0.0688 0.054 -1.268 0.205 -0.175 0.038
==============================================================================
Omnibus: 1.157 Durbin-Watson: 2.080
Prob(Omnibus): 0.561 Jarque-Bera (JB): 0.995
Skew: 0.049 Prob(JB): 0.608
Kurtosis: 3.156 Cond. No. 1.00
==============================================================================
Warnings:
[1] Standard Errors assume that the covariance matrix of the errors is correctly specified.
###Markdown
As expected, the slope coefficient shows no significant effect; you can see this by noticing that the confidence interval of the slope coefficient spans zero. Further, the adjusted R-squared value is essentially zero. Execute the code in the cell below and examine the resulting plot.
###Code
data_frame = pd.DataFrame(uncorrelated_data, columns=['variable1','variable2'])
sns.set(color_codes=True)
g = sns.lmplot(x='variable1', y='variable2', data=data_frame, size=7, aspect = 1.2)
###Output
_____no_output_____
###Markdown
Examine the plot. The confidence interval of the best-fit regression line contains the zero effect (zero slope) case indicating that there is no relationship between these variables. Having confirmed that there is no significant relationship between the two variables, let's see what happens when we randomly sample from these data. The code in the cell below samples increasing numbers of cases (rows) from the data set, computes an ordinary least squares model for each sample, and appends the resulting p-value and slope coefficient to lists. Execute this code.
###Code
indx_base = range(700)
slopes=[]
numbers=[]
p_values=[]
for i in range(6,500):
temp_model = sm.OLS(uncorrelated_data[:i,0],uncorrelated_data[:i,1]).fit()
slopes.append(temp_model._results.params[0])
p_values.append(temp_model._results.pvalues[0])
numbers.append(i)
###Output
_____no_output_____
###Markdown
We know that the actual slope coefficient should be close to (indistinguishable from) zero. But is that always the case as an increasing number of cases from the data set are sampled? To find out, execute the code in the cell below to plot the slope coefficient vs. the sample size.
###Code
## Plot slopes vs. sample size
fig, ax = plt.subplots(figsize=(9,8))
ax.plot(numbers, slopes, 'r')
ax.set_title('Estimated slope vs sample size') # Give the plot a main title
ax.set_xlabel('Sample size') # Set text for the x axis
ax.set_ylabel('slope') # Set text for y axis
###Output
_____no_output_____
###Markdown
The resulting plot has the look of a random walk. While the slope coefficient does approach the expected value of 0 with increasing sample size, there are some significant deviations along the way. In other words, random sampling of the data set can produce effects that might seem far from zero. What about the p-values of the slope coefficient? How does the apparent significance change as the data are randomly sampled? To find out, create and execute code in the cell below to plot the p-value vs. the sample size and examine the result.
###Code
## Plot p-values vs. sample size
fig, ax = plt.subplots(figsize=(9,8))
ax.plot(numbers, p_values, 'r')
ax.set_title('p-value vs sample size') # Give the plot a main title
ax.set_xlabel('Sample size') # Set text for the x axis
ax.set_ylabel('p-value') # Set text for y axis
###Output
_____no_output_____
|
jupyter/notebooks/segmentation/playground_rna.ipynb
|
###Markdown
Playground: Segmentation workflow for RNA. This notebook contains the workflow to detect single-molecule RNA FISH signals that works well for us. It is based on the Allen Cell Segmenter workflow for sialyltransferase 1. Key steps of the workflow:* Auto-contrast intensity normalization* Background subtraction* Edge-preserving smoothing* 3D spot filter* Watershed algorithm to split adjacent objects* Size thresholding
###Code
# IO packages
from aicsimageio import AICSImage
import os
import imageio
# calculation packages
import numpy as np
# visualization
from itkwidgets import view
from aicssegmentation.core.visual import seg_fluo_side_by_side, single_fluorescent_view, segmentation_quick_view
import matplotlib.pyplot as plt
plt.rcParams["figure.figsize"] = [16, 12]
# segmentation packages
from aicssegmentation.core.pre_processing_utils import intensity_normalization, image_smoothing_gaussian_slice_by_slice
from aicssegmentation.core.seg_dot import dot_3d_wrapper
from skimage.morphology import dilation, ball, remove_small_objects
# watershed packages
from skimage.segmentation import watershed
from skimage.feature import peak_local_max
from scipy.ndimage import distance_transform_edt
from skimage.measure import label
###Output
_____no_output_____
###Markdown
Loading the data. We'll start by investigating the segmentation for a single image. You can change which image you're investigating using the FILE_NAME variable below.
###Code
# Update the path to your data
FILE_PATH = '/data/rna/raw-data/'
FILE_NAME = 'NC12_interphase_Slide22_Emb21_Img1.tif'
reader = AICSImage(FILE_PATH + FILE_NAME)
IMG = reader.data
###Output
_____no_output_____
###Markdown
Preview of the image
###Code
#####################
structure_channel = 0
#####################
structure_img0 = IMG[0, 0, structure_channel,:,:,:]
view(single_fluorescent_view(structure_img0))
###Output
_____no_output_____
###Markdown
Image segmentation Step 1: Pre-Processing. About selected algorithms and tuned parameters:* **Intensity normalization**: Parameter `intensity_scaling_param` has two options: two values, say `[A, B]`, or a single value, say `[K]`. For the first case, `A` and `B` are non-negative values indicating that the full intensity range of the stack will first be cut off into **[mean - A * std, mean + B * std]** and then rescaled to **[0, 1]**. The smaller the values of `A` and `B` are, the higher the contrast will be. For the second case, `K`>0 indicates min-max normalization with an absolute intensity upper bound `K` (i.e., anything above `K` will be chopped off and reset as the minimum intensity of the stack) and `K`=0 means min-max normalization without any intensity bound. * Parameter for st6gal1: `intensity_scaling_param = [9, 19]`* **Smoothing**: 3D gaussian smoothing with `gaussian_smoothing_sigma = 1`. The larger the value is, the more the image will be smoothed.
###Code
################################
# First, calculate the best intensity normalization parameters
minimum_value = np.amin(structure_img0)
mean_value = np.mean(structure_img0)
percentile_99 = np.percentile(structure_img0, 99.99)
std_array = np.std(structure_img0)
a = round((mean_value - minimum_value) / std_array, 1)
b = round((percentile_99 - mean_value) / std_array, 1)
################################
## PARAMETER ##
intensity_scaling_param = [a, b]
gaussian_smoothing_sigma = 0.5
################################
# intensity normalization
structure_img = intensity_normalization(structure_img0, scaling_param=intensity_scaling_param)
# smoothing with gaussian filter
structure_img_smooth = image_smoothing_gaussian_slice_by_slice(structure_img, sigma=gaussian_smoothing_sigma)
# quickly visualize the image after smoothing
view(single_fluorescent_view(structure_img_smooth))
###Output
_____no_output_____
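###Markdown
To make the two-value form of `intensity_scaling_param` concrete, the cell below is a minimal NumPy sketch of the normalization described above: clip the stack to **[mean - A * std, mean + B * std]** and rescale to **[0, 1]**. It is only an illustration of the idea, not the `aicssegmentation` implementation, which remains the reference.
###Code
import numpy as np

def normalize_two_sided_sketch(img, a, b):
    # Clip the intensity range to [mean - a * std, mean + b * std] ...
    lo = img.mean() - a * img.std()
    hi = img.mean() + b * img.std()
    clipped = np.clip(img, lo, hi)
    # ... then rescale the clipped stack to [0, 1]
    return (clipped - lo) / (hi - lo)

# Tiny synthetic example: the output should span roughly [0, 1]
demo_stack = np.random.normal(loc=100.0, scale=10.0, size=(4, 64, 64))
demo_norm = normalize_two_sided_sketch(demo_stack, a=2, b=9)
print(demo_norm.min(), demo_norm.max())
###Output
_____no_output_____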
###Markdown
If the contrast looks off, you can tune the normalization parameters.The Allen Cell Segmenter has a function to give you some suggestions for the normalization parameters. If you have a certain preference, you can adjust the values based on the suggestion.***After you decide the parameters, you have to re-run the code above with the new parameter*** `intensity_scaling_param = ` Step 2: Core Algorithm step 2.1: Apply 3D Spot filter (S3)Parameter syntax: `[[scale_1, cutoff_1], [scale_2, cutoff_2], ....]` * `scale_x` is set based on the estimated radius of your target dots. For example, if visually the diameter of the dots is usually 3~4 pixels, then you may want to set `scale_x` as `1` or something near `1` (like `1.25`). Multiple scales can be used if you have dots of very different sizes. * `cutoff_x` is a threshold applied on the actual filter response to get the binary result. A smaller `cutoff_x` may yield more dots and fatter segmentation, while a larger `cutoff_x` is less permissive and may yield fewer dots and slimmer segmentation.
###Code
################################
## PARAMETERS for this step ##
s3_param = [[1, 0.005], [10, 0.1]]
################################
bw = dot_3d_wrapper(structure_img_smooth, s3_param)
# view the segmentation result
viewer_bw = view(segmentation_quick_view(bw))
viewer_bw
###Output
_____no_output_____
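###Markdown
For intuition about what `s3_param` controls, the sketch below approximates a 3D spot filter as a scale-normalized Laplacian-of-Gaussian response thresholded at `cutoff`, with the scales combined by a logical OR. This is a simplified stand-in based on an assumption about how such spot filters typically work; it is not the actual `dot_3d_wrapper` code, which remains the reference.
###Code
import numpy as np
from scipy import ndimage as ndi

def spot_filter_3d_sketch(img, s3_param):
    bw_sketch = np.zeros(img.shape, dtype=bool)
    for scale, cutoff in s3_param:
        # Bright blobs give a strongly negative Laplacian, so negate the
        # (scale-normalized) response before applying the cutoff.
        response = -(scale ** 2) * ndi.gaussian_laplace(img.astype(float), scale)
        bw_sketch = np.logical_or(bw_sketch, response > cutoff)
    return bw_sketch

# Example call with the parameters used above:
# bw_sketch = spot_filter_3d_sketch(structure_img_smooth, [[1, 0.005], [10, 0.1]])
###Output
_____no_output_____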
###Markdown
After quickly visualizing the segmentation results, you can also visualize the segmentation and original image side by side. You may select an ROI to inspect the details* Option 1: Easy ROI selection, but NOT recommended if you are using a laptopYou can select an ROI in the above visualization ('viewer_bw'); otherwise, the default ROI is the full image[See this video for How to select ROI](https://www.youtube.com/watch?v=ZO8ey6-tF_0&index=3&list=PL2lHcsoU0YJsh6f8j2vbhg2eEpUnKEWcl)* Option 2: Manually type in ROI coordinatesType in the coordinates of the upper left corner and lower right corner of the ROI in the form of [Upper_Left_X, Upper_Left_Y, Lower_right_X, Lower_right_Y].
###Code
# Option 1:
view(seg_fluo_side_by_side(structure_img0,bw,roi=['ROI',viewer_bw.roi_slice()]))
# Option 2:
# view(seg_fluo_side_by_side(structure_img0,bw,roi=['M',[570,370,730,440]]))
###Output
_____no_output_____
###Markdown
Is the above segmentation satisfactory? Here are some possible things to check:-------* Is there a big missing chunk? Or are the segmented chunks significantly fatter than expected? You may visualize the intermediate result, i.e. the objects, by `view(segmentation_quick_view(object_for_debug))`. By doing this, you can get some sense of whether the objects are roughly the regions of individual cells. In other words, we want to roughly isolate the structures in individual cells. If not, you may change `global_thresh_method`. Three options `'tri'`, `'med'`, `'ave'` are implemented: `'tri'` is the triangle method, `'med'` is the median method, and `'ave'` is the average of the values returned by the triangle and median methods. * Missing chunks may also be due to falsely removed objects. Try decreasing `object_minArea` to be more permissive in segmenting objects.* Do you observe a chunk of background in the segmentation? Try increasing `object_minArea` to exclude this background noise. * If the segmented objects are slightly fatter than their actual size (diffraction of light may play a role here), don't worry; the next step (2.2) can help make them thinner. * If you observe missing dots in the segmentation, don't worry. A later step (2.3) can pick them up.-------- If the results are satisfactory, go to Step 2.2 directly; otherwise, try to tweak the parameters based on the above suggestions. Assumption: the segmentation result is saved in a variable named `bw`. step 2.2: Use watershed algorithm to separate touching dots
###Code
minArea = 20  # (applied later, in the post-processing step)
# Seeds for the watershed: local intensity maxima inside the current binary segmentation
local_maxi = peak_local_max(structure_img0, min_distance=5, labels=label(bw), indices=False)
# Distance transform of the mask; its negative is the landscape the watershed floods
distance = distance_transform_edt(bw)
# Flood from the (slightly dilated) seeds, restricted to the mask, keeping watershed lines to split touching dots
im_watershed = watershed(-distance, label(dilation(local_maxi, selem=ball(1))), mask=bw, watershed_line=True)
###Output
_____no_output_____
###Markdown
Step 3: Post-Processing Remove all objects smaller than the defined minimum area
###Code
################################
## PARAMETERS for removing small objects ##
minArea = 20
################################
final_seg = remove_small_objects(im_watershed>0, min_size=minArea, connectivity=1, in_place=False)
###Output
_____no_output_____
###Markdown
Result inspection
###Code
viewer_final = view(segmentation_quick_view(final_seg))
viewer_final
###Output
_____no_output_____
###Markdown
You can also focus your inspection on a small ROI* Option 1: Easy ROI selection, but NOT recommended if you are using a laptopYou can select an ROI in the above visualization ('viewer_final'); otherwise, the default ROI is the full image[See this video for How to select ROI](https://www.youtube.com/watch?v=ZO8ey6-tF_0&index=3&list=PL2lHcsoU0YJsh6f8j2vbhg2eEpUnKEWcl)* Option 2: Manually type in ROI coordinatesType in the coordinates of the upper left corner and lower right corner of the ROI in the form of [Upper_Left_X, Upper_Left_Y, Lower_right_X, Lower_right_Y].
###Code
# Option 1:
view(seg_fluo_side_by_side(structure_img0, final_seg, roi=['ROI',viewer_final.roi_slice()]))
# Option 2:
# view(seg_fluo_side_by_side(struct_img, seg, roi=['M',[267,474, 468, 605]]))
###Output
_____no_output_____
###Markdown
You may also physically save the segmentation results into a .tiff image
###Code
# define where to save your test segmentations
output_filepath = '/output/test-segmentations/'
if not os.path.isdir(output_filepath):
os.makedirs(output_filepath)
# this file will be saved within your docker container volume "output"
# in order to visualize this most easily, you can copy this to your computer using
# docker cp jupyter:/output/ output/
output_seg = final_seg>0
out=output_seg.astype(np.uint8)
out[out>0]=255
imageio.volwrite(output_filepath + FILE_NAME + '-test_rna_seg.tiff', out)
###Output
_____no_output_____
|
Example/FlatImages.ipynb
|
###Markdown
Images as Numpy Array (H, W, 3)
###Code
classes = ['vegetable', 'biscuit', 'fruit']
foods = [cv2.imread('./foods/'+f) for f in foodfns]
targets = [0] * len(foodfns)
foods[0].shape
w2 = Innotater(
ImageInnotation(foods, name='Food'),
MultiClassInnotation(targets, name='FoodType', classes=classes, desc='Food Type')
)
display(w2)
###Output
_____no_output_____
###Markdown
Greyscale Images as Numpy array (H,W)
###Code
# Flatten to greyscale
foods_flat = [f.mean(axis=2).astype('int') for f in foods]
foods_flat[0].shape
foods[0].max()
w2f = Innotater(
ImageInnotation(foods_flat, name='Food'),
MultiClassInnotation(targets, name='FoodType', classes=classes, desc='Food Type')
)
display(w2f)
###Output
_____no_output_____
|
01_FB_analysis.ipynb
|
###Markdown
Topic usageDownsample (average topic usage per day)
###Code
doctop_avg = import_normalize(doctop_path=doctop_path,
train_data_path='data/S4_fb_phrase.ndjson',
meta_data_path='data/S1_giveaway_removed.csv')
###Output
/home/jan/word-vects/lib/python3.6/site-packages/IPython/core/interactiveshell.py:3254: DtypeWarning: Columns (8,9,15) have mixed types.Specify dtype option on import or set low_memory=False.
if (await self.run_code(code, result, async_=asy)):
###Markdown
NTR (novelty, transience, resonance)Use the downsampled document-topic matrix
###Code
# load doctop mat
with open(doctop_path) as f:
doctop = ndjson.load(f)
# timestamp
texts_concat_df = pd.read_csv('TMP_downsampled/texts_concat_df.csv')
ids = texts_concat_df['time']
ntr.calculate(
doc_top_prob=doctop_avg,
ID=ids,
window=7,
out_dir='models/200826_seed_prior_test/ntr/21T_005A_05E_avg',
curb_incomplete=True,
)
###Output
_____no_output_____
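###Markdown
The `ntr.calculate` call above comes from this repository and is the reference implementation. As background, novelty, transience and resonance are commonly defined as windowed divergences between a document's topic distribution and its neighbours: novelty averages the divergence to the previous `window` documents, transience to the following `window` documents, and resonance is their difference. The cell below is only a hedged sketch of that idea using the Jensen-Shannon distance; the actual module may use a different divergence, windowing or output format. Dropping the first and last `window` documents also mirrors the `doctop_avg[7:-7]` slicing used further down.
###Code
import numpy as np
from scipy.spatial.distance import jensenshannon

def ntr_sketch(doc_top, window=7):
    # doc_top: (n_docs, n_topics) array, each row a topic distribution
    doc_top = np.asarray(doc_top)
    records = []
    # Only documents with a complete past and future window get a score
    for t in range(window, len(doc_top) - window):
        novelty = np.mean([jensenshannon(doc_top[t], doc_top[t - j]) for j in range(1, window + 1)])
        transience = np.mean([jensenshannon(doc_top[t], doc_top[t + j]) for j in range(1, window + 1)])
        records.append({'novelty': novelty,
                        'transience': transience,
                        'resonance': novelty - transience})
    return records

# e.g. compare against the saved 7W output:
# ntr_sketch(doctop_avg, window=7)
###Output
_____no_output_____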
###Markdown
Granger
###Code
with open('models/200826_seed_prior_test/ntr/21T_005A_05E_avg/7W.ndjson') as f:
ntr_avg_7 = pd.DataFrame(ndjson.load(f))
ntr_avg_7.doc_id = pd.to_datetime(ntr_avg_7.doc_id)
ntr_csl_topics = granger(
df_predictors=ntr_avg_7[['novelty', 'transience', 'resonance']],
df_outcome=pd.DataFrame(doctop_avg[7:-7])
)
topics_csl_ntr = granger(
df_predictors=pd.DataFrame(doctop_avg[7:-7]),
df_outcome=ntr_avg_7[['novelty', 'transience', 'resonance']]
)
ntr_csl_topics
topics_csl_ntr
###Output
_____no_output_____
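###Markdown
The `granger` helper used above is part of this repository, so its settings are the reference. For orientation only, the cell below sketches how a single predictor/outcome pair could be tested with `statsmodels`; pairing `novelty` with topic 0 is an arbitrary illustrative choice, and column order matters (the test asks whether the second column helps predict the first).
###Code
import numpy as np
from statsmodels.tsa.stattools import grangercausalitytests

# Illustrative pair: does novelty help predict the usage of topic 0?
outcome = np.asarray(doctop_avg)[7:-7, 0]      # topic-0 usage, trimmed as above
predictor = ntr_avg_7['novelty'].values
pair = np.column_stack([outcome, predictor])   # column 0 = outcome, column 1 = predictor
gc_res = grangercausalitytests(pair, maxlag=7, verbose=False)
print(gc_res[7][0]['ssr_ftest'][1])            # p-value of the F-test at lag 7
###Output
_____no_output_____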
###Markdown
Vis
###Code
adaptiveline_infodynamics(
x1=ntr_avg_7.novelty,
x2=ntr_avg_7.resonance,
dates=ntr_avg_7.doc_id,
dataset_name='Facebook',
plot_label='Infodynamics',
c=['mistyrose', 'indianred', 'darkred'],
normalize_signal=True,
filter_span=[32, 56, 128],
my_dpi=300,
fname=os.path.join(model_path, 'timeseries', 'adaptline_NR_norm.png')
)
adaptiveline_infodynamics(
x1=ntr_avg_7.novelty,
x2=ntr_avg_7.resonance,
dates=ntr_avg_7.doc_id,
dataset_name='Facebook',
plot_label='Infodynamics',
c=['mistyrose', 'indianred', 'darkred'],
normalize_signal=False,
filter_span=[32, 56, 128],
my_dpi=300,
fname=os.path.join(model_path, 'timeseries', 'adaptline_NR_raw.png')
)
regline(
x=stats.zscore(ntr_avg_7.novelty),
y=stats.zscore(ntr_avg_7.resonance),
bootstap=True,
fname=os.path.join(model_path, 'timeseries', 'regline_NR.png')
)
###Output
findfont: Font family ['Times New Roman'] not found. Falling back to DejaVu Sans.
|
docs/build/.doctrees/nbsphinx/Tutorials/Tutorial 3 - Making useful plots.ipynb
|
###Markdown
plot Suite2p ROIs location
###Code
# simple plot of the location of the given cell(s) against a black FOV (optionally provide background image as np.array to use)
plotting.plotRoiLocations(trialobj=trialobj, scalebar=True)
###Output
\- executing plotting_func
###Markdown
plot default output images from Suite2p
###Code
plotting.makeSuite2pPlots(obj=trialobj, scalebar=True)
###Output
\- executing plotting_func
###Markdown
plot individual cell's flu or dFF trace
###Code
cell = 10 # pick any cell ID from Suite2p
# use the `to_plot` parameter to select data to plot. Available options == .layers keys in anndata object
print(trialobj.data.layers)
plotting.plot_flu_trace(trialobj=trialobj, cell=cell, to_plot = 'dFF', linewidth = 0.10,
x_lims=None, y_lims= None, x_axis='Frames', title=f'Cell # {cell}', y_axis='dFF Flu')
###Output
Layers with keys: dFF
\- executing plotting_func
###Markdown
plots the raw trace for the Flu mean of the FOV (similar to the ZProject in Fiji)
###Code
plotting.plotMeanFovFluTrace(trialobj=trialobj)
###Output
\- executing plotting_func
\- PLOTTING mean raw flu trace ...
|
BERT_Word_Embeddings.ipynb
|
###Markdown
In this post, I take an in-depth look at word embeddings produced by Google's BERT and show you how to get started with BERT by producing your own word embeddings.This post is presented in two forms--as a blog post [here](http://mccormickml.com/2019/05/14/BERT-word-embeddings-tutorial/) and as a Colab notebook [here](https://colab.research.google.com/drive/1ZQvuAVwA3IjybezQOXnrXMGAnMyZRuPU). The content is identical in both, but: * The blog post format may be easier to read, and includes a comments section for discussion. * The Colab Notebook will allow you to run the code and inspect it as you read through. Introduction History2018 was a breakthrough year in NLP. Transfer learning, particularly models like Allen AI's ELMO, OpenAI's Open-GPT, and Google's BERT allowed researchers to smash multiple benchmarks with minimal task-specific fine-tuning and provided the rest of the NLP community with pretrained models that could easily (with less data and less compute time) be fine-tuned and implemented to produce state of the art results. Unfortunately, for many starting out in NLP and even for some experienced practitioners, the theory and practical application of these powerful models is still not well understood. What is BERT?BERT (Bidirectional Encoder Representations from Transformers), released in late 2018, is the model we will use in this tutorial to provide readers with a better understanding of and practical guidance for using transfer learning models in NLP. BERT is a method of pretraining language representations that was used to create models that NLP practitioners can then download and use for free. You can either use these models to extract high quality language features from your text data, or you can fine-tune these models on a specific task (classification, entity recognition, question answering, etc.) with your own data to produce state of the art predictions. Why BERT embeddings?In this tutorial, we will use BERT to extract features, namely word and sentence embedding vectors, from text data. What can we do with these word and sentence embedding vectors? First, these embeddings are useful for keyword/search expansion, semantic search and information retrieval. For example, if you want to match customer questions or searches against already answered questions or well documented searches, these representations will help you accurately retrieve results matching the customer's intent and contextual meaning, even if there's no keyword or phrase overlap.Second, and perhaps more importantly, these vectors are used as high-quality feature inputs to downstream models. NLP models such as LSTMs or CNNs require inputs in the form of numerical vectors, and this typically means translating features like the vocabulary and parts of speech into numerical representations. In the past, words have been represented either as uniquely indexed values (one-hot encoding), or more helpfully as neural word embeddings where vocabulary words are matched against the fixed-length feature embeddings that result from models like Word2Vec or Fasttext. BERT offers an advantage over models like Word2Vec, because while each word has a fixed representation under Word2Vec regardless of the context within which the word appears, BERT produces word representations that are dynamically informed by the words around them. 
For example, given two sentences:"The man was accused of robbing a bank.""The man went fishing by the bank of the river."Word2Vec would produce the same word embedding for the word "bank" in both sentences, while under BERT the word embedding for "bank" would be different for each sentence. Aside from capturing obvious differences like polysemy, the context-informed word embeddings capture other forms of information that result in more accurate feature representations, which in turn results in better model performance.From an educational standpoint, a close examination of BERT word embeddings is a good way to get your feet wet with BERT and its family of transfer learning models, and sets us up with some practical knowledge and context to better understand the inner details of the model in later tutorials.Onward! Install and Import Install the pytorch interface for BERT by Hugging Face. (This library contains interfaces for other pretrained language models like OpenAI's GPT and GPT-2.) We've selected the pytorch interface because it strikes a nice balance between the high-level APIs (which are easy to use but don't provide insight into how things work) and tensorflow code (which contains lots of details but often sidetracks us into lessons about tensorflow, when the purpose here is BERT!).If you're running this code on Google Colab, you will have to install this library each time you reconnect; the following cell will take care of that for you.
###Code
!pip install pytorch-pretrained-bert
###Output
_____no_output_____
###Markdown
Now let's import pytorch, the pretrained BERT model, and a BERT tokenizer. We'll explain the BERT model in detail in a later tutorial, but this is the pre-trained model released by Google that ran for many, many hours on Wikipedia and [Book Corpus](https://arxiv.org/pdf/1506.06724.pdf), a dataset containing +10,000 books of different genres. This model is responsible (with a little modification) for beating NLP benchmarks across a range of tasks. Google released a few variations of BERT models, but the one we'll use here is the smaller of the two available sizes ("base" and "large") and ignores casing, hence "uncased.""
###Code
import torch
from pytorch_pretrained_bert import BertTokenizer, BertModel, BertForMaskedLM
# OPTIONAL: if you want to have more information on what's happening, activate the logger as follows
import logging
#logging.basicConfig(level=logging.INFO)
import matplotlib.pyplot as plt
%matplotlib inline
# Load pre-trained model tokenizer (vocabulary)
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
###Output
100%|██████████| 231508/231508 [00:00<00:00, 418538.21B/s]
###Markdown
Input FormattingBecause BERT is a pretrained model that expects input data in a specific format, we will need:- special tokens to mark the beginning ([CLS]) and separation/end of sentences ([SEP])- tokens that conform with the fixed vocabulary used in BERT- token IDs from BERT's tokenizer- mask IDs to indicate which elements in the sequence are tokens and which are padding elements- segment IDs used to distinguish different sentences- positional embeddings used to show token position within the sequenceLuckily, this interface takes care of some of these input specifications for us so we will only have to manually create a few of them (we'll revisit the other inputs in another tutorial). Special TokensBERT can take as input either one or two sentences, and expects special tokens to mark the beginning and end of each one:**2 Sentence Input**:[CLS] the man went to the store [SEP] he bought a gallon of milk [SEP]**1 Sentence Input**:[CLS] the man went to the store [SEP]
###Code
text = "Here is the sentence I want embeddings for."
text = "After stealing money from the bank vault, the bank robber was seen fishing on the Mississippi river bank."
marked_text = "[CLS] " + text + " [SEP]"
print (marked_text)
###Output
[CLS] After stealing money from the bank vault, the bank robber was seen fishing on the Mississippi river bank. [SEP]
###Markdown
We've imported a BERT-specific tokenizer; let's take a look at its output. Tokenization
###Code
tokenized_text = tokenizer.tokenize(marked_text)
print (tokenized_text)
###Output
['[CLS]', 'after', 'stealing', 'money', 'from', 'the', 'bank', 'vault', ',', 'the', 'bank', 'robber', 'was', 'seen', 'fishing', 'on', 'the', 'mississippi', 'river', 'bank', '.', '[SEP]']
###Markdown
Notice how the word "embeddings" is represented:['em', 'bed', 'ding', 's']The original word has been split into smaller subwords and characters. The two hash signs preceding some of these subwords are just our tokenizer's way to denote that this subword or character is part of a larger word and preceded by another subword. So, for example, the 'bed' token is separate from the 'bed' token; the first is used whenever the subword 'bed' occurs within a larger word and the second is used explicitly for when the standalone token 'thing you sleep on' occurs.Why does it look this way? This is because the BERT tokenizer was created with a WordPiece model. This model greedily creates a fixed-size vocabulary of individual characters, subwords, and words that best fits our language data. Since the vocabulary limit size of our BERT tokenizer model is 30,000, the WordPiece model generated a vocabulary that contains all English characters plus the ~30,000 most common words and subwords found in the English language corpus the model is trained on. This vocabulary contains four things:1. Whole words2. Subwords occuring at the front of a word or in isolation ("em" as in "embeddings" is assigned the same vector as the standalone sequence of characters "em" as in "go get em" )3. Subwords not at the front of a word, which are preceded by '' to denote this case4. Individual charactersTo tokenize a word under this model, the tokenizer first checks if the whole word is in the vocabulary. If not, it tries to break the word into the largest possible subwords contained in the vocabulary, and as a last resort will decompose the word into individual characters. Note that because of this, we can always represent a word as, at the very least, the collection of its individual characters.As a result, rather than assigning out of vocabulary words to a catch-all token like 'OOV' or 'UNK,' words that are not in the vocabulary are decomposed into subword and character tokens that we can then generate embeddings for. So, rather than assigning "embeddings" and every other out of vocabulary word to an overloaded unknown vocabulary token, we split it into subword tokens ['em', 'bed', 'ding', 's'] that will retain some of the contextual meaning of the original word. We can even average these subword embedding vectors to generate an approximate vector for the original word.(For more information about WordPiece, see the [original paper](https://static.googleusercontent.com/media/research.google.com/en//pubs/archive/37842.pdf) and further disucssion in Google's [Neural Machine Translation System](https://arxiv.org/pdf/1609.08144.pdf).) Here are some examples of the tokens contained in our vocabulary. Tokens beginning with two hashes are subwords or individual characters.
###Code
list(tokenizer.vocab.keys())[5000:5020]
#list(tokenizer.vocab.keys())[4000: 4010]
#tokenized_text
#marked_text
###Output
_____no_output_____
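###Markdown
To make the greedy longest-match idea concrete, here is a minimal sketch of WordPiece-style tokenization of a single word against `tokenizer.vocab`. It is only an illustration of the algorithm described above, not the actual tokenizer implementation (which also handles casing, punctuation and unknown characters).
###Code
def wordpiece_sketch(word, vocab):
    """Greedily split `word` into the longest subwords present in `vocab`."""
    pieces = []
    start = 0
    while start < len(word):
        end = len(word)
        match = None
        # Try the longest possible substring first, shrinking until a match is found
        while start < end:
            piece = word[start:end]
            if start > 0:
                piece = '##' + piece  # non-initial pieces carry the '##' prefix
            if piece in vocab:
                match = piece
                break
            end -= 1
        if match is None:
            return ['[UNK]']  # no subword matched at this position
        pieces.append(match)
        start = end
    return pieces

print(wordpiece_sketch('embeddings', tokenizer.vocab))
###Output
_____no_output_____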
###Markdown
Next, we need to call the tokenizer to match the tokens against their indices in the tokenizer vocabulary:
###Code
indexed_tokens = tokenizer.convert_tokens_to_ids(tokenized_text)
for tup in zip(tokenized_text, indexed_tokens):
print (tup)
###Output
('[CLS]', 101)
('after', 2044)
('stealing', 11065)
('money', 2769)
('from', 2013)
('the', 1996)
('bank', 2924)
('vault', 11632)
(',', 1010)
('the', 1996)
('bank', 2924)
('robber', 27307)
('was', 2001)
('seen', 2464)
('fishing', 5645)
('on', 2006)
('the', 1996)
('mississippi', 5900)
('river', 2314)
('bank', 2924)
('.', 1012)
('[SEP]', 102)
###Markdown
Segment IDBERT is trained on and expects sentence pairs, using 1s and 0s to distinguish between the two sentences. That is, for each token in "tokenized_text," we must specify which sentence it belongs to: sentence 0 (a series of 0s) or sentence 1 (a series of 1s). For our purposes, single-sentence inputs only require a series of 1s, so we will create a vector of 1s for each token in our input sentence. If you want to process two sentences, assign each word in the first sentence plus the '[SEP]' token a 0, and all tokens of the second sentence a 1.
###Code
segments_ids = [1] * len(tokenized_text)
print (segments_ids)
###Output
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
###Markdown
Running our ExampleNext we need to convert our data to torch tensors and call the BERT model. The BERT PyTorch interface requires that the data be in torch tensors rather than Python lists, so we convert the lists here - this does not change the shape or the data. model.eval() puts our model in evaluation mode as opposed to training mode. In this case, evaluation mode turns off dropout regularization which is used in training.Calling `from_pretrained` will fetch the model from the internet. When we load the `bert-base-uncased`, we see the definition of the model printed in the logging. The model is a deep neural network with 12 layers! Explaining the layers and their functions is outside the scope of this post, and you can skip over this output for now.
###Code
# Convert inputs to PyTorch tensors
tokens_tensor = torch.tensor([indexed_tokens])
segments_tensors = torch.tensor([segments_ids])
# Load pre-trained model (weights)
model = BertModel.from_pretrained('bert-base-uncased')
# Put the model in "evaluation" mode, meaning feed-forward operation.
model.eval()
indexed_tokens
tokens_tensor
segments_tensors
segments_ids
###Output
_____no_output_____
###Markdown
Next, let's fetch the hidden states of the network.torch.no_grad deactivates the gradient calculations, saves memory, and speeds up computation (we don't need gradients or backpropagation since we're just running a forward pass).
###Code
# Predict hidden states features for each layer
with torch.no_grad():
encoded_layers, _ = model(tokens_tensor, segments_tensors)
###Output
_____no_output_____
###Markdown
OutputThe full set of hidden states for this model, stored in the object `encoded_layers`, is a little dizzying. This object has four dimensions, in the following order:1. The layer number (12 layers)2. The batch number (1 sentence)3. The word / token number (22 tokens in our sentence)4. The hidden unit / feature number (768 features)That’s 202,752 unique values just to represent our one sentence!The second dimension, the batch size, is used when submitting multiple sentences to the model at once; here, though, we just have one example sentence.
###Code
print ("Number of layers:", len(encoded_layers))
layer_i = 0
print ("Number of batches:", len(encoded_layers[layer_i]))
batch_i = 0
print ("Number of tokens:", len(encoded_layers[layer_i][batch_i]))
token_i = 0
print ("Number of hidden units:", len(encoded_layers[layer_i][batch_i][token_i]))
###Output
Number of layers: 12
Number of batches: 1
Number of tokens: 22
Number of hidden units: 768
###Markdown
Let's take a quick look at the range of values for a given layer and token.You'll find that the range is fairly similar for all layers and tokens, with the majority of values falling between \[-2, 2\], and a small smattering of values around -10.
###Code
# For the 5th token in our sentence, select its feature values from layer 5.
token_i = 5
layer_i = 5
vec = encoded_layers[layer_i][batch_i][token_i]
# Plot the values as a histogram to show their distribution.
plt.figure(figsize=(10,10))
plt.hist(vec, bins=200)
plt.show()
###Output
_____no_output_____
###Markdown
Grouping the values by layer makes sense for the model, but for our purposes we want it grouped by token. The following code just reshapes the values so that we have them in the form: ``` [ tokens, layers, features]```
###Code
# Convert the hidden state embeddings into single token vectors
# Holds the list of 12 layer embeddings for each token
# Will have the shape: [# tokens, # layers, # features]
token_embeddings = []
# For each token in the sentence...
for token_i in range(len(tokenized_text)):
# Holds 12 layers of hidden states for each token
hidden_layers = []
# For each of the 12 layers...
for layer_i in range(len(encoded_layers)):
# Lookup the vector for `token_i` in `layer_i`
vec = encoded_layers[layer_i][batch_i][token_i]
hidden_layers.append(vec)
token_embeddings.append(hidden_layers)
# Sanity check the dimensions:
print ("Number of tokens in sequence:", len(token_embeddings))
print ("Number of layers per token:", len(token_embeddings[0]))
###Output
Number of tokens in sequence: 22
Number of layers per token: 12
###Markdown
Creating word and sentence vectors from hidden statesNow, what do we do with these hidden states? We would like to get individual vectors for each of our tokens, or perhaps a single vector representation of the whole sentence, but for each token of our input we have 12 separate vectors each of length 768.In order to get the individual vectors we will need to combine some of the layer vectors...but which layer or combination of layers provides the best representation? The BERT authors tested this by feeding different vector combinations as input features to a BiLSTM used on a named entity recognition task and observing the resulting F1 scores.(Image from [Jay Allamar](http://jalammar.github.io/illustrated-bert/)'s blog)While concatenation of the last four layers produced the best results on this specific task, many of the other methods come in a close second and in general it is advisable to test different versions for your specific application: results may vary.This is partially demonstrated by noting that the different layers of BERT encode very different kinds of information, so the appropriate pooling strategy will change depending on the application because different layers encode different kinds of information. Hanxiao's discussion of this topic is relevant, as are their experiments looking at the PCA visualizations of different layers trained on a news dataset and observing the differences in the four class separations from different pooling strategies:(Images from [Hanxiao's](https://github.com/hanxiao/bert-as-service) BERT-as-a-service)The upshot being that, again**, the correct pooling strategy (mean, max, concatenation, etc.) and layers used (last four, all, last layer, etc.) is dependent on the application**. This discussion of pooling strategies applies both to entire sentence embeddings and individual ELMO-like token embeddings. Word VectorsTo give you some examples, let's create word vectors two ways. First, let's **concatenate** the last four layers, giving us a single word vector per token. Each vector will have length `4 x 768 = 3,072`.
###Code
# Stores the token vectors, with shape [22 x 3,072]
token_vecs_cat = []
# For each token in the sentence...
for token in token_embeddings:
# Concatenate the vectors (that is, append them together) from the last
# four layers.
# Each layer vector is 768 values, so `cat_vec` is length 3,072.
cat_vec = torch.cat((token[-1], token[-2], token[-3], token[-4]), 0)
# Use `cat_vec` to represent `token`.
token_vecs_cat.append(cat_vec)
print ('Shape is: %d x %d' % (len(token_vecs_cat), len(token_vecs_cat[0])))
###Output
Shape is: 22 x 3072
###Markdown
As an alternative method, let's try creating the word vectors by **summing** together the last four layers.
###Code
# Stores the token vectors, with shape [22 x 768]
token_vecs_sum = []
# For each token in the sentence...
for token in token_embeddings:
# Sum the vectors from the last four layers.
sum_vec = torch.sum(torch.stack(token)[-4:], 0)
# Use `sum_vec` to represent `token`.
token_vecs_sum.append(sum_vec)
print ('Shape is: %d x %d' % (len(token_vecs_sum), len(token_vecs_sum[0])))
###Output
Shape is: 22 x 768
###Markdown
Sentence VectorsTo get a single vector for our entire sentence we have multiple application-dependent strategies, but a simple approach is to average the second to last hidden layer of each token, producing a single 768 length vector.
###Code
sentence_embedding = torch.mean(encoded_layers[11], 1)
print ("Our final sentence embedding vector of shape:"), sentence_embedding[0].shape[0]
###Output
Our final sentence embedding vector of shape:
###Markdown
Confirming contextually dependent vectorsTo confirm that the values of these vectors are in fact contextually dependent, let's take a look at the output from the following sentence (if you want to try this out you'll have to run this example separately from the top by replacing our original sentence with the following sentence):
###Code
print (text)
for i,x in enumerate(tokenized_text):
print (i,x)
print ("First fifteen values of 'bank' as in 'bank robber':")
token_vecs_sum[10][:15]
print ("First fifteen values of 'bank' as in 'bank vault':")
token_vecs_sum[6][:15]
print ("First fifteen values of 'bank' as in 'river bank':")
token_vecs_sum[19][:15]
###Output
First fifteen values of 'bank' as in 'river bank':
###Markdown
As we can see, these are all different vectors, and they should be; although the word 'bank' is the same, its meaning differs across the three uses in our sentence, sometimes substantially.We have three different uses of "bank" in this sentence, two of which should be almost identical. Let's check the cosine similarity to see if this is the case: **Note**: the cosine similarity of two vectors X and Y is their dot product divided by the product of their norms (equivalently, the dot product of the L2-normalized vectors).
###Code
len(token_vecs_sum[10])
#token_vecs_sum[10].reshape(1, -1)
from sklearn.metrics.pairwise import cosine_similarity
# reshape(1, -1): convert the 1D array into 2D
# reshape(-1, 1): convert 1D array into 2D(n, 1)
# Compare "bank" as in "bank robber" to "bank" as in "river bank"
different_bank = cosine_similarity(token_vecs_sum[10].reshape(1,-1), token_vecs_sum[19].reshape(1,-1))[0][0]
# Compare "bank" as in "bank robber" to "bank" as in "bank vault"
same_bank = cosine_similarity(token_vecs_sum[10].reshape(1,-1), token_vecs_sum[6].reshape(1,-1))[0][0]
print ("Similarity of 'bank' as in 'bank robber' to 'bank' as in 'bank vault':", same_bank)
print ("Similarity of 'bank' as in 'bank robber' to 'bank' as in 'river bank':", different_bank)
###Output
Similarity of 'bank' as in 'bank robber' to 'bank' as in 'river bank': 0.6797334
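###Markdown
As a quick sanity check on the numbers above, the cell below recomputes the same cosine similarities by hand (the dot product divided by the product of the vector norms), using the `token_vecs_sum` vectors from earlier. The values should match sklearn's output up to floating point precision.
###Code
import numpy as np

def cosine_sim(x, y):
    x = np.asarray(x, dtype=float)
    y = np.asarray(y, dtype=float)
    # Cosine similarity: dot product normalized by the vector lengths
    return np.dot(x, y) / (np.linalg.norm(x) * np.linalg.norm(y))

print("'bank robber' vs 'bank vault':", cosine_sim(token_vecs_sum[10], token_vecs_sum[6]))
print("'bank robber' vs 'river bank':", cosine_sim(token_vecs_sum[10], token_vecs_sum[19]))
###Output
_____no_output_____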
###Markdown
*UPDATE 6/1/20* - There is a newer version of this Notebook [here](https://colab.research.google.com/drive/1yFphU6PW9Uo6lmDly_ud9a6c4RCYlwdX).-------------------------------In this post, I take an in-depth look at word embeddings produced by Google's BERT and show you how to get started with BERT by producing your own word embeddings.This post is presented in two forms--as a blog post [here](http://mccormickml.com/2019/05/14/BERT-word-embeddings-tutorial/) and as a Colab notebook [here](https://colab.research.google.com/drive/1ZQvuAVwA3IjybezQOXnrXMGAnMyZRuPU). The content is identical in both, but: * The blog post format may be easier to read, and includes a comments section for discussion. * The Colab Notebook will allow you to run the code and inspect it as you read through. Introduction History2018 was a breakthrough year in NLP. Transfer learning, particularly models like Allen AI's ELMO, OpenAI's Open-GPT, and Google's BERT allowed researchers to smash multiple benchmarks with minimal task-specific fine-tuning and provided the rest of the NLP community with pretrained models that could easily (with less data and less compute time) be fine-tuned and implemented to produce state of the art results. Unfortunately, for many starting out in NLP and even for some experienced practitioners, the theory and practical application of these powerful models is still not well understood. What is BERT?BERT (Bidirectional Encoder Representations from Transformers), released in late 2018, is the model we will use in this tutorial to provide readers with a better understanding of and practical guidance for using transfer learning models in NLP. BERT is a method of pretraining language representations that was used to create models that NLP practitioners can then download and use for free. You can either use these models to extract high quality language features from your text data, or you can fine-tune these models on a specific task (classification, entity recognition, question answering, etc.) with your own data to produce state of the art predictions. Why BERT embeddings?In this tutorial, we will use BERT to extract features, namely word and sentence embedding vectors, from text data. What can we do with these word and sentence embedding vectors? First, these embeddings are useful for keyword/search expansion, semantic search and information retrieval. For example, if you want to match customer questions or searches against already answered questions or well documented searches, these representations will help you accurately retrieve results matching the customer's intent and contextual meaning, even if there's no keyword or phrase overlap.Second, and perhaps more importantly, these vectors are used as high-quality feature inputs to downstream models. NLP models such as LSTMs or CNNs require inputs in the form of numerical vectors, and this typically means translating features like the vocabulary and parts of speech into numerical representations. In the past, words have been represented either as uniquely indexed values (one-hot encoding), or more helpfully as neural word embeddings where vocabulary words are matched against the fixed-length feature embeddings that result from models like Word2Vec or Fasttext. BERT offers an advantage over models like Word2Vec, because while each word has a fixed representation under Word2Vec regardless of the context within which the word appears, BERT produces word representations that are dynamically informed by the words around them. 
For example, given two sentences:"The man was accused of robbing a bank.""The man went fishing by the bank of the river."Word2Vec would produce the same word embedding for the word "bank" in both sentences, while under BERT the word embedding for "bank" would be different for each sentence. Aside from capturing obvious differences like polysemy, the context-informed word embeddings capture other forms of information that result in more accurate feature representations, which in turn results in better model performance.From an educational standpoint, a close examination of BERT word embeddings is a good way to get your feet wet with BERT and its family of transfer learning models, and sets us up with some practical knowledge and context to better understand the inner details of the model in later tutorials.Onward! 1. Loading Pre-Trained BERT Install the pytorch interface for BERT by Hugging Face. (This library contains interfaces for other pretrained language models like OpenAI's GPT and GPT-2.) We've selected the pytorch interface because it strikes a nice balance between the high-level APIs (which are easy to use but don't provide insight into how things work) and tensorflow code (which contains lots of details but often sidetracks us into lessons about tensorflow, when the purpose here is BERT!).If you're running this code on Google Colab, you will have to install this library each time you reconnect; the following cell will take care of that for you.
###Code
!pip install pytorch-pretrained-bert
###Output
Collecting pytorch-pretrained-bert
[?25l Downloading https://files.pythonhosted.org/packages/d7/e0/c08d5553b89973d9a240605b9c12404bcf8227590de62bae27acbcfe076b/pytorch_pretrained_bert-0.6.2-py3-none-any.whl (123kB)
[K |██▋ | 10kB 25.3MB/s eta 0:00:01
[K |█████▎ | 20kB 32.0MB/s eta 0:00:01
[K |████████ | 30kB 23.7MB/s eta 0:00:01
[K |██████████▋ | 40kB 21.8MB/s eta 0:00:01
[K |█████████████▎ | 51kB 22.6MB/s eta 0:00:01
[K |███████████████▉ | 61kB 16.3MB/s eta 0:00:01
[K |██████████████████▌ | 71kB 16.4MB/s eta 0:00:01
[K |█████████████████████▏ | 81kB 15.7MB/s eta 0:00:01
[K |███████████████████████▉ | 92kB 15.7MB/s eta 0:00:01
[K |██████████████████████████▌ | 102kB 16.3MB/s eta 0:00:01
[K |█████████████████████████████▏ | 112kB 16.3MB/s eta 0:00:01
[K |███████████████████████████████▊| 122kB 16.3MB/s eta 0:00:01
[K |████████████████████████████████| 133kB 16.3MB/s
[?25hRequirement already satisfied: tqdm in /usr/local/lib/python3.6/dist-packages (from pytorch-pretrained-bert) (4.41.1)
Requirement already satisfied: regex in /usr/local/lib/python3.6/dist-packages (from pytorch-pretrained-bert) (2019.12.20)
Requirement already satisfied: requests in /usr/local/lib/python3.6/dist-packages (from pytorch-pretrained-bert) (2.23.0)
Requirement already satisfied: torch>=0.4.1 in /usr/local/lib/python3.6/dist-packages (from pytorch-pretrained-bert) (1.7.0+cu101)
Requirement already satisfied: numpy in /usr/local/lib/python3.6/dist-packages (from pytorch-pretrained-bert) (1.19.5)
Collecting boto3
[?25l Downloading https://files.pythonhosted.org/packages/d7/53/001f30958e799a1635dfd062f94af2b16b836fc4366ff231fe2f2c7c8b50/boto3-1.17.12-py2.py3-none-any.whl (130kB)
[K |████████████████████████████████| 133kB 39.1MB/s
[?25hRequirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.6/dist-packages (from requests->pytorch-pretrained-bert) (2020.12.5)
Requirement already satisfied: urllib3!=1.25.0,!=1.25.1,<1.26,>=1.21.1 in /usr/local/lib/python3.6/dist-packages (from requests->pytorch-pretrained-bert) (1.24.3)
Requirement already satisfied: chardet<4,>=3.0.2 in /usr/local/lib/python3.6/dist-packages (from requests->pytorch-pretrained-bert) (3.0.4)
Requirement already satisfied: idna<3,>=2.5 in /usr/local/lib/python3.6/dist-packages (from requests->pytorch-pretrained-bert) (2.10)
Requirement already satisfied: dataclasses in /usr/local/lib/python3.6/dist-packages (from torch>=0.4.1->pytorch-pretrained-bert) (0.8)
Requirement already satisfied: future in /usr/local/lib/python3.6/dist-packages (from torch>=0.4.1->pytorch-pretrained-bert) (0.16.0)
Requirement already satisfied: typing-extensions in /usr/local/lib/python3.6/dist-packages (from torch>=0.4.1->pytorch-pretrained-bert) (3.7.4.3)
Collecting s3transfer<0.4.0,>=0.3.0
[?25l Downloading https://files.pythonhosted.org/packages/ea/43/4b4a1b26eb03a429a4c37ca7fdf369d938bd60018fc194e94b8379b0c77c/s3transfer-0.3.4-py2.py3-none-any.whl (69kB)
[K |████████████████████████████████| 71kB 12.1MB/s
[?25hCollecting jmespath<1.0.0,>=0.7.1
Downloading https://files.pythonhosted.org/packages/07/cb/5f001272b6faeb23c1c9e0acc04d48eaaf5c862c17709d20e3469c6e0139/jmespath-0.10.0-py2.py3-none-any.whl
Collecting botocore<1.21.0,>=1.20.12
[?25l Downloading https://files.pythonhosted.org/packages/85/78/b71baa2fa2dac70638a360ec6fdb00960134a1b68e895acc12b8f6916da2/botocore-1.20.12-py2.py3-none-any.whl (7.2MB)
[K |████████████████████████████████| 7.2MB 43.7MB/s
[?25hRequirement already satisfied: python-dateutil<3.0.0,>=2.1 in /usr/local/lib/python3.6/dist-packages (from botocore<1.21.0,>=1.20.12->boto3->pytorch-pretrained-bert) (2.8.1)
Requirement already satisfied: six>=1.5 in /usr/local/lib/python3.6/dist-packages (from python-dateutil<3.0.0,>=2.1->botocore<1.21.0,>=1.20.12->boto3->pytorch-pretrained-bert) (1.15.0)
[31mERROR: botocore 1.20.12 has requirement urllib3<1.27,>=1.25.4, but you'll have urllib3 1.24.3 which is incompatible.[0m
Installing collected packages: jmespath, botocore, s3transfer, boto3, pytorch-pretrained-bert
Successfully installed boto3-1.17.12 botocore-1.20.12 jmespath-0.10.0 pytorch-pretrained-bert-0.6.2 s3transfer-0.3.4
###Markdown
Now let's import pytorch, the pretrained BERT model, and a BERT tokenizer. We'll explain the BERT model in detail in a later tutorial, but this is the pre-trained model released by Google that ran for many, many hours on Wikipedia and [Book Corpus](https://arxiv.org/pdf/1506.06724.pdf), a dataset containing +10,000 books of different genres. This model is responsible (with a little modification) for beating NLP benchmarks across a range of tasks. Google released a few variations of BERT models, but the one we'll use here is the smaller of the two available sizes ("base" and "large") and ignores casing, hence "uncased.""
###Code
import torch
from pytorch_pretrained_bert import BertTokenizer, BertModel, BertForMaskedLM
# OPTIONAL: if you want to have more information on what's happening, activate the logger as follows
import logging
#logging.basicConfig(level=logging.INFO)
import matplotlib.pyplot as plt
%matplotlib inline
# Load pre-trained model tokenizer (vocabulary)
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
###Output
100%|██████████| 231508/231508 [00:00<00:00, 25419239.02B/s]
###Markdown
2. Input FormattingBecause BERT is a pretrained model that expects input data in a specific format, we will need:- special tokens to mark the beginning ([CLS]) and separation/end of sentences ([SEP])- tokens that conform with the fixed vocabulary used in BERT- token IDs from BERT's tokenizer- mask IDs to indicate which elements in the sequence are tokens and which are padding elements- segment IDs used to distinguish different sentences- positional embeddings used to show token position within the sequenceLuckily, this interface takes care of some of these input specifications for us so we will only have to manually create a few of them (we'll revisit the other inputs in another tutorial). 2.1. Special TokensBERT can take as input either one or two sentences, and expects special tokens to mark the beginning and end of each one:**2 Sentence Input**:`[CLS] The man went to the store. [SEP] He bought a gallon of milk. [SEP]`**1 Sentence Input**:`[CLS] The man went to the store. [SEP]` 2.2. Tokenization BERT provides its own tokenizer, which we imported above. Let's see how it handles the below sentence.
###Code
text = "Here is the sentence I want embeddings for."
marked_text = "[CLS] " + text + " [SEP]"
# Tokenize our sentence with the BERT tokenizer.
tokenized_text = tokenizer.tokenize(marked_text)
# Print out the tokens.
print (tokenized_text)
###Output
['[CLS]', 'here', 'is', 'the', 'sentence', 'i', 'want', 'em', '##bed', '##ding', '##s', 'for', '.', '[SEP]']
###Markdown
Notice how the word "embeddings" is represented:`['em', 'bed', 'ding', 's']`The original word has been split into smaller subwords and characters. The two hash signs preceding some of these subwords are just our tokenizer's way to denote that this subword or character is part of a larger word and preceded by another subword. So, for example, the 'bed' token is separate from the 'bed' token; the first is used whenever the subword 'bed' occurs within a larger word and the second is used explicitly for when the standalone token 'thing you sleep on' occurs.Why does it look this way? This is because the BERT tokenizer was created with a WordPiece model. This model greedily creates a fixed-size vocabulary of individual characters, subwords, and words that best fits our language data. Since the vocabulary limit size of our BERT tokenizer model is 30,000, the WordPiece model generated a vocabulary that contains all English characters plus the ~30,000 most common words and subwords found in the English language corpus the model is trained on. This vocabulary contains four things:1. Whole words2. Subwords occuring at the front of a word or in isolation ("em" as in "embeddings" is assigned the same vector as the standalone sequence of characters "em" as in "go get em" )3. Subwords not at the front of a word, which are preceded by '' to denote this case4. Individual charactersTo tokenize a word under this model, the tokenizer first checks if the whole word is in the vocabulary. If not, it tries to break the word into the largest possible subwords contained in the vocabulary, and as a last resort will decompose the word into individual characters. Note that because of this, we can always represent a word as, at the very least, the collection of its individual characters.As a result, rather than assigning out of vocabulary words to a catch-all token like 'OOV' or 'UNK,' words that are not in the vocabulary are decomposed into subword and character tokens that we can then generate embeddings for. So, rather than assigning "embeddings" and every other out of vocabulary word to an overloaded unknown vocabulary token, we split it into subword tokens ['em', 'bed', 'ding', 's'] that will retain some of the contextual meaning of the original word. We can even average these subword embedding vectors to generate an approximate vector for the original word.(For more information about WordPiece, see the [original paper](https://static.googleusercontent.com/media/research.google.com/en//pubs/archive/37842.pdf) and further disucssion in Google's [Neural Machine Translation System](https://arxiv.org/pdf/1609.08144.pdf).) Here are some examples of the tokens contained in our vocabulary. Tokens beginning with two hashes are subwords or individual characters.*For an exploration of the contents of BERT's vocabulary, see [this notebook](https://colab.research.google.com/drive/1fCKIBJ6fgWQ-f6UKs7wDTpNTL9N-Cq9X) I created and the accompanying YouTube video [here](https://youtu.be/zJW57aCBCTk).*
###Code
list(tokenizer.vocab.keys())[5000:5020]
###Output
_____no_output_____
###Markdown
After breaking the text into tokens, we then have to convert the sentence from a list of strings to a list of vocabulary indices.From here on, we'll use the below example sentence, which contains multiple uses of the word "bank" with different meanings.
###Code
# Define a new example sentence with multiple meanings of the word "bank"
text = "After stealing money from the bank vault, the bank robber was seen " \
"fishing on the Mississippi river bank."
# Add the special tokens.
marked_text = "[CLS] " + text + " [SEP]"
# Split the sentence into tokens.
tokenized_text = tokenizer.tokenize(marked_text)
# Map the token strings to their vocabulary indices.
indexed_tokens = tokenizer.convert_tokens_to_ids(tokenized_text)
# Display the words with their indices.
for tup in zip(tokenized_text, indexed_tokens):
print('{:<12} {:>6,}'.format(tup[0], tup[1]))
###Output
[CLS] 101
after 2,044
stealing 11,065
money 2,769
from 2,013
the 1,996
bank 2,924
vault 11,632
, 1,010
the 1,996
bank 2,924
robber 27,307
was 2,001
seen 2,464
fishing 5,645
on 2,006
the 1,996
mississippi 5,900
river 2,314
bank 2,924
. 1,012
[SEP] 102
###Markdown
2.3. Segment IDBERT is trained on and expects sentence pairs, using 1s and 0s to distinguish between the two sentences. That is, for each token in "tokenized_text," we must specify which sentence it belongs to: sentence 0 (a series of 0s) or sentence 1 (a series of 1s). For our purposes, single-sentence inputs only require a series of 1s, so we will create a vector of 1s for each token in our input sentence. If you want to process two sentences, assign each word in the first sentence plus the '[SEP]' token a 0, and all tokens of the second sentence a 1.
###Code
# Mark each of the 22 tokens as belonging to sentence "1".
segments_ids = [1] * len(tokenized_text)
print (segments_ids)
###Output
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
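###Markdown
The cell above covers the single-sentence case used in this tutorial. As a hedged illustration of the two-sentence case described in the text (not used anywhere below), the cell here assigns segment ID 0 to the first sentence including its trailing [SEP], and 1 to every token of the second sentence.
###Code
# Two-sentence input: sentence A gets segment ID 0, sentence B gets segment ID 1
text_a = "The man went to the store."
text_b = "He bought a gallon of milk."

tokens_a = tokenizer.tokenize("[CLS] " + text_a + " [SEP]")
tokens_b = tokenizer.tokenize(text_b + " [SEP]")

pair_tokens = tokens_a + tokens_b
pair_segments = [0] * len(tokens_a) + [1] * len(tokens_b)

for tok, seg in zip(pair_tokens, pair_segments):
    print('{:<12} {}'.format(tok, seg))
###Output
_____no_output_____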
###Markdown
3. Extracting Embeddings 3.1. Running BERT on our textNext we need to convert our data to torch tensors and call the BERT model. The BERT PyTorch interface requires that the data be in torch tensors rather than Python lists, so we convert the lists here - this does not change the shape or the data. model.eval() puts our model in evaluation mode as opposed to training mode. In this case, evaluation mode turns off dropout regularization which is used in training.Calling `from_pretrained` will fetch the model from the internet. When we load the `bert-base-uncased`, we see the definition of the model printed in the logging. The model is a deep neural network with 12 layers! Explaining the layers and their functions is outside the scope of this post, and you can skip over this output for now.
###Code
# Convert inputs to PyTorch tensors
tokens_tensor = torch.tensor([indexed_tokens])
segments_tensors = torch.tensor([segments_ids])
# Load pre-trained model (weights)
model = BertModel.from_pretrained('bert-base-uncased')
# Put the model in "evaluation" mode, meaning feed-forward operation.
model.eval()
###Output
100%|██████████| 407873900/407873900 [00:05<00:00, 79622629.31B/s]
###Markdown
Next, let's fetch the hidden states of the network.torch.no_grad deactivates the gradient calculations, saves memory, and speeds up computation (we don't need gradients or backpropagation since we're just running a forward pass).
###Code
# Predict hidden states features for each layer
with torch.no_grad():
encoded_layers, _ = model(tokens_tensor, segments_tensors)
###Output
_____no_output_____
###Markdown
3.2. Understanding the OutputThe full set of hidden states for this model, stored in the object `encoded_layers`, is a little dizzying. This object has four dimensions, in the following order:1. The layer number (12 layers)2. The batch number (1 sentence)3. The word / token number (22 tokens in our sentence)4. The hidden unit / feature number (768 features)That’s 202,752 unique values just to represent our one sentence!The second dimension, the batch size, is used when submitting multiple sentences to the model at once; here, though, we just have one example sentence.
###Code
print ("Number of layers:", len(encoded_layers))
layer_i = 0
print ("Number of batches:", len(encoded_layers[layer_i]))
batch_i = 0
print ("Number of tokens:", len(encoded_layers[layer_i][batch_i]))
token_i = 0
print ("Number of hidden units:", len(encoded_layers[layer_i][batch_i][token_i]))
###Output
Number of layers: 12
Number of batches: 1
Number of tokens: 22
Number of hidden units: 768
###Markdown
Let's take a quick look at the range of values for a given layer and token.You'll find that the range is fairly similar for all layers and tokens, with the majority of values falling between \[-2, 2\], and a small smattering of values around -10.
###Code
# For the 5th token in our sentence, select its feature values from layer 5.
token_i = 5
layer_i = 5
vec = encoded_layers[layer_i][batch_i][token_i]
# Plot the values as a histogram to show their distribution.
plt.figure(figsize=(10,10))
plt.hist(vec, bins=200)
plt.show()
###Output
_____no_output_____
###Markdown
Grouping the values by layer makes sense for the model, but for our purposes we want it grouped by token. Current dimensions:`[ layers, batches, tokens, features]`Desired dimensions:`[ tokens, layers, features]`Luckily, PyTorch includes the `permute` function for easily rearranging the dimensions of a tensor. However, the first dimension is currently a Python list!
###Code
# `encoded_layers` is a Python list.
print(' Type of encoded_layers: ', type(encoded_layers))
# Each layer in the list is a torch tensor.
print('Tensor shape for each layer: ', encoded_layers[0].size())
###Output
Type of encoded_layers: <class 'list'>
Tensor shape for each layer: torch.Size([1, 22, 768])
###Markdown
Let's combine the 12 layers to make this one whole big tensor.
###Code
# Concatenate the tensors for all layers. We use `stack` here to
# create a new dimension in the tensor.
token_embeddings = torch.stack(encoded_layers, dim=0)
token_embeddings.size()
###Output
_____no_output_____
###Markdown
Let's get rid of the "batches" dimension since we don't need it.
###Code
# Remove dimension 1, the "batches".
token_embeddings = torch.squeeze(token_embeddings, dim=1)
token_embeddings.size()
###Output
_____no_output_____
###Markdown
Finally, we can switch around the "layers" and "tokens" dimensions with `permute`.
###Code
# Swap dimensions 0 and 1.
token_embeddings = token_embeddings.permute(1,0,2)
token_embeddings.size()
###Output
_____no_output_____
###Markdown
3.3. Creating word and sentence vectors from hidden statesNow, what do we do with these hidden states? We would like to get individual vectors for each of our tokens, or perhaps a single vector representation of the whole sentence, but for each token of our input we have 12 separate vectors each of length 768.In order to get the individual vectors we will need to combine some of the layer vectors...but which layer or combination of layers provides the best representation? The BERT authors tested this by feeding different vector combinations as input features to a BiLSTM used on a named entity recognition task and observing the resulting F1 scores.(Image from [Jay Allamar](http://jalammar.github.io/illustrated-bert/)'s blog)While concatenation of the last four layers produced the best results on this specific task, many of the other methods come in a close second and in general it is advisable to test different versions for your specific application: results may vary.This is partially demonstrated by noting that the different layers of BERT encode very different kinds of information, so the appropriate pooling strategy will change depending on the application because different layers encode different kinds of information. Hanxiao's discussion of this topic is relevant, as are their experiments looking at the PCA visualizations of different layers trained on a news dataset and observing the differences in the four class separations from different pooling strategies:(Images from [Hanxiao's](https://github.com/hanxiao/bert-as-service) BERT-as-a-service)The upshot being that, again**, the correct pooling strategy (mean, max, concatenation, etc.) and layers used (last four, all, last layer, etc.) is dependent on the application**. This discussion of pooling strategies applies both to entire sentence embeddings and individual ELMO-like token embeddings. Word VectorsTo give you some examples, let's create word vectors two ways. First, let's **concatenate** the last four layers, giving us a single word vector per token. Each vector will have length `4 x 768 = 3,072`.
###Code
# Stores the token vectors, with shape [22 x 3,072]
token_vecs_cat = []
# `token_embeddings` is a [22 x 12 x 768] tensor.
# For each token in the sentence...
for token in token_embeddings:
# `token` is a [12 x 768] tensor
# Concatenate the vectors (that is, append them together) from the last
# four layers.
# Each layer vector is 768 values, so `cat_vec` is length 3,072.
cat_vec = torch.cat((token[-1], token[-2], token[-3], token[-4]), dim=0)
# Use `cat_vec` to represent `token`.
token_vecs_cat.append(cat_vec)
print ('Shape is: %d x %d' % (len(token_vecs_cat), len(token_vecs_cat[0])))
###Output
Shape is: 22 x 3072
###Markdown
As an alternative method, let's try creating the word vectors by **summing** together the last four layers.
###Code
# Stores the token vectors, with shape [22 x 768]
token_vecs_sum = []
# `token_embeddings` is a [22 x 12 x 768] tensor.
# For each token in the sentence...
for token in token_embeddings:
# `token` is a [12 x 768] tensor
# Sum the vectors from the last four layers.
sum_vec = torch.sum(token[-4:], dim=0)
# Use `sum_vec` to represent `token`.
token_vecs_sum.append(sum_vec)
print ('Shape is: %d x %d' % (len(token_vecs_sum), len(token_vecs_sum[0])))
###Output
Shape is: 22 x 768
###Markdown
Sentence VectorsTo get a single vector for our entire sentence we have multiple application-dependent strategies, but a simple approach is to average the second to last hidden layer of each token producing a single 768 length vector.
###Code
# `encoded_layers` has shape [12 x 1 x 22 x 768]
# `token_vecs` is a tensor with shape [22 x 768]
token_vecs = encoded_layers[11][0]
# Calculate the average of all 22 token vectors.
sentence_embedding = torch.mean(token_vecs, dim=0)
print ("Our final sentence embedding vector of shape:", sentence_embedding.size())
###Output
Our final sentence embedding vector of shape: torch.Size([768])
###Markdown
3.4. Confirming contextually dependent vectorsTo confirm that the values of these vectors are in fact contextually dependent, let's look at the different instances of the word "bank" in our example sentence:"After stealing money from the **bank vault**, the **bank robber** was seen fishing on the Mississippi **river bank**."Let's find the index of those three instances of the word "bank" in the example sentence.
###Code
for i, token_str in enumerate(tokenized_text):
print (i, token_str)
###Output
0 [CLS]
1 after
2 stealing
3 money
4 from
5 the
6 bank
7 vault
8 ,
9 the
10 bank
11 robber
12 was
13 seen
14 fishing
15 on
16 the
17 mississippi
18 river
19 bank
20 .
21 [SEP]
###Markdown
They are at 6, 10, and 19.For this analysis, we'll use the word vectors that we created by summing the last four layers.We can try printing out their vectors to compare them.
###Code
print('First 5 vector values for each instance of "bank".')
print('')
print("bank vault ", str(token_vecs_sum[6][:5]))
print("bank robber ", str(token_vecs_sum[10][:5]))
print("river bank ", str(token_vecs_sum[19][:5]))
###Output
First 5 vector values for each instance of "bank".
bank vault tensor([ 2.1319, -2.1413, -1.6260, 0.8638, 3.3173])
bank robber tensor([ 1.1868, -1.5298, -1.3770, 1.0648, 3.1446])
river bank tensor([ 1.1295, -1.4724, -0.7296, -0.0901, 2.4970])
###Markdown
We can see that the values differ, but let's calculate the cosine similarity between the vectors to make a more precise comparison.
###Code
from scipy.spatial.distance import cosine
# Calculate the cosine similarity between the word bank
# in "bank robber" vs "river bank" (different meanings).
diff_bank = 1 - cosine(token_vecs_sum[10], token_vecs_sum[19])
# Calculate the cosine similarity between the word bank
# in "bank robber" vs "bank vault" (same meaning).
same_bank = 1 - cosine(token_vecs_sum[10], token_vecs_sum[6])
print('Vector similarity for *similar* meanings: %.2f' % same_bank)
print('Vector similarity for *different* meanings: %.2f' % diff_bank)
###Output
Vector similarity for *similar* meanings: 0.95
Vector similarity for *different* meanings: 0.68
###Markdown
This looks pretty good! 4. Appendix 4.1. Special tokensIt should be noted that although the `[CLS]` acts as an "aggregate representation" for classification tasks, this is not the best choice for a high quality sentence embedding vector. [According to](https://github.com/google-research/bert/issues/164) BERT author Jacob Devlin: "*I'm not sure what these vectors are, since BERT does not generate meaningful sentence vectors. It seems that this is is doing average pooling over the word tokens to get a sentence vector, but we never suggested that this will generate meaningful sentence representations*."(However, the [CLS] token does become meaningful if the model has been fine-tuned, where the last hidden layer of this token is used as the "sentence vector" for sequence classification.) 4.2. Out of vocabulary wordsFor **out of vocabulary words** that are composed of multiple sentence and character-level embeddings, there is a further issue of how best to recover this embedding. Averaging the embeddings is the most straightforward solution (one that is relied upon in similar embedding models with subword vocabularies like fasttext), but summation of subword embeddings and simply taking the last token embedding (remember that the vectors are context sensitive) are acceptable alternative strategies. 4.3. Similarity metricsIt is worth noting that word-level **similarity comparisons** are not appropriate with BERT embeddings because these embeddings are contextually dependent, meaning that the word vector changes depending on the sentence it appears in. This allows wonderful things like polysemy so that e.g. your representation encodes river "bank" and not a financial institution "bank", but makes direct word-to-word similarity comparisons less valuable. However, for sentence embeddings similarity comparison is still valid such that one can query, for example, a single sentence against a dataset of other sentences in order to find the most similar. Depending on the similarity metric used, the resulting similarity values will be less informative than the relative ranking of similarity outputs since many similarity metrics make assumptions about the vector space (equally-weighted dimensions, for example) that do not hold for our 768-dimensional vector space. 4.4. ImplementationsYou can use the code in this notebook as the foundation of your own application to extract BERT features from text. However, official [tensorflow](https://github.com/google-research/bert/blob/master/extract_features.py) and well-regarded [pytorch](https://github.com/huggingface/pytorch-pretrained-BERT/blob/master/examples/extract_features.py) implementations already exist that do this for you. Additionally, [bert-as-a-service](https://github.com/hanxiao/bert-as-service) is an excellent tool designed specifically for running this task with high performance, and is the one I would recommend for production applications. The author has taken great care in the tool's implementation and provides excellent documentation (some of which was used to help create this tutorial) to help users understand the more nuanced details the user faces, like resource management and pooling strategy.
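Returning to section 4.2's point about out-of-vocabulary words: below is a minimal sketch (not from the original tutorial) of the three recovery strategies it mentions. It reuses `torch` and the `token_vecs_sum` list from section 3.3 and assumes, purely for illustration, that one OOV word had been split into wordpieces sitting at positions 3 and 4 of our sentence.

```python
# Minimal sketch (illustrative only): recover one vector for a word that the
# tokenizer split into several wordpieces, using the summed-layer vectors above.
piece_positions = [3, 4]  # hypothetical wordpiece positions of one OOV word

piece_vecs = torch.stack([token_vecs_sum[i] for i in piece_positions])

oov_mean = torch.mean(piece_vecs, dim=0)  # averaging strategy
oov_sum = torch.sum(piece_vecs, dim=0)    # summation strategy
oov_last = piece_vecs[-1]                 # "take the last piece" strategy

print(oov_mean.shape, oov_sum.shape, oov_last.shape)
```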
###Code
###Output
_____no_output_____
|
Word Embedings/TF-IDF Vectorizer/TF_IDF_Vectorizer.ipynb
|
###Markdown
###Code
from google.colab import drive
drive.mount ("/content/drive")
import os
datasets_dir = os.chdir("drive/My Drive/Colab Notebooks/Datasets")
import pandas as pd
import re
import string
import nltk
from nltk.corpus import stopwords
pd.set_option('display.max_colwidth', 100)
stopwords = nltk.corpus.stopwords.words('english')
messages = pd.read_csv('spam.csv', encoding = 'latin-1')
messages.head()
messages = messages.drop(labels = ['Unnamed: 2', 'Unnamed: 3', 'Unnamed: 4'], axis = 1)
messages.columns = ['label', 'text']
print(messages.shape)
messages.head()
###Output
(5572, 2)
###Markdown
TF-IDF VectorizerCreates a document-term matrix where the columns represent single unique terms (unigrams) and each cell holds a weight that reflects how important that word is to the document.
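To make the weighting concrete, here is a rough sketch of the classic tf-idf formula on a toy corpus (illustrative only; scikit-learn's `TfidfVectorizer`, used below, applies a smoothed variant of the idf term).

```python
# Toy illustration of tf-idf: term frequency in a document times the
# (log) inverse document frequency across the corpus.
import math

docs = [["free", "prize", "call", "now"],
        ["call", "me", "later"],
        ["free", "call"]]

def tf_idf(term, doc, corpus):
    tf = doc.count(term) / len(doc)
    df = sum(1 for d in corpus if term in d)
    return tf * math.log(len(corpus) / df)

print(tf_idf("prize", docs[0], docs))  # rare term -> relatively large weight
print(tf_idf("free", docs[0], docs))   # appears in 2 of 3 docs -> smaller weight
print(tf_idf("call", docs[0], docs))   # appears in every doc -> weight 0
```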
###Code
# define a function to handle all data cleaning
def clean_data(text):
text = "".join([word.lower() for word in text if word not in string.punctuation])
tokens = re.split('\W+', text)
text = [word for word in tokens if word not in stopwords]
return text
###Output
_____no_output_____
###Markdown
Apply TF-IDF Vectorizer
###Code
# Fit the basic TF-IDF Vectorizer and view the results
import sklearn
from sklearn.feature_extraction.text import TfidfVectorizer
tfidf_vect = TfidfVectorizer(analyzer = clean_data)
X_tfidf = tfidf_vect.fit_transform(messages['text'])
print(X_tfidf.shape)
print(tfidf_vect.get_feature_names()) # return all the words that are vectorized or learned from our training data
# The output of TF-IDF is a sparse matrix
X_tfidf
###Output
_____no_output_____
###Markdown
Build A Basic Random Forest Model On Top Of Vectorized Text
###Code
X_features = pd.DataFrame(X_tfidf.toarray())
X_features.head()
###Output
_____no_output_____
###Markdown
Explore Random Forest Classifier Attributes & Hyperparameters
###Code
from sklearn.ensemble import RandomForestClassifier
print(RandomForestClassifier())
from sklearn.metrics import precision_score, recall_score
from sklearn.model_selection import train_test_split
# Split data into train and test dataset
X_train, X_test, y_train, y_test = train_test_split(X_features,
messages['label'],
test_size = 0.2)
# Fit a basic Random Forest Model
rf = RandomForestClassifier()
rf_model = rf.fit(X_train, y_train)
# Making predictions on the test set using the fit model
y_pred = rf_model.predict(X_test)
#Evaluate model predictions using precision and recall
precision = precision_score(y_test, y_pred, pos_label = 'spam')
recall = recall_score(y_test, y_pred, pos_label = 'spam')
print(f"Precision: {round(precision, 3)} / Recall: {round(recall, 3)}")
###Output
_____no_output_____
|
workspace_files/resources/Linear Regression.ipynb
|
###Markdown
Simple linear regression
###Code
plt.figure(figsize=(16, 8))
plt.scatter(
data['TV'],
data['sales'],
c='black'
)
plt.xlabel("Money spent on TV ads ($)")
plt.ylabel("Sales ($)")
plt.show()
X = data['TV'].values.reshape(-1,1)
y = data['sales'].values.reshape(-1,1)
reg = LinearRegression()
reg.fit(X, y)
print(reg.coef_[0][0])
print(reg.intercept_[0])
print("The linear model is: Y = {:.5} + {:.5}X".format(reg.intercept_[0], reg.coef_[0][0]))
predictions = reg.predict(X)
plt.figure(figsize=(16, 8))
plt.scatter(
data['TV'],
data['sales'],
c='black'
)
plt.plot(
data['TV'],
predictions,
c='blue',
linewidth=2
)
plt.xlabel("Money spent on TV ads ($)")
plt.ylabel("Sales ($)")
plt.show()
X = data['TV']
y = data['sales']
X2 = sm.add_constant(X)
est = sm.OLS(y, X2)
est2 = est.fit()
print(est2.summary())
###Output
OLS Regression Results
==============================================================================
Dep. Variable: sales R-squared: 0.612
Model: OLS Adj. R-squared: 0.610
Method: Least Squares F-statistic: 312.1
Date: Wed, 28 Nov 2018 Prob (F-statistic): 1.47e-42
Time: 22:06:58 Log-Likelihood: -519.05
No. Observations: 200 AIC: 1042.
Df Residuals: 198 BIC: 1049.
Df Model: 1
Covariance Type: nonrobust
==============================================================================
coef std err t P>|t| [0.025 0.975]
------------------------------------------------------------------------------
const 7.0326 0.458 15.360 0.000 6.130 7.935
TV 0.0475 0.003 17.668 0.000 0.042 0.053
==============================================================================
Omnibus: 0.531 Durbin-Watson: 1.935
Prob(Omnibus): 0.767 Jarque-Bera (JB): 0.669
Skew: -0.089 Prob(JB): 0.716
Kurtosis: 2.779 Cond. No. 338.
==============================================================================
Warnings:
[1] Standard Errors assume that the covariance matrix of the errors is correctly specified.
###Markdown
Multiple linear regression
###Code
Xs = data.drop(['sales', 'Unnamed: 0'], axis=1)
y = data['sales'].values.reshape(-1,1)
reg = LinearRegression()
reg.fit(Xs, y)
print(reg.coef_)
print(reg.intercept_)
print("The linear model is: Y = {:.5} + {:.5}*TV + {:.5}*radio + {:.5}*newspaper".format(reg.intercept_[0], reg.coef_[0][0], reg.coef_[0][1], reg.coef_[0][2]))
reg.score(Xs, y)
X = np.column_stack((data['TV'], data['radio'], data['newspaper']))
y = data['sales']
X2 = sm.add_constant(X)
est = sm.OLS(y, X2)
est2 = est.fit()
print(est2.summary())
###Output
OLS Regression Results
==============================================================================
Dep. Variable: sales R-squared: 0.897
Model: OLS Adj. R-squared: 0.896
Method: Least Squares F-statistic: 570.3
Date: Wed, 28 Nov 2018 Prob (F-statistic): 1.58e-96
Time: 22:06:58 Log-Likelihood: -386.18
No. Observations: 200 AIC: 780.4
Df Residuals: 196 BIC: 793.6
Df Model: 3
Covariance Type: nonrobust
==============================================================================
coef std err t P>|t| [0.025 0.975]
------------------------------------------------------------------------------
const 2.9389 0.312 9.422 0.000 2.324 3.554
x1 0.0458 0.001 32.809 0.000 0.043 0.049
x2 0.1885 0.009 21.893 0.000 0.172 0.206
x3 -0.0010 0.006 -0.177 0.860 -0.013 0.011
==============================================================================
Omnibus: 60.414 Durbin-Watson: 2.084
Prob(Omnibus): 0.000 Jarque-Bera (JB): 151.241
Skew: -1.327 Prob(JB): 1.44e-33
Kurtosis: 6.332 Cond. No. 454.
==============================================================================
Warnings:
[1] Standard Errors assume that the covariance matrix of the errors is correctly specified.
|
Copy_of_bear_classifier.ipynb
|
###Markdown
###Code
#hide
!pip install -Uqq fastbook
import fastbook
from fastai.vision.all import *
from fastai.vision.widgets import *
!pip install voila
!jupyter serverextension enable --sys-prefix voila
###Output
_____no_output_____
###Markdown
Test classifier copied from the Amazing Bear Classifier Test text for my version of the bear classifier----
###Code
path = Path()
learn_inf = load_learner(path/'export.pkl', cpu=True)
btn_upload = widgets.FileUpload()
out_pl = widgets.Output()
lbl_pred = widgets.Label()
def on_data_change(change):
lbl_pred.value = ''
img = PILImage.create(btn_upload.data[-1])
out_pl.clear_output()
with out_pl: display(img.to_thumb(128,128))
pred,pred_idx,probs = learn_inf.predict(img)
lbl_pred.value = f'Prediction: {pred}; Probability: {probs[pred_idx]:.04f}'
btn_upload.observe(on_data_change, names=['data'])
display(VBox([widgets.Label('Select your bear!'), btn_upload, out_pl, lbl_pred]))
from google.colab import drive
drive.mount('/content/drive')
###Output
_____no_output_____
|
week3/Assignment/3_EDA.ipynb
|
###Markdown
Exploratory Data Analysis It has been estimated that 75% or more of a Data Scientist's time is spent manipulating the data before any "data science" can even start. Exploratory Data Analysis (EDA) is the process of **getting to know your data**.Classical statistics tests hypotheses by fitting models that demonstrate relationships among the data. But what if you don't have a hypothesis?**Exploratory Data Analysis** helps uncover those relationships so you can form hypotheses. EDA is the first step in virtually every **machine learning** model you will build. **_Data Preparation for Machine Learning_** is a great ebook by "machine learning specialist," Jason Brownlee. Even though the book does not explicitly state that it is teaching EDA, that is absolutely the purpose of the demonstrated techniques. We will be using the Palmer Penguins data set to demonstrate these concepts. The **Seaborn** graphing package can download and access the data from a repository on GitHub using its `load_dataset()` function. Reference:> Data Preparation for Machine Learning> Jason Brownlee> https://machinelearningmastery.com/data-preparation-for-machine-learning/> Horst AM, Hill AP, Gorman KB (2020). palmerpenguins: Palmer> Archipelago (Antarctica) penguin data. R package version 0.1.0.> https://allisonhorst.github.io/palmerpenguins/. doi:> 10.5281/zenodo.3960218. The Bad News EDA is as much art as it is science. The steps you take will depend as much on the data set and your end goal as any set of predefined recipe-like steps I can give you. All I can do is give you an introduction and encourage you to **continue to study the topic on your own**. The Good News The good news is that there are several tasks that you will need to do with every data set. Jason Brownlee gives us a basic framework for data preparation or EDA:* **Data Cleaning**: Finding and fixing errors and problems in the data (outliers, missing data, etc.)* **Feature Selection**: Identifying the relationships between variables and their importance in the task.* **Data Transforms**: Change the scale or distribution of the numbers in a variable.* **Feature Engineering**: Creating new variables from existing variables.* **Dimensionality Reduction**: "Compacting" several variables down into a smaller number of variables (i.e. 10 variables down to 3 for easier graphing).Graphically, that looks something like this: Data Cleaning Data cleaning is where you will find and handle missing values, look for outliers, detect duplicate rows, check to make sure columns have enough variance, etc.Let's demonstrate these ideas with the data set. Libraries and Data
###Code
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
%matplotlib inline
# Makes graphics look better
sns.set()
###Output
_____no_output_____
###Markdown
The Palmer Penguins data set is one of the Seaborn "built-in" data sets. That is, a dataset stored in a special GitHub repository that Seaborn knows how to access.
###Code
penguins = sns.load_dataset('penguins')
###Output
_____no_output_____
###Markdown
First of all, I don't know exactly what kind of variable the data was loaded into. Let's check.
###Code
type(penguins)
###Output
_____no_output_____
###Markdown
Great! It's a Pandas DataFrame. Now, let's see the size.
###Code
penguins.shape
###Output
_____no_output_____
###Markdown
344 rows by 7 columns. Let's look at the first few rows.
###Code
penguins.head()
###Output
_____no_output_____
###Markdown
###Code
penguins.info()
###Output
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 344 entries, 0 to 343
Data columns (total 7 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 species 344 non-null object
1 island 344 non-null object
2 bill_length_mm 342 non-null float64
3 bill_depth_mm 342 non-null float64
4 flipper_length_mm 342 non-null float64
5 body_mass_g 342 non-null float64
6 sex 333 non-null object
dtypes: float64(4), object(3)
memory usage: 18.9+ KB
###Markdown
As you can see, the `info()` function tells us how many rows we have, how many non-null values are in each column, and the data type of each column. Let's look at some more summary statistics, then we can sort out the missing categorical data.
###Code
penguins.describe()
###Output
_____no_output_____
###Markdown
You can also rotate that display for easier reading:
###Code
penguins.describe().T
###Output
_____no_output_____
###Markdown
The "T" means "transform" -- in this case, swap rows and columns.We will look at this data graphically in a few minutes. Missing Data Looking at the first 5 rows above, we see one row where almost all the data is missing. Row 3 simply has the species and island filled out. That row probably won't contribute much to analysis and we could **drop it**. BUT, we need to be careful -- there are many ways to deal with missing data, and dropping rows is the sledgehammer approach. **Dropping rows should probably _NOT_ be the first tool you reach for.**
###Code
penguins[penguins.isnull().values.any(axis=1)]
###Output
_____no_output_____
###Markdown
As you can see, there are only 2 rows missing all data. The others only have sex data missing, **which can be inferred**. Another way to look at NaNs:
###Code
penguins.isnull().sum()
###Output
_____no_output_____
###Markdown
First, let's get rid of the entries that do not have a weight -- we will do this by copying all rows that have a weight into a new (but same-named) dataframe.
###Code
penguins = penguins[penguins.body_mass_g.notna()]
penguins.info()
###Output
<class 'pandas.core.frame.DataFrame'>
Int64Index: 342 entries, 0 to 343
Data columns (total 7 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 species 342 non-null object
1 island 342 non-null object
2 bill_length_mm 342 non-null float64
3 bill_depth_mm 342 non-null float64
4 flipper_length_mm 342 non-null float64
5 body_mass_g 342 non-null float64
6 sex 333 non-null object
dtypes: float64(4), object(3)
memory usage: 21.4+ KB
###Markdown
Notice that we immediately used `info()` to check and make sure we only dropped two rows.It looks like the 'sex' column is the only one that still has missing values. Let's figure out what the percentage of missing values is:
###Code
penguins.sex.isnull().sum()
penguins.sex.isnull().sum() / penguins.shape[0] * 100
###Output
_____no_output_____
###Markdown
Only about 2.6% of the values in the 'sex' column are missing. There are various ways we could take care of these values:1. As mentioned, we could drop these rows.2. We could use one of Scikit-learn's imputer functions to figure out a value for us (a small sketch of this appears after the next cell).3. We could use the average (numerical) or the most frequent value (category)4. **We could use machine learning to guess the values.**Option 4 is interesting -- if we consider our column with missing data as being our target, and the columns that have data as being our predictor variables, then we can construct a machine learning model using the complete records as the train and test data and the records with incomplete entries as the rows we predict. Let's try it:First, we have to make sure our target column has only "Male", "Female", and missing values.
###Code
penguins.loc[(penguins.sex != "Male") & (penguins.sex != 'Female')]
###Output
_____no_output_____
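(A quick aside on options 2 and 3 from the list above: a rough sketch of how scikit-learn's `SimpleImputer` could fill the same column with its most frequent value. Illustrative only; we will not use it in what follows.)

```python
# Sketch only: fill the missing 'sex' values with the most frequent category.
from sklearn.impute import SimpleImputer

imputer = SimpleImputer(strategy='most_frequent')
sex_filled = imputer.fit_transform(penguins[['sex']])
print(pd.Series(sex_filled.ravel()).value_counts())
```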
###Markdown
First, though, we need to change the **'object'** columns to categories. Categorical Data The columns that have an **object** type have non-numeric data in them. That data could be something with little analytical value (like an individual person's name), or it could be something like a category. In this case, we could consider **species, island, and sex** to be categories. Most machine learning algorithms won't work with string data, but Pandas has a "category" data type that will display a string for **your** convenience, but actually be a number for the ML. There are some complexities to be aware of when converting categories from string to numeric. Numbers imply order. If we number the species like so:* 1 - Chinstrap* 2 - Adelie* 3 - GentooDoes that imply that Chinstrap is *better* than Gentoo? There are more of them? They are bigger? In our case, the numbers have no meaning and we have to make sure the ML algorithms understand that. Pandas "category" type can be either ordered or unordered, with unordered being the default. Let's convert those columns now.
###Code
penguins.species = penguins.species.astype('category')
penguins.island = penguins.island.astype('category')
penguins.sex = penguins.sex.astype('category')
penguins.info()
###Output
<class 'pandas.core.frame.DataFrame'>
Int64Index: 342 entries, 0 to 343
Data columns (total 7 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 species 342 non-null category
1 island 342 non-null category
2 bill_length_mm 342 non-null float64
3 bill_depth_mm 342 non-null float64
4 flipper_length_mm 342 non-null float64
5 body_mass_g 342 non-null float64
6 sex 333 non-null category
dtypes: category(3), float64(4)
memory usage: 14.7 KB
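(A side note on the ordered/unordered distinction mentioned above: pandas can also mark a category as ordered when the order does carry meaning. A small illustration with a made-up column, not part of the penguins data:)

```python
# Illustration only: an *ordered* categorical, unlike the unordered ones above.
from pandas.api.types import CategoricalDtype

size_type = CategoricalDtype(categories=['small', 'medium', 'large'], ordered=True)
sizes = pd.Series(['medium', 'small', 'large', 'small'], dtype=size_type)

print(sizes.min(), '<', sizes.max())   # comparisons are meaningful here
print(sizes.cat.codes.tolist())        # underlying integer codes: [1, 0, 2, 0]
```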
###Markdown
Pandas get_dummies() Even though the columns are categories, they still need to be encoded, and Pandas has a function to help with that.
###Code
peng_encoded = pd.get_dummies(penguins, columns=['island','species'], prefix=['island', 'species'])
peng_encoded
###Output
_____no_output_____
###Markdown
Now, just simple 0/1 for 'sex'.
###Code
peng_encoded['sex'] = peng_encoded['sex'].map({'Male':0, 'Female':1})
peng_encoded.isnull().sum()
###Output
_____no_output_____
###Markdown
--- Scikit-learn also has many types of encoding for categories. You should spend some time investigating them. Impute Missing Data Typically, we use capital 'x' as the dataset variable and 'y' as the target. We will be using a simple form of **supervised** machine learning, RandomForestClassifier. In supervised ML, our training dataset has to have known target values, so we will divide our data for training, testing and prediction. Now, for our data, we want all the rows that have a complete set of data and all the columns except 'sex'. That one will go into our training target dataset.
###Code
columns = [c for c in peng_encoded.columns if c != 'sex']
X = peng_encoded[peng_encoded.sex.notnull()].loc[:,columns]
X
###Output
_____no_output_____
###Markdown
Now, we need all the 'sex' values that are not null and correspond to the array above.The first set of square brackets chooses which rows we want -- the not-null ones -- and the second set chooses the column we want.
###Code
y = peng_encoded[peng_encoded.sex.notnull()]['sex']
y
###Output
_____no_output_____
###Markdown
Next, I'm going to split up the training data so I have a testing set and can look at accuracy statistics.
###Code
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(X, y, test_size=0.70)
###Output
_____no_output_____
###Markdown
Now we import and use the classifier.
###Code
from sklearn.ensemble import RandomForestClassifier
clf = RandomForestClassifier(n_estimators=100, random_state=0)
clf.fit(x_train, y_train)
###Output
_____no_output_____
###Markdown
That's all there is to training the Classifier. Let's see how we did:
###Code
y_pred = clf.predict(x_test)
from sklearn import metrics
print(f'Model accuracy = {metrics.accuracy_score(y_test,y_pred)}')
###Output
Model accuracy = 0.8974358974358975
###Markdown
That's almost 90% accurate. Not bad! We could probably get better by segregating island and species, and on larger datasets it would probably be worth it. Now, let's get the missing values and move on...
###Code
x_missing = peng_encoded[peng_encoded.sex.isnull()].loc[:,columns]
x_missing
y_missing = clf.predict(x_missing)
y_missing
peng_encoded_bk = peng_encoded.copy()
x_missing['sex'] = y_missing.astype('int')
x_missing
###Output
_____no_output_____
###Markdown
Interestingly enough, it looks like the original indexes followed the rows all the way through the process. We should be able to use that to put the values back in the original dataframe.
###Code
x_missing.loc[8]
penguins.loc[8]
x_missing.index.to_list()
x_missing['sex'] = x_missing['sex'].map({0:'Male', 1:'Female'})
x_missing
for i in x_missing.index.to_list():
penguins['sex'].loc[i] = x_missing['sex'].loc[i]
penguins.info()
###Output
<class 'pandas.core.frame.DataFrame'>
Int64Index: 342 entries, 0 to 343
Data columns (total 7 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 species 342 non-null category
1 island 342 non-null category
2 bill_length_mm 342 non-null float64
3 bill_depth_mm 342 non-null float64
4 flipper_length_mm 342 non-null float64
5 body_mass_g 342 non-null float64
6 sex 342 non-null category
dtypes: category(3), float64(4)
memory usage: 22.8 KB
###Markdown
Now, the rows with the missing 'sex' data have the predictions added back in -- the loop above wrote them straight into `penguins`, so there is nothing left to concatenate or re-index. Let's check to make sure there aren't any NaNs left:
###Code
peng_encoded.isnull().sum()
###Output
_____no_output_____
###Markdown
Data Variance Sometimes columns will have only 1 value or a very small number of unique values. Analytically speaking, these columns aren't worth much. **Variance** is defined as the average squared deviation from the mean. Therefore, columns that have only one value have **_zero_** variance. Let's briefly load a "standard" example machine learning data set to demonstrate.
###Code
oil = pd.read_csv('data_wk3/oil-spill.csv', header=None)
oil.head()
###Output
_____no_output_____
###Markdown
We'll use the Pandas `nunique()` function to see how many unique values exist in each column:
###Code
oil.nunique()
###Output
_____no_output_____
###Markdown
As you can see, Column 22 has only 1 unique value and several columns have fewer than 10 unique values. Boxplots Box-and-whisker plots, or just boxplots, can help detect this type of problem, also. Recall: * The top and bottom "whiskers" show min and max values, * The bottom of the box shows the first quartile* The line in the middle shows the median* The top of the box shows the third quartile.* Outliers are shown outside of the min/max whiskers.Pandas knows how to make boxplots of columns. It can plot all columns (default) or a list of columns. Here are a couple of examples from the oil spill dataset above:
###Code
oil.boxplot([2,3])
###Output
_____no_output_____
###Markdown
Now, compare that to columns 22, 36, 45, etc.
###Code
oil.boxplot([22, 36, 45, 49])
###Output
_____no_output_____
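Columns like these could also be screened programmatically. Here is a sketch (not part of the original notebook) using scikit-learn's `VarianceThreshold`, which with the default threshold only drops the truly constant columns such as column 22; a higher threshold would catch near-constant ones as well.

```python
# Sketch: automatically drop zero-variance (single-valued) columns.
from sklearn.feature_selection import VarianceThreshold

selector = VarianceThreshold(threshold=0.0)   # keep only columns with variance > 0
reduced = selector.fit_transform(oil)
print(oil.shape, '->', reduced.shape)         # the constant column should disappear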
###Markdown
***Something is obviously going on with those columns, and they should be investigated before being used in modeling and analysis. You don't want to throw away a perfectly good categorical column that is already in number form.***Now, let's go back to our penguins. If we just ask Pandas to create a default boxplot, we see that the body_mass_g column overshadows the others (indicating it may be a good candidate for scaling, later).
###Code
penguins.boxplot()
###Output
_____no_output_____
###Markdown
Let's look at the columns without body_mass_g:
###Code
penguins.columns
penguins.boxplot(['bill_length_mm', 'bill_depth_mm','flipper_length_mm'])
penguins.boxplot(['bill_length_mm', 'bill_depth_mm'])
penguins.nunique()
###Output
_____no_output_____
###Markdown
Duplicate Rows Pandas can give us some help in identifying duplicate rows, although it just gives us a True or False. To see it in action, let's look at the **Iris** data set:
###Code
iris = sns.load_dataset('iris')
iris.duplicated()
###Output
_____no_output_____
###Markdown
To really see what is going on, we can use the Python `any()` filter:
###Code
iris.duplicated().any()
###Output
_____no_output_____
###Markdown
OK, so we have some duplicates. Let's use that boolean mask to index the dataset and see which ones:
###Code
dupes = iris.duplicated()
print(iris[dupes])
###Output
sepal_length sepal_width petal_length petal_width species
142 5.8 2.7 5.1 1.9 virginica
###Markdown
Pandas also has a `drop_duplicates()` function to help.
###Code
print(f'Before: {iris.shape}')
iris.drop_duplicates(inplace=True)
print(f'After: {iris.shape}')
###Output
Before: (150, 5)
After: (149, 5)
###Markdown
Outliers Outliers are data points that **lie out**side of "normal" observations. As we saw earlier, boxplots can show us outliers, and so can scatter plots. Remember, column 3 of the **oil** data set had some outliers:
###Code
oil.plot.scatter(2,3, c='darkblue')
###Output
_____no_output_____
###Markdown
It really gets fun when we add color. Let's look at our penguins...I'm switching to Seaborn because Pandas can't interpret 'species' unless I convert it to a number.
###Code
sns.scatterplot(data=penguins,x='flipper_length_mm', y='body_mass_g',hue='species')
###Output
_____no_output_____
###Markdown
Wow... there is clearly a linear relationship between flipper length and body mass, and Gentoo penguins are obviously larger than the other two species. Sometimes the outliers aren't as easy to identify visually. **Jason Brownlee** presents several methods for mathematically determining outliers, including:* Standard deviation method* Interquartile range methodThen he introduces **Automatic Outlier Detection** using SciKit-Learn's `LocalOutlierFactor` class and demonstrates how removing outliers improves predictive accuracy using one of the simplest forms of predictive modeling -- linear regression. In a nutshell, linear regression uses the training data to draw a straight "regression" line on the graph of the data. Predictive requests are then matched up to the appropriate (x, y) coordinate and the resulting value returned.Mr. Brownlee's code is reproduced below:First, **linear regression** leaving the outliers in:
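As a quick illustration of the interquartile-range method named above (a sketch of the general rule, not Mr. Brownlee's code): flag values that fall outside [Q1 - 1.5*IQR, Q3 + 1.5*IQR].

```python
# Sketch of the IQR rule applied to column 3 of the oil data.
q1, q3 = oil[3].quantile([0.25, 0.75])
iqr = q3 - q1
lower, upper = q1 - 1.5 * iqr, q3 + 1.5 * iqr
flagged = oil[(oil[3] < lower) | (oil[3] > upper)]
print(f'{len(flagged)} potential outliers in column 3')
```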
###Code
# evaluate model on the raw dataset
from pandas import read_csv
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_absolute_error
# load the dataset
df = read_csv('data_wk3/housing.csv', header=None)
# retrieve the array
data = df.values
# split into input and output elements
X, y = data[:, :-1], data[:, -1]
# split into train and test sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=1)
# fit the model
model = LinearRegression()
model.fit(X_train, y_train)
# evaluate the model
yhat = model.predict(X_test)
# evaluate predictions
mae = mean_absolute_error(y_test, yhat)
print('MAE: %.3f' % mae)
###Output
MAE: 3.417
###Markdown
According to https://www.statisticshowto.com/absolute-error/, **Absolute Error** measures the difference between a measured value and the "true" value. In this case, it is the difference between the **predicted** value and the true value.**Mean Absolute Error (MAE)** measures the mean of all absolute errors in a group of observations (like our test data set).Now, let's remove the outliers:
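As a small aside before re-running with the outliers removed: the MAE just reported is simply the average of the absolute differences |actual - predicted| over the test set, which we could also compute by hand (assuming `y_test` and `yhat` from the cell above are still in memory):

```python
# MAE computed by hand; should match sklearn's mean_absolute_error above.
manual_mae = np.mean(np.abs(y_test - yhat))
print('MAE (manual): %.3f' % manual_mae)
```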
###Code
# evaluate model on training dataset with outliers removed
from pandas import read_csv
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
from sklearn.neighbors import LocalOutlierFactor
from sklearn.metrics import mean_absolute_error
# load the dataset
df = read_csv('data_wk4/housing.csv', header=None)
# retrieve the array
data = df.values
# split into input and output elements
X, y = data[:, :-1], data[:, -1]
# split into train and test sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=1)
# summarize the shape of the training dataset
print(X_train.shape, y_train.shape)
# identify outliers in the training dataset
lof = LocalOutlierFactor()
yhat = lof.fit_predict(X_train)
# select all rows that are not outliers
mask = yhat != -1
X_train, y_train = X_train[mask, :], y_train[mask]
# summarize the shape of the updated training dataset
print(X_train.shape, y_train.shape)
# fit the model
model = LinearRegression()
model.fit(X_train, y_train)
# evaluate the model
yhat = model.predict(X_test)
# evaluate predictions
mae = mean_absolute_error(y_test, yhat)
print('MAE: %.3f' % mae)
###Output
(339, 13) (339,)
(305, 13) (305,)
MAE: 3.356
###Markdown
---The decrease in **Mean Absolute Error** indicates an increase in accuracy. The easy assumption is that the outliers biased the model's learning and thus affected the predictive ability. Feature Selection **Features** are, of course, the columns of your dataset. **Feature selection** involves deciding which features to include in the training and usage of machine learning models. > Many models, especially those based on regression slopes and intercepts, will estimateparameters for every term in the model. Because of this, the presence of non-informative variables can add uncertainty to the predictions and reduce the overalleffectiveness of the model.>> *— Page 488, Applied Predictive Modeling, 2013.*Besides accuracy concerns, with many model types, more features will mean increased training time. Pairplot Seaborn has the ability to plot each feature, one by one, against the others. As you can see below, this is an easy way to uncover linear relationships.**Caution:** Doing a pairplot on a large dataset with no restrictions can bring your computer to its knees. Only plot **subsets** of the variables, as appropriate.
###Code
sns.pairplot(penguins)
###Output
_____no_output_____
###Markdown
Correlation matrix and heatmaps A Pandas DataFrame knows how to create a correlation matrix showing relationships among all the features. While this can be useful, it outputs a table of numbers that can be hard to interpret over a large dataset.The `corr()` function does a Pearson correlation by default, although others are available.
###Code
# Pandas' pearson correlation
corrmat = penguins.corr()
corrmat
###Output
_____no_output_____
###Markdown
A much better way is to put the correlation matrix into a Seaborn heatmap.Note that the heatmap will show both positive and negative correlations.
###Code
f, ax = plt.subplots(figsize=(12,10)) #setting some parameters of the plot to help readability
sns.heatmap(corrmat, vmax = .8, square=True)
###Output
_____no_output_____
###Markdown
---It might be useful to see the actual correlation values in the heatmap cells. To do that, we can turn on cell annotations with the `annot=True` parameter:
###Code
# `cols` is not defined at this point in the notebook, so take the axis labels from the correlation matrix itself.
hm = sns.heatmap(corrmat, cbar=True, annot=True, square=True, fmt='.2f', annot_kws={'size':14}, yticklabels=corrmat.columns, xticklabels=corrmat.columns)
plt.show()
###Output
_____no_output_____
###Markdown
But what if we have a whole bunch of variables? Remember our housing dataset? Let's look at a version that has headers real quick:
###Code
house = pd.read_csv('data_wk4/housing_train.csv')
house.head()
###Output
_____no_output_____
###Markdown
Let's drop that Id column. It won't help anything in the analysis.
###Code
house.drop('Id', axis=1, inplace=True)
house.shape
###Output
_____no_output_____
###Markdown
80 columns (after dropping Id) should be enough to prove a point.First, the heatmap:
###Code
house_cor = house.corr()
sns.heatmap(house_cor)
###Output
_____no_output_____
###Markdown
Looks a bit like a pizza!Maybe making it bigger will help.
###Code
f, ax = plt.subplots(figsize=(12,10)) #setting some parameters of the plot to help readability
sns.heatmap(house_cor, vmax = .8, square=True)
###Output
_____no_output_____
###Markdown
That's still a lot of variables. **SalePrice** is often our ML target variable, trying to predict sales price based on other factors. Let's look at the top 10 variables related to SalePrice.
###Code
k = 10
cols = house_cor.nlargest(k, 'SalePrice')['SalePrice'].index
#Numpy corrcoef gives a pearson correlation coefficient
cm = np.corrcoef(house[cols].values.T)
sns.set(font_scale = 1.25)
f, ax = plt.subplots(figsize=(10,8))
hm = sns.heatmap(cm, cbar=True, annot=True, square=True, fmt='.2f', annot_kws={'size':14},yticklabels=cols.values, xticklabels=cols.values)
plt.show()
###Output
_____no_output_____
###Markdown
Statistical selection **Jason Brownlee** very helpfully breaks statistics-based feature selection down based on **input variable** and **output type.** Either one can be numerical or categorical. The output variable type will be determined by the type of problem you are solving.**Output Variable**:* Numerical -- Regression predictive modeling.* Categorical -- Classification predictive modeling.Pearson's was demonstrated above with the correlation matrix and heatmaps. Other methods will be demonstrated later and throughout your education. Feature importance -- RandomForest One of my favorite methods of choosing features is to let the RandomForest algorithm tell me what it decided was important.Tree-based algorithms operate very much like a single *binary tree*. A starting point is determined, then a decision is made to get down to the next level, over and over, until an answer is reached. The graph above showing variable selection is a good example. Classification and regression trees (CART) track the importance of the variables at the decision points.A **RandomForest** uses a whole bunch of decision trees to solve a problem, and because many trees are used, many solutions can be tried, improving results. Let's use a `RandomForestRegressor` on the housing dataset and see if it agrees with what our heatmap told us.**First**, the RandomForestRegressor will only use numeric data, so we are going to ignore the categorical columns, for now. A truly complete picture would include them, of course. The correlation matrix dataset above should give us the column names of the numeric columns, so we'll just make a subset using it, but first we'll have to deal with **missing values**. In this case, we will drop them. **If I were really trying to do predictions, I would impute them.**
###Code
house_cor.columns
#missing data
total = house.isnull().sum().sort_values(ascending=False)
percent = (house.isnull().sum()/house.isnull().count()).sort_values(ascending=False)
missing_data = pd.concat([total, percent], axis=1, keys=['Total', 'Percent'])
missing_data.head(20)
#dealing with missing data
house = house.drop((missing_data[missing_data['Total'] > 1]).index,1)
house = house.drop(house.loc[house['Electrical'].isnull()].index)
house.isnull().sum().max() #just checking that there's no missing data missing...
house_cor = house.corr()
X = house[house_cor.columns[:-1]]
X
y = house['SalePrice'].values
y
from sklearn.ensemble import RandomForestRegressor
model = RandomForestRegressor()
model.fit(X,y)
importance_list = list(zip(X.columns, model.feature_importances_))
sorted_importance = sorted(importance_list, key=lambda x: x[1], reverse=True)
sorted_importance
# importance = model.feature_importances_
# X.columns
###Output
_____no_output_____
###Markdown
You would need to make a decision at what point a feature is not important enough to use. **Many times I'll throw in a column of random numbers and use all the columns that are more important than the randoms.**Here is a really fancy way to display the list -- I'll leave it up to you to figure out how it works.
###Code
max_feature_len = len(max(X.columns, key=len))
for feature, rank in sorted_importance:
dots = max_feature_len - len(feature)
print(f'{feature}: {"."*dots} {rank*100:.2f}%')
###Output
OverallQual: .. 58.98%
GrLivArea: .... 11.11%
TotalBsmtSF: .. 4.33%
2ndFlrSF: ..... 4.15%
BsmtFinSF1: ... 3.22%
1stFlrSF: ..... 2.72%
GarageCars: ... 2.12%
GarageArea: ... 1.83%
LotArea: ...... 1.81%
YearBuilt: .... 1.57%
YearRemodAdd: . 1.08%
TotRmsAbvGrd: . 0.78%
WoodDeckSF: ... 0.68%
BsmtUnfSF: .... 0.67%
FullBath: ..... 0.66%
OpenPorchSF: .. 0.65%
OverallCond: .. 0.63%
Fireplaces: ... 0.53%
MoSold: ....... 0.51%
BedroomAbvGr: . 0.28%
MSSubClass: ... 0.28%
YrSold: ....... 0.26%
ScreenPorch: .. 0.20%
EnclosedPorch: 0.16%
HalfBath: ..... 0.15%
KitchenAbvGr: . 0.15%
BsmtFullBath: . 0.14%
PoolArea: ..... 0.12%
BsmtFinSF2: ... 0.11%
3SsnPorch: .... 0.06%
LowQualFinSF: . 0.03%
BsmtHalfBath: . 0.02%
MiscVal: ...... 0.02%
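Returning to the random-column trick mentioned above -- here is a rough sketch of it, reusing `X` and `y` from the cells above (the cutoff it produces will vary from run to run):

```python
# Sketch: add a column of pure noise and keep only the features that out-rank it.
X_rand = X.copy()
X_rand['random_noise'] = np.random.rand(len(X_rand))

rf_check = RandomForestRegressor(random_state=0).fit(X_rand, y)
ranked = sorted(zip(X_rand.columns, rf_check.feature_importances_),
                key=lambda t: t[1], reverse=True)

noise_rank = [name for name, _ in ranked].index('random_noise')
print(f'{noise_rank} features rank above the random column')
```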
###Markdown
---The section below doesn't really relate to anything beyond some advanced Pandas usage. I just didn't want to throw it away after I got that far and then decided to go a different way. Bonus -- finding average weights by gender Another way to deal with missing 'sex' entries is to try to fill in the values based on weight (or mass, in this case). The cells below make a dataset of the min, max and mean for each species as a start to this method.
###Code
penguins[(penguins.species == 'Adelie') & (penguins.sex == 'Male')].body_mass_g.max()
penguins[(penguins.species == 'Adelie') & (penguins.sex == 'Male')].body_mass_g.min()
###Output
_____no_output_____
###Markdown
It's great to be able to figure these out one at a time, but what if we had 100 species? We need to generalize this.
###Code
peng_mass = penguins.groupby(['species','sex']).agg({'body_mass_g':['min','max', 'mean']})
peng_mass
type(peng_mass)
###Output
_____no_output_____
###Markdown
--- OK, this is good. This gave us min and max ranges for each species, but our resulting dataframe is some weird multi-indexed thing. We can **reset the index** to fix some of it:
###Code
# Remember, this isn't permanent until we use an assignment ('=')
peng_mass.reset_index()
###Output
_____no_output_____
###Markdown
---But, the min, max, and mean columns are still oddly-named and difficult to access, as seen below:
###Code
peng_mass = peng_mass.reset_index()
peng_mass.iloc[0]
peng_mass.columns
###Output
_____no_output_____
###Markdown
---OK, one more try. Let's look at the `to_flat_index()` function:
###Code
peng_mass.columns.to_flat_index()
###Output
_____no_output_____
###Markdown
---Getting closer, maybe. Let's combine with a string `join()` function:
###Code
peng_mass.columns.to_flat_index().str.join('_')
###Output
_____no_output_____
###Markdown
---That looks pretty good. Let's make it permanent and see what the columns look like:
###Code
peng_mass.columns = peng_mass.columns.to_flat_index().str.join('_')
peng_mass
###Output
_____no_output_____
###Markdown
---OK, at this point we have options for how to fill in the missing values based on our new table. Unfortunately, there is overlap in the weight ranges between males and females, so we can't just check which range the unknown falls into.
###Code
penguins[penguins.sex.isnull()]
peng_mass[(peng_mass.species_=='Adelie') & (peng_mass.sex_=='Female')].body_mass_g_mean
3700 - 3368
3700 - 4043
###Output
_____no_output_____
|
data_analysis/plot_behavioral.ipynb
|
###Markdown
plot behavioral dataAn older version used to plot behavioral data
###Code
import sys
import os.path as op
import numpy as np
import pandas as pd
import mne
import matplotlib.pyplot as plt
import pickle
module_path = op.abspath(op.join('..'))
if module_path not in sys.path:
sys.path.append(module_path)
from data_analysis.functions_preprocessing import split_raws
from data_analysis.functions_behavioral import \
(create_event_df, remove_ghost_triggers, calculate_alpha,
join_event_dfs, remove_outliers, events_from_event_df)
# Create path to the folder "behavioral"
behav_dir = "/net/store/nbp/projects/hyperscanning/study_project/NBP_Hyperscanning/data_analysis/behavioral_data"
subject_dir = '/net/store/nbp/projects/hyperscanning/hyperscanning-2.0/mne_data/sourcedata/'
"""
pair = 202
participant = 0
# 1.2 define the subjects id and its path
subj_id = "sub-{0}_p-{1}".format(pair, participant)
subs_path = subject_dir + "sub-{0}/eeg/sub-{0}_task-hyper_eeg.fif".format(pair)
behav_path = op.join(behav_dir, str(pair) + ".csv")
# load the data
combined_raw = mne.io.read_raw_fif(subs_path, preload=True)
"""
all_subs = pd.DataFrame()
bad_list = []
for subs in ['202','203','204','205','206','207','208','209','211','212']:
behav_path = op.join(behav_dir, "{}.csv".format(subs))#test
# calculate the alpha on all trials
df = calculate_alpha(pd.read_csv(behav_path))
# our filtering condition: take vp 1 for odd trials and vp 2 for even trials
df = df[df["trial"]%2 != df["subject"] - 1]
bad_trls = len(df[df["alpha"] > 360]["trial"].unique())
per_a = bad_trls / df["trial"].max() * 100
print("Pair {0} - Percent of incorrect trials: {1}%".format(subs, per_a))
bad_list.append(per_a)
all_subs = all_subs.append(df)
all_subs["unique_trials"] = all_subs["pair"].astype(str) + "-" + all_subs["trial"].astype(str)
bad_trls = len(all_subs[all_subs["alpha"] > 360]["unique_trials"].unique())
per_a = bad_trls / len(all_subs["unique_trials"].unique()) * 100
print("All subs - Percent of incorrect trials: {0} %".format(per_a))
# reset index
all_subs.index = range(len(all_subs))
#print(all_subs["unique_trials"])
# get all taps where one person tapped twice before the other person did
all_subs[all_subs["alpha"] > 360]
# get all the trials that were affected by these double taps
len(all_subs[all_subs["alpha"] > 360]["trial"].unique())
###Output
_____no_output_____
###Markdown
Behavioral Plots
###Code
# import statsmodels.api as sm
from scipy import stats
from data_analysis.functions_behavioral import \
(create_event_df, remove_ghost_triggers, calculate_alpha,
join_event_dfs, remove_outliers, events_from_event_df)
# set basic parameters:
bin_size = int(np.ceil(np.sqrt(len(all_subs))))
# set path to save plots
behav_plots = "/net/store/nbp/projects/hyperscanning/study_project/til/behavioral_plots/"
bin_size
###Output
_____no_output_____
###Markdown
1 - Boxplot of incorrect trial distribution
###Code
# Boxplot of incorrect trial distribution
# display(np.median(bad_list))
plt.boxplot(bad_list)
plt.ylabel('Percent')
plt.xlabel('Incorrect trials')
plt.title('Distribution of Incorrect Trials over Subjects')
plt.tick_params(
axis='x', # changes apply to the x-axis
which='both', # both major and minor ticks are affected
bottom=False, # ticks along the bottom edge are off
top=False, # ticks along the top edge are off
labelbottom=False) # labels along the bottom edge are off
# create and save fig object
bad_trials = plt.gcf()
#pickle.dump(bad_trials, open(behav_plots + "bad_trials.p", "wb"))
###Output
_____no_output_____
###Markdown
2 - Distribution of all valid alphas
###Code
# get the distribution of valid alphas
#all_subs[all_subs["alpha"] <= 360]["alpha_lin"].plot.hist(bins=len(all_subs)//20)
all_subs[all_subs["alpha"] <= 360]["alpha_lin"].plot.hist(bins=bin_size)
plt.xlabel('Alpha (linearised)')
plt.ylabel('Occurrence')
plt.title('All valid alphas (alpha<360°)')
# create fig object
alpha_all = plt.gcf()
# save fig object
#pickle.dump(alpha_all, open(behav_plots + "alpha_all.p", 'wb'))
np.ceil(np.sqrt(len(all_subs)))
###Output
_____no_output_____
###Markdown
3 - Distribution of all valid alphas per tapnr
###Code
small_subs = all_subs[all_subs["alpha"] <= 360]
small_subs.groupby("tapnr")["alpha_lin"].plot.hist(bins=bin_size, alpha=0.4)
plt.legend(['Tap 1','Tap 2','Tap 3','Tap 4','Tap 5','Tap 6','Tap 7','Tap 8'])
plt.xlabel('Alpha (linearised)')
plt.ylabel("Occurrence")
plt.title('Valid alphas (alpha<360°) - tapwise')
# create fig object
alpha_tapwise = plt.gcf()
# save fig object
#pickle.dump(alpha_tapwise, open(behav_plots + "alpha_tapwise.p", 'wb'))
###Output
_____no_output_____
###Markdown
4 - Taps in first 1.5 seconds vs. last 1.5 seconds
###Code
# save time of first and last tap per trial in dataframe
first_tap = all_subs.index - all_subs.index[all_subs.index%9]
all_subs["first_tap"] = all_subs["ttap"][first_tap].to_numpy()
last_tap = all_subs.index - all_subs.index[(all_subs.index%9)] +8
all_subs["last_tap"] = all_subs["ttap"][last_tap].to_numpy()
# TODO: Perform a Shapiro-Wilk normality test
# TODO: Test H0
# only use alpha values of correct trials
all_subs = all_subs[all_subs["alpha"] <= 360]
# Plot those alpha values that occur within the range of first tap + 1.5s and last tap - 1.5s (of each trial)
all_subs[all_subs["ttap"] <= all_subs.first_tap+1.5 ]["alpha_lin"].plot.hist(bins=bin_size)
all_subs[all_subs["ttap"] >= all_subs.last_tap-1.5 ]["alpha_lin"].plot.hist(bins=bin_size)
plt.suptitle('Early vs. late taps')
plt.legend(['First 1.5s','Last 1.5s'])
plt.xlabel('Alpha (linearised)')
plt.ylabel('Occurrence')
# create fig object
early_vs_late_taps = plt.gcf()
# save fig object
#pickle.dump(early_vs_late_taps, open(behav_plots + "early_vs_late.p", 'wb'))
len(all_subs[all_subs["ttap"] >= all_subs.last_tap-1.5 ]["alpha_lin"])
# only use alpha values of correct trials
all_subs = all_subs[all_subs["alpha"] <= 360]
# Plot those alpha values that occur within the range of first tap + 1.5s and last tap - 1.5s (of each trial)
first = all_subs[all_subs["ttap"] <= all_subs.first_tap+1.5 ]
first_mean = np.mean(first["alpha_lin"])
first_median = np.median(first["alpha_lin"])
first_mode,xyz = stats.mode(np.around(first["alpha_lin"], decimals=2))
#first_mode = max(first["alpha_lin"], key = first["alpha_lin"].count)
last = all_subs[all_subs["ttap"] >= all_subs.last_tap-1.5 ]
last_mean = np.mean(last["alpha_lin"])
last_median = np.median(last["alpha_lin"])
last_mode,xyz2 = stats.mode(np.around(last["alpha_lin"], decimals=2))
fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(15,5), sharey=True, sharex=True)
plt.suptitle('Early vs. late taps')
first["alpha_lin"].plot.hist(bins=bin_size, ax = axes[0], logx=True)
last["alpha_lin"].plot.hist(bins=bin_size, ax = axes[1], color='orange', logx=True)
axes[0].set_xlabel("Alpha (linearised)")
axes[0].set_ylabel("Occurrence")
axes[0].set_title("First 1.5 seconds")
axes[1].set_xlabel("Alpha (linearised)")
axes[1].set_title("Last 1.5 seconds")
names = ["mode", "median", "mean"]
colors = ['red', 'blue', 'green']
first_measurement = [first_mode, first_median, first_mean]
last_measurement = [last_mode, last_median, last_mean]
#axes[0].axvline(x=first_median, linestyle='--', linewidth=2.5, label='median', c='green')
#axes[0].axvline(x=first_mean, linestyle='--', linewidth=2.5, label='mean', c='blue')
#axes[0].axvline(x=first_mode, linestyle='--', linewidth=2.5, label='mode', c='red')
for first_measurement, name, color in zip(first_measurement, names, colors):
axes[0].axvline(x=first_measurement, linestyle='--', linewidth=2.5,
label='{0} at {1}'.format(name, first_measurement), c=color)
axes[0].legend();
for last_measurement, name, color in zip(last_measurement, names, colors):
axes[1].axvline(x=last_measurement, linestyle='--', linewidth=2.5,
label='{0} at {1}'.format(name, last_measurement), c=color)
axes[1].legend();
# create fig object
#early_vs_late_taps = plt.gcf()
# save fig object
#pickle.dump(early_vs_late_taps, open(behav_plots + "early_vs_late.p", 'wb'))
#len(all_subs[all_subs["ttap"] >= all_subs.last_tap-1.5 ]["alpha_lin"])
###Output
_____no_output_____
###Markdown
4.2 early vs. late - pairwise 4.3 - Statistics
###Code
all_subs = all_subs[all_subs["alpha"] <= 360]
first_taps = all_subs[all_subs["ttap"] <= all_subs.first_tap+1.5 ]["alpha_lin"]
last_taps = all_subs[all_subs["ttap"] >= all_subs.last_tap-1.5 ]["alpha_lin"]
#ks, p = kstest(first_taps, "norm")
alpha_level = .05
shap_f, p_f = stats.shapiro(first_taps)
shap_l, p_l = stats.shapiro(last_taps)
if p_f < alpha_level:
    print("Alpha values of first taps are not normally distributed")
else:
    print("Alpha values of first taps look normally distributed (H0 not rejected)")
if p_l < alpha_level:
    print("Alpha values of last taps are not normally distributed")
else:
    print("Alpha values of last taps look normally distributed (H0 not rejected)")
# Test for difference between first and last taps
mws, p = stats.mannwhitneyu(first_taps,last_taps)
mws,p
if p < alpha_level:
print("reject H0")
else:
print("H0 not rejectable")
print(len(first_taps),len(last_taps))
###Output
8890 6368
###Markdown
4.2 - Boxplot early vs. late
###Code
plt.boxplot([all_subs[all_subs["ttap"] <= all_subs.first_tap+1.5 ]["alpha_lin"],
all_subs[all_subs["ttap"] >= all_subs.last_tap-1.5 ]["alpha_lin"]])
#display(np.median(all_subs[all_subs["ttap"] <= all_subs.first_tap+1.5 ]["alpha_lin"]))
#display(np.median(all_subs[all_subs["ttap"] >= all_subs.last_tap-1.5 ]["alpha_lin"]))
plt.title("Early vs. late taps")
plt.ylabel("Alpha (linearised)")
plt.xticks(ticks=np.arange(1,3),labels=["First 1.5s","Last 1.5s"])
# create fig object
boxplot_early_vs_late = plt.gcf()
# save fig object
#pickle.dump(boxplot_early_vs_late, open(behav_plots + "boxplot_early_vs_late.p", 'wb'))
###Output
_____no_output_____
###Markdown
5 - Alpha over Blocks
###Code
all_subs = all_subs[all_subs["alpha"] <= 360]
alpha_block = all_subs.groupby("block")
alpha_block["alpha_lin"].plot.hist(bins=bin_size)
# create fig object
alpha_blocks = plt.gcf()
# save fig object
#pickle.dump(alpha_blocks, open(behav_plots + "alpha_blocks.p", 'wb'))
# One distribution of linearised alphas per block (1-12)
plt.boxplot([all_subs[all_subs["block"] == b]["alpha_lin"] for b in range(1, 13)])
plt.title("Alpha over Blocks")
plt.ylabel("Alpha (linearised)")
plt.xlabel("Block Nr.")
plt.xticks(ticks=np.arange(1,13),labels=range(1,13))
# create fig object
boxplot_blocks = plt.gcf()
# save fig object
#pickle.dump(boxplot_blocks, open(behav_plots + "boxplot_blocks.p", 'wb'))
###Output
_____no_output_____
|
impedancia/ModeladoInversoGEMTIPsp.ipynb
|
###Markdown
Cole-Cole Inverse ModelingBy David A. Miranda, PhD2020 The Generalized Effective-Medium Theory of Induced Polarization, GEMTIP, was formulated by Professor [Zhdanov in 2008](http://www.cemi.utah.edu/PDF_70_10/2008b.pdf). In this theory the electrical properties of a heterogeneous medium are modeled by an effective homogeneous one, in the same way that an equivalent circuit is obtained for a complex electrical circuit made up of many elements.For the case of spherical inclusions, the effective electrical conductivity $\sigma_e$ is given by:$$\sigma_e = \sigma_0 \left\{ 1 + \sum_{l=1}^N f_l M_l \left[ 1 - \frac{1}{1 + (j\omega\tau_l)^{c_l}} \right] \right\}$$Where $\sigma_0 = 1/\rho_0$ is the electrical conductivity of the host medium; $f_l$, the volume fraction occupied by the $l$-th type of inclusion; $M_l = 3 \frac{\rho_0 - \rho_l}{2\rho_l + \rho_0}$, the polarizability of the $l$-th type of inclusion; $\rho_l = 1/\sigma_l$, the electrical resistivity of the $l$-th type of inclusion; $\omega$, the angular frequency of the external perturbation; $\tau_l$, the relaxation time of the $l$-th type of inclusion; and $c_l$, the relaxation parameter of the $l$-th type of inclusion.With this Jupyter Notebook you will learn to perform the inverse modeling of GEMTIP with spherical inclusions, using an adaptation of the algorithm described in [Miranda et al. 2014](https://link.springer.com/article/10.1007/s10008-013-2262-5). For the model to be inverted you must choose the number of different inclusion types $N$. The result of the inverse model will be the parameters, organized in a dictionary with the following structure:````{ 'rho_0' : R0, 'rho_l' : [ ], 'f_l' : [ ], 'tau_l' : [ ], 'c_l' : [ ],}````Where R0 is the resistivity of the host medium and **\[ \]** is a list with $N$ values for the respective parameter. 1. Import the libraries
###Code
import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import basinhopping
import json
###Output
_____no_output_____
###Markdown
2. Input data for the inverse model
###Code
N = 3 # Number of inclusion types
f = [ 1.00000000e-02, 3.72759372e-02, 1.38949549e-01, 5.17947468e-01,
1.93069773e+00, 7.19685673e+00, 2.68269580e+01, 1.00000000e+02,
3.72759372e+02, 1.38949549e+03, 5.17947468e+03, 1.93069773e+04,
      7.19685673e+04, 2.68269580e+05, 1.00000000e+06] # Frequency
R = [ 17.59927455, 17.40944414, 17.29185005, 17.07900901, 16.52842653,
15.31843638, 13.85775394, 13.00050263, 12.63738157, 12.35519707,
      11.45672164, 9.53041417, 8.6294919 , 8.45819132, 8.4246839 ] # Real part of the impedance
I = [ -0.46078344, -0.25081367, -0.26887919, -0.50916453, -1.00667121,
-1.48341894, -1.30057526, -0.7756347 , -0.52414666, -0.75480322,
      -1.54439525, -1.53964853, -0.64887503, -0.21134211, -0.06569951] # Imaginary part of the impedance
###Output
_____no_output_____
###Markdown
3. Inverse modelThe inverse model proposed by [Miranda et al. 2014](https://link.springer.com/article/10.1007/s10008-013-2262-5) is based on the optimization of the mean squared error. In this implementation $\varsigma = 0$ was used, i.e. only the function $OF_1$, defined in equation (23) of the paper, is optimized.The parameters are organized in a list in the following order, and the number of dispersions is inferred from the number of parameters; in this GEMTIP implementation the list is$$ p = [\rho_0/R_0,\ \rho_1/R_0, \dots, \rho_N/R_0,\ f_1, \dots, f_N,\ \tau_1, \dots, \tau_N,\ c_1, \dots, c_N] $$where $R_0$ is a normalization constant (taken below as the maximum measured resistance).
###Code
def gemtip_sh(params, f, verbose=False):
w = 2 * np.pi * f
params_keys = ['rho_0','rho_l', 'f_l', 'tau_l', 'c_l']
sigma_e = np.array([])
    # Parameter validation #
if type(params) != type({}):
if verbose:
print('Warning! The params must be a dictionary.')
return sigma_e
this_keys = params.keys()
for key in params_keys:
if not key in this_keys:
if verbose:
                print('Warning! The parameter %s information was omitted in params.' % key)
return sigma_e
N = [ len(params[key]) for key in params_keys[1:] ]
if np.std(N) != 0:
if verbose:
print('Warning in the number of parameters *_l, please, review.')
return sigma_e
rho_0 = params['rho_0']
rho_l = np.array(params['rho_l'])
f_l = np.array(params['f_l'])
tau_l = np.array(params['tau_l'])
c_l = np.array(params['c_l'])
M_l = 3 * (rho_0 - rho_l) / (2 * rho_l + rho_0)
if np.sum(f_l) >= 1:
if verbose:
print('Warning! The sum of all f_l must be less than one.')
return sigma_e
#############################
w = w.reshape(len(w), 1)
sum_elements = 0
for fl, Ml, Tl, cl in zip(f_l, M_l, tau_l, c_l):
sum_elements += fl*Ml * (1 - 1 / ( 1 + (1j * w * Tl) ** cl ))
sigma_e = (1 + sum_elements)/rho_0
return 1/sigma_e
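# Quick sanity check of gemtip_sh with hypothetical values (not data from this
# notebook): a single type of spherical inclusion filling 20% of a 100 Ohm*m host.
_example_params = {'rho_0': 100.0, 'rho_l': [10.0], 'f_l': [0.2],
                   'tau_l': [1e-3], 'c_l': [0.8]}
_example_f = np.logspace(-1, 5, 7)  # frequencies in Hz
# print(gemtip_sh(_example_params, _example_f))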
def p_to_params(p, R0=1):
N = int( (len(p) - 1) / 4 )
params = {
'rho_0' : R0*p[0],
'rho_l' : R0*p[1:N+1],
'f_l' : p[N+1:2*N+1],
'tau_l' : p[2*N+1:3*N+1],
'c_l' : p[3*N+1:],
}
return params
def params_to_p(params, R0=1):
p = [params['rho_0']/R0]
p = p + list(params['rho_l']/R0)
    for key in ['f_l', 'tau_l', 'c_l']:  # rho_l was already added (scaled by R0) above
p = p + list(params[key])
return p
def OF1(f, Z, p, sigma_R=0.2, R0=1, verbose=False):
    if np.any(np.array(p) < 0): # Penalty for negative parameters
return 10
params = p_to_params(p, R0)
if sum(params['f_l']) > 1:
return 10
if np.any(params['rho_l'] > params['rho_0']):
return 10
if np.any(params['c_l'] > 1):
return 10
Zm = gemtip_sh(params, f, verbose=False)
    if len(Zm) == 0: # Penalty for invalid parameters
return 100
Z = Z.reshape(len(Z))
Zm = Zm.reshape(len(Zm))
of_R = ( 1 - Zm.real/Z.real )**2
of_I = ( 1 - Zm.imag/Z.imag )**2
return sum(sigma_R * of_R + (1-sigma_R) * of_I)
def plot(f, Z, fig=None, ax=None, axis_real=[], axis_imag=[], dpi=120, marker='o', label=''):
ax1 = ax
if fig is None or ax is None:
fig, ax1 = plt.subplots(dpi=dpi)
color = 'tab:red'
if label == '':
ax1.semilogx(f, Z.real, marker, color=color)
if label != '':
ax1.semilogx(f, Z.real, marker, color=color, label=label)
ax1.set_xlabel('Frecuencia [Hz]')
ax1.set_ylabel(r'real{Z} $[\Omega]$', color=color)
ax1.tick_params(axis='y', labelcolor=color)
if len(axis_real) != 0:
ax1.axis(axis_real)
ax2 = ax1.twinx()
color = 'tab:blue'
ax2.semilogx(f, -Z.imag, marker, color=color)
ax2.set_ylabel(r'-imag{Z} $[\Omega]$', color=color)
ax2.tick_params(axis='y', labelcolor=color)
if len(axis_imag) != 0:
ax2.axis(axis_imag)
def func(p, f=[], Z=[], sigma_R=0.1, R0=1):
    return OF1(f, Z, p, sigma_R=sigma_R, R0=R0)
def modelo_inverso(f, R, I, N, p0=[], sigma_R=0.4, niter=250, verbose=True):
if type(f) == type([]):
f = np.array(f)
if int(N) != N:
if verbose:
print('Error! N must be integer.')
return []
if np.std([len(v) for v in [f, R, I]]) != 0:
if verbose:
print('Error! The length of the f, R, and I vector must be equal.')
        return (4*N+1)*[np.NaN]
Rl = np.linspace(min(R), 0.9*max(R), N)
R0 = max(R)
    if len(p0) == 0: # Allows passing initial values
f_prototipe = np.logspace(np.log10(0.1*min(f)), np.log10(2*max(f)), N)
tau = 1/(2*np.pi*f_prototipe)
c = np.random.rand(N)
fl = 0.7/N
p0 = [1] + list(Rl/R0) + N*[fl] + list(tau) + list(c)
Z = np.array(R) + 1j * np.array(I)
minimizer_kwargs = {
'method': 'BFGS',
'args' : (f, Z, sigma_R, R0),
}
p = basinhopping(func, p0, minimizer_kwargs=minimizer_kwargs, niter=niter)
fun_opt = p['fun']
if verbose:
print(' fun:', fun_opt)
print(' message:', p['message'])
return p_to_params(p.x, R0), fun_opt
###Output
_____no_output_____
###Markdown
4. Simulation
###Code
max_niter = 500
max_repeat_num = 1000
fun_goal = 1e-3
verbose = False
fun_opt = 100; params = {} # Initialize output variables
print('Starting the inverse model.\n This process takes a while ...')
iter = 0
i = 0
i_fopt = 0
while fun_opt > fun_goal and iter < max_repeat_num: # Repeat and keep the best model
iter += 1
if iter % 5 == 0:
i += 1
niter = 50 + 5*i if 50 + 5*i < max_niter else max_niter
new_p, new_f = modelo_inverso(f, R, I, N, niter=niter, verbose=verbose)
if new_f < fun_opt:
params = new_p
fun_opt = new_f
    if fun_opt < 1: # Force the search around the last p with fun below 1
i = 0
print(' fun: ', fun_opt)
p0 = params_to_p(params, R0=params['rho_0'])
new_p, new_f = modelo_inverso(f, R, I, N, p0=p0, niter=max_niter, verbose=verbose)
if new_f < fun_opt:
params = new_p
fun_opt = new_f
params_with_list = {}
for k, v in zip(params.keys(), params.values()): # Re-encode params to print as JSON
if k == 'rho_0':
params_with_list[k] = v
continue
params_with_list[k] = list(v)
params_as_json = json.dumps(params_with_list, indent=4)
print('iter: %d, niter: %d' % (iter, niter))
print('Best fun:', fun_opt)
print('params:', params_as_json)
Z = np.array(R) + 1j * np.array(I)
fm = np.logspace(np.log10(min(f)), np.log10(max(f)), 1000)
Zm = gemtip_sh(params, fm, verbose=True)
axis_imag = [ min(f),
max(f),
0.7*min( min(-Z.imag), min(-Zm.imag) ),
1.2*max( max(-Z.imag), max(-Zm.imag) )]
if len(Zm) > 0:
fig, ax = plt.subplots(dpi=120)
plot(f, Z, fig=fig, ax=ax,
axis_imag=axis_imag,
label='Datos')
plot(fm, Zm, marker='', fig=fig, ax=ax,
axis_imag=axis_imag,
label='Modelo Inverso')
_ = ax.legend()
###Output
_____no_output_____
|
tensorflow_demo__bmc.ipynb
|
###Markdown
http://www.bmc.com/blogs/introduction-to-tensorflow-and-logistic-regression/
###Code
import pandas
import tensorflow as tf
import tempfile
hours = [0.50,0.75,1.00,1.25,1.50,1.75,1.75,2.00,2.25,2.50,2.75,3.00,3.25,3.50,4.00,4.25,4.50,4.75,5.00,5.50]
passx = [0,0,0,0,0,0,1,0,1,0,1,0,1,0,1,1,1,1,1,1]
df = pandas.DataFrame(passx)
df['hours'] = hours
df.columns = ['pass', 'hours']
h = df['hours'].apply(lambda x: x * 100).astype(int)
df['hours']=h
df
def input_fn(df):
labels = df["pass"]
return tf.estimator.inputs.pandas_input_fn(x=df,
y=labels,
batch_size=100,
num_epochs=10,
shuffle=False,
num_threads=5)
model_dir = tempfile.mkdtemp()
print("model directory = %s" % model_dir)
hours = tf.feature_column.numeric_column("hours")
base_columns = [hours]
m = tf.estimator.LinearClassifier(model_dir=model_dir, feature_columns=base_columns)
m.train(input_fn(df),steps=None)
results = m.evaluate(input_fn(df),steps=None)
for key in sorted(results):
print("%s: %s" % (key, results[key]))
###Output
accuracy: 0.5
accuracy_baseline: 0.5
auc: 0.805
auc_precision_recall: 0.832721
average_loss: 1.89498
global_step: 10
label/mean: 0.5
loss: 189.498
precision: 0.5
prediction/mean: 0.956125
recall: 1.0
|
Machine Learning - Intro and Intermediate/Machine Learning - Intro/exercise-explore-your-data.ipynb
|
###Markdown
**[Machine Learning Course Home Page](https://www.kaggle.com/learn/machine-learning)**--- This exercise will test your ability to read a data file and understand statistics about the data.In later exercises, you will apply techniques to filter the data, build a machine learning model, and iteratively improve your model.The course examples use data from Melbourne. To ensure you can apply these techniques on your own, you will have to apply them to a new dataset (with house prices from Iowa).The exercises use a "notebook" coding environment. In case you are unfamiliar with notebooks, we have a [90-second intro video](https://www.youtube.com/watch?v=4C2qMnaIKL4). ExercisesRun the following cell to set up code-checking, which will verify your work as you go.
###Code
# Set up code checking
from learntools.core import binder
binder.bind(globals())
from learntools.machine_learning.ex2 import *
print("Setup Complete")
###Output
Setup Complete
###Markdown
Step 1: Loading DataRead the Iowa data file into a Pandas DataFrame called `home_data`.
###Code
import pandas as pd
# Path of the file to read
iowa_file_path = '../input/home-data-for-ml-course/train.csv'
# Fill in the line below to read the file into a variable home_data
home_data = pd.read_csv(iowa_file_path)
# Call line below with no argument to check that you've loaded the data correctly
step_1.check()
# Lines below will give you a hint or solution code
#step_1.hint()
#step_1.solution()
###Output
_____no_output_____
###Markdown
Step 2: Review The DataUse the command you learned to view summary statistics of the data. Then fill in variables to answer the following questions
###Code
# Print summary statistics in next line
home_data.describe()
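# One way to derive the values asked for below from the summary statistics
# (this assumes the standard Iowa dataset columns 'LotArea' and 'YearBuilt'):
# avg_lot_size = round(home_data['LotArea'].mean())
# newest_home_age = pd.Timestamp.now().year - home_data['YearBuilt'].max()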
# What is the average lot size (rounded to nearest integer)?
avg_lot_size = 10517
# As of today, how old is the newest home (current year - the date in which it was built)
newest_home_age = 11
# Checks your answers
step_2.check()
#step_2.hint()
# step_2.solution()
###Output
_____no_output_____
|
test_adwh_conn.ipynb
|
###Markdown
Test ADWH connection* simplified from the version inside the Conda NB
###Code
import cx_Oracle as cxo
import ads
from ads.database import connection
from ads.database.connection import Connector
from ads.dataset.factory import DatasetFactory
from sqlalchemy import create_engine
from urllib.request import urlopen
import os
import warnings
import logging
import pandas as pd
warnings.filterwarnings("ignore", category=DeprecationWarning)
logging.basicConfig(format="%(levelname)s:%(message)s", level=logging.INFO)
print(cxo.__version__)
print(ads.__version__)
# credential = {'database_name': 'dbtst1_medium',
# 'username': 'DEMO1',
# 'password': 'Pippo12345::',
# 'database_type':'oracle'}
# store the credentials in a local repository
# this way the password is not kept in the NB
# (it was saved to the repo beforehand, see the commented instruction above)
database_name = "dbtst1_medium"
# connection.update_repository(key=database_name, value=credential)
repo = connection.get_repository(key=database_name)
# import the wallet
wallet_path = os.path.join(
os.path.expanduser("~/data-science-bp"), f"Wallet_{database_name}.zip"
)
connection.import_wallet(wallet_path=wallet_path, key=database_name)
if "database_name" in globals() and database_name != "<database_name>":
connector = Connector(key=database_name)
print(connector.config)
print()
print(f"Connector uri is: {connector.uri}")
else:
print("Skipping as it appears that you do not have database_name configured.")
#
# Here: define the query to run against the DB
#
query = """
SELECT * from CUSTOMERS where rownum < 50
"""
%%time
# run the query and load everything into a Pandas DataFrame
customers_df = DatasetFactory.open(connector.uri, format="sql", table=query)
type(customers_df)
df = customers_df.to_pandas_dataframe()
type(df)
df.tail(10)
print(f"The number of records read is {customers_df.shape[0]}")
###Output
The number of records read is 49
###Markdown
Using Pandas directly (with our extensions)* idea taken from: https://github.com/oracle/accelerated-data-science
###Code
connection_parameters = {
"user_name": repo["username"],
"password": repo["password"],
"service_name": repo["database_name"],
"wallet_location": "/home/datascience/data-science-bp/Wallet_dbtst1_medium.zip",
}
df = pd.DataFrame.ads.read_sql(
"SELECT * FROM CUSTOMERS",
connection_parameters=connection_parameters,
)
df.shape
###Output
_____no_output_____
###Markdown
if we want to use bind variables:
###Code
df = pd.DataFrame.ads.read_sql(
"""SELECT * FROM CUSTOMERS WHERE ROWNUM <= :max_rows
""",
bind_variables={
"max_rows" : 20
},
connection_parameters=connection_parameters
)
df.shape
###Output
_____no_output_____
|
project2/group2-basicanalysis-project2.ipynb
|
###Markdown
View all available datasets
###Code
import os
print("List of all available datasets:")
for entry in os.scandir('./data'):
if entry.is_file():
print("-- " + entry.name)
###Output
List of all available datasets:
-- mergedData2.csv
-- demographic.csv
-- cleaned_hm.csv
-- mergedData.csv
###Markdown
--- Import necessary libraries
###Code
import warnings
warnings.filterwarnings('ignore')
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
pd.options.display.max_colwidth = 1000
###Output
_____no_output_____
###Markdown
--- Demographic dataset explorations
###Code
data_demo = pd.read_csv("data/demographic.csv")
data_demo.shape
data_demo.columns
data_demo.sample(5)
print("Number of unique countries:", data_demo['country'].unique().shape[0])
print("which are:", data_demo['country'].unique())
print("Number of unique marital status:", data_demo['marital'].unique().shape[0])
print("which are:", data_demo['marital'].unique())
###Output
Number of unique marital status: 6
which are: ['married' 'single' 'divorced' 'separated' 'widowed' nan]
###Markdown
--- Cleaned dataset explorations
###Code
data_happy = pd.read_csv("data/cleaned_hm.csv")
data_happy.shape
data_happy.columns
data_happy.sample(5)
print("Number of unique reflection periods:", data_happy['reflection_period'].unique().shape[0])
print("which are:", data_happy['reflection_period'].unique())
print("Number of unique types of number of sentences:", data_happy['num_sentence'].unique().shape[0])
print("which are:", data_happy['num_sentence'].unique())
print("Number of unique ground truth categories:", data_happy['ground_truth_category'].unique().shape[0])
print("which are:", data_happy['ground_truth_category'].unique())
print("Number of unique predicted categories:", data_happy['predicted_category'].unique().shape[0])
print("which are:", data_happy['predicted_category'].unique())
###Output
Number of unique predicted categories: 7
which are: ['affection' 'exercise' 'bonding' 'leisure' 'achievement'
'enjoy_the_moment' 'nature']
###Markdown
--- Qualitative analysis
###Code
data = pd.read_csv("data/mergedData2.csv")
data.columns
###Output
_____no_output_____
###Markdown
Dimensions of the dataset. Features: age group, country, gender, marital status, parenthood, reflection period. Outcome: predicted category, unigram, bigram
###Code
data_group_1 = data[(data["country"] == "USA") & (data["marital"] == "married") & (data["reflection_period"] == "3m")]
data_group_1.shape
from sklearn.feature_extraction.text import CountVectorizer
word_vectorizer = CountVectorizer(ngram_range=(1,2), analyzer='word')
sparse_matrix = word_vectorizer.fit_transform(data_group_1['cleaned_hm'])
frequencies = sum(sparse_matrix).toarray()[0]
bag_of_words = pd.DataFrame(frequencies, index=word_vectorizer.get_feature_names(), columns=['frequency'])
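# Note: in scikit-learn >= 1.0 the vectorizer method is get_feature_names_out();
# get_feature_names() was deprecated and later removed.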
nnnn = bag_of_words.sort_values('frequency')
nnnn.tail(100)
###Output
_____no_output_____
|
20_data_discovery/213_Metop-AB_GOME-2_NO2Tropo_L3_load_browse.ipynb
|
###Markdown
2 - Level 2 - Pre-processing 214 - Metop-A/B/C GOME-2 - Absorbing Aerosol Index - Level 3 >> 20 - DATA DISCOVERY SEE ALSO* **30 - CASE STUDIES** - [321 - Metop-A/B GOME-2 - Tropospheric NO2 map and time-series - Level 3](../30_case_studies/321_air_pollution_map_time-series_Metop-AB_GOME-2_NO2Tropo_L3.ipynb) - [323 - Air pollution - Tropospheric NO2 anomaly map - Metop-A/B GOME-2 - Level 2](../30_case_studies/323_air_pollution_map_europe_2020_Metop-AB_GOME-2_NO2Tropo_L2.ipynb) - [324 - Air pollution - Tropospheric NO2 time-series analysis - Metop-A/B GOME-2 - Level 2](../30_case_studies/324_air_pollution_time-series_europe_2020_Metop-AB_GOME-2_NO2Tropo_L2.ipynb) 2.1.3 Metop-A/B GOME-2 - Tropospheric NO2 - Level 3 Metop-A/B GOME-2 data records are monthly aggregated products (Level 3), regridded onto a regular latitude longitude grid. Data records are disseminated in the data format `netCDF`. Data records are processed for three parameters:* `tropospheric ozone column`, * `total and tropospheric nitrogen dioxide column`, and * `total water vapour column`.This notebook guides you through how the data can be loaded and gives basic plotting examples. Module outline:* [1 - Load and browse Metop-A/B GOME-2 data records](load_l3)* [2 - Plotting example of Metop-A/B GOME-2 Level 3 data](plotting) Load required libraries
###Code
import os
from netCDF4 import Dataset
import xarray as xr
import numpy as np
import pandas as pd
import datetime
# Python libraries for visualisation
from matplotlib import pyplot as plt
import cartopy.crs as ccrs
from cartopy.mpl.gridliner import LONGITUDE_FORMATTER, LATITUDE_FORMATTER
%run ../functions.ipynb
###Output
_____no_output_____
###Markdown
Load and browse Metop-A/B GOME-2 data records (Level 3) [xarray](http://xarray.pydata.org/en/stable/) is a Python library that makes it easy to load and browse data that are disseminated in the `NetCDF` format. Open NetCDF files with `xarray` Open one individual file with `xarray` By using xarray's `open_dataset()` function, one individual `netCDF` file can be loaded to create a new `xarray.Dataset`. Let us load a Level 3 data file for Nitrogen Dioxide for January 2017 and let us inspect the resulting `xarray.Dataset` and the `latitude` and `longitude` coordinates.
###Code
ds = xr.open_dataset('../eodata/gome2/level3/no2/GOME_NO2_Global_201701_METOPB_DLR_v1.nc')
ds, ds.latitude, ds.longitude
###Output
_____no_output_____
###Markdown
You see that the `data variable` section of the loaded `xarray.Dataset` is empty. This is due to the internal structure of the Metop-A/B GOME-2 Level 3 data. The data is organised in groups within the `netCDF` file. To load from a group, you have to pass the `group='PRODUCT'` as keyword to the `open_dataset()` function. If you do this, you can see that the `Dataset` contains six data variables:* (1) `NO2total`, * (2) `NO2total_err`, * (3) `NO2total_stddev`, * (4) `NO2trop`,* (5) `NO2trop_err`,* (6) `NO2trop_stddev`However, `latitude` and `longitude` do not contain any coordinates.
###Code
no2 = xr.open_dataset('../eodata/gome2/level3/no2/GOME_NO2_Global_201701_METOPB_DLR_v1.nc', group='PRODUCT')
no2
###Output
_____no_output_____
###Markdown
You can use the xarray function `assign_coords` to assign the coordinate values from your dataset to the dimensions of the `no2` data object.
###Code
no2_assigned = no2.assign_coords(latitude=ds.latitude, longitude=ds.longitude)
no2_assigned
###Output
_____no_output_____
###Markdown
Open multiple netCDF files with `xarray` xarray's `open_mfdataset()` function is very useful to load multiple netCDF files simultaneously. This function automatically concatenates and merges multiple files into a single xarray dataset. The behaviour of `xr.open_mfdataset` is the same as the one of `xr.open_dataset`. In order to access the dataset variables, you have to specify the `group` parameter. Additionally, if the dimension you wish to concatenate the individual files on is not defined in the original dataset, a new dimension can be defined by specifying the `concat_dim` key. Let us define `time` as the dimension the files shall be concatenated on.
###Code
mf_dataset = xr.open_mfdataset('../eodata/gome2/level3/no2/*.nc', concat_dim='time', combine='nested', group='PRODUCT')
mf_dataset
###Output
_____no_output_____
###Markdown
The resulting `xarray.Dataset` has three dimensions (`latitude`, `longitude` and `time`). With `assign_coords` you can assign the right coordinate values to the dimensions. You can use again the `ds` xarray.Dataset you defined at the beginning. With the help of Python's [pandas](https://pandas.pydata.org/) library, you can create efficiently a list of monthly time values, which can be assigned to the time dimension.
###Code
# Build list of time coordinates with pandas
time_coords = pd.date_range('2007-02', '2017-11', freq='MS').strftime("%Y-%m").tolist()
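# (freq='MS' yields 130 monthly labels here, from 2007-02 to 2017-11, so index 129
#  used further below corresponds to November 2017)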
# Assign values to Dataset dimension
mf_dataset_assigned = mf_dataset.assign_coords(latitude=ds.latitude, longitude=ds.longitude, time=time_coords)
mf_dataset_assigned
###Output
_____no_output_____
###Markdown
Plotting example of Metop-A/B GOME-2 Level 3 data The last step is to visualize one time step of the created data object. Since the data are arranged on a regular grid, you can make use of the `pcolormesh` function of Python's plotting library [matplotlib](https://matplotlib.org/).As before, you can make use of Cartopy's features and create a basemap in a `PlateCarree` projection. You can add coast- and gridlines. The NO2 values are added as colored data values onto a regular grid with matplotlib's `pcolormesh` function. As you might need to re-use the code for plotting, we define a plotting function called [visualize_pcolormesh](../functions.ipynb#visualize_pcolormesh). Let us say you want to plot the monthly averaged values of `Tropospheric Nitrogen Dioxide` for November 2017. Before you can visualize it, you have to select the variable `NO2trop` from the `xarray.Dataset` *mf_dataset_assigned* you defined above.Further, you can select the month you wish to visualize by selecting the specific month of the time dimension.
###Code
no2_da = mf_dataset_assigned.NO2trop
no2_201711 = no2_da[129,:,:]
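# Equivalently, the time step can be selected by label, e.g.
# no2_201711 = no2_da.sel(time='2017-11'), using the string time coordinate assigned above.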
no2_201711
visualize_pcolormesh(data_array=no2_201711*1e-15,
longitude=no2_201711.longitude,
latitude=no2_201711.latitude,
projection=ccrs.PlateCarree(),
color_scale='viridis',
unit=no2_201711.units,
long_name=no2_201711.long_name,
vmin=0,
vmax=20,
lonmin=no2_201711.longitude.min(),
lonmax=no2_201711.longitude.max(),
latmin=no2_201711.latitude.min(),
latmax=no2_201711.latitude.max(),
set_global=True)
###Output
_____no_output_____
|
notebooks/1.0-jdl-webscraping-data.ipynb
|
###Markdown
Web scraping to obtain League of Legends data------ Import relevant modules
###Code
import sys
import requests
import time
import random
import datetime
import pandas as pd
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import chromedriver_binary
###Output
_____no_output_____
###Markdown
Record the date that the data was scraped. Last collected 2019-07-16 (FULL MOON)
###Code
# Get current date and time
now = datetime.datetime.now()
year_scraped = str(now.year)
month_scraped = str(now.month)
day_scraped = str(now.day)
# Add leading zeroes to single-digit months and days
if len(month_scraped) == 1:
month_scraped = '0' + month_scraped
if len(day_scraped) == 1:
day_scraped = '0' + day_scraped
# Construct date string
date_data = year_scraped + '-' + month_scraped + '-' + day_scraped
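# Equivalent one-liner (strftime keeps the leading zeros automatically):
# date_data = now.strftime('%Y-%m-%d')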
###Output
_____no_output_____
###Markdown
Get champion names and release dates---
###Code
champions_df = pd.read_html('https://leagueoflegends.fandom.com/wiki/List_of_champions')[1]
champions_df = champions_df[['Champion', 'Release Date']]
champions_df.columns = ['champion', 'release_date']
champions_df.head()
###Output
_____no_output_____
###Markdown
Get rid of champion titles
###Code
names = list(champions_df['champion'])
names = [s.split(',')[0] for s in names]
names = [s.split('\xa0the')[0] for s in names]
print(names[0:10])
###Output
['Aatrox', 'Ahri', 'Akali', 'Alistar', 'Amumu', 'Anivia', 'Annie', 'Ashe', 'Aurelion Sol', 'Azir']
###Markdown
Get number of skins---
###Code
# Set up selenium web driver
driver = webdriver.Chrome()
# Get number of skins
num_skins = []
for name in names:
name = name.replace(' ', '_')
skins_url = f'https://leagueoflegends.fandom.com/wiki/{name}/Skins'
driver.get(skins_url)
time.sleep(1)
soup = BeautifulSoup(driver.page_source, 'html.parser')
num_skins.append(len(soup.find_all('div', {'style':'display:inline-block; margin:5px; width:342px'})))
driver.close()
print(num_skins[:10])
# Subtract 1 from number of skins for each champion because of base skin
for idx, skins in enumerate(num_skins):
num_skins[idx] = skins - 1
print(num_skins[:10])
###Output
[5, 10, 11, 12, 10, 8, 11, 10, 2, 4]
###Markdown
Get pick, win, and ban rates--- Set up selenium webdriver
###Code
champstats_url = 'https://na.op.gg/statistics/champion/'
driver = webdriver.Chrome()
driver.get(champstats_url)
###Output
_____no_output_____
###Markdown
Select stats for current day, ranked queue, and for all ranks
###Code
# Ranked queue and all ranks are already default, but today isn't
today_button = driver.find_element_by_xpath('//*[@id="recent_today"]/span/span')
today_button.click()
###Output
_____no_output_____
###Markdown
Get win rate data
###Code
winrate_button = driver.find_element_by_xpath('//*[@id="rate_win"]/span/span')
winrate_button.click()
winrate_df = pd.read_html(driver.page_source)[1]
winrate_df = winrate_df[['Champion.1', 'Win rate']]
winrate_df.columns = ['champion', 'winrate']
winrate_df.sort_values(by = 'champion', inplace = True)
winrate_df.head()
###Output
_____no_output_____
###Markdown
Get ban rate data
###Code
banrate_button = driver.find_element_by_xpath('//*[@id="rate_ban"]/span/span')
banrate_button.click()
banrate_df = pd.read_html(driver.page_source)[1]
banrate_df = banrate_df[['Champion.1', 'Ban ratio per game']]
banrate_df.columns = ['champion', 'banrate']
banrate_df.sort_values(by = 'champion', inplace = True)
banrate_df.head()
###Output
_____no_output_____
###Markdown
Get pick rate data
###Code
pickrate_button = driver.find_element_by_xpath('//*[@id="rate_pick"]/span/span')
pickrate_button.click()
pickrate_df = pd.read_html(driver.page_source)[1]
pickrate_df = pickrate_df[['Champion.1', 'Pick ratio per game']]
pickrate_df.columns = ['champion', 'pickrate']
pickrate_df.sort_values(by = 'champion', inplace = True)
pickrate_df.head()
driver.close()
###Output
_____no_output_____
###Markdown
Get patch when champion was last changed
###Code
# Set up selenium web driver
driver = webdriver.Chrome()
# Get patch when champion was last changed
last_patch = []
for name in names:
name = name.replace(' ', '_')
champ_url = f'https://lol.gamepedia.com/{name}#Patch_History'
driver.get(champ_url)
#time.sleep(1)
soup = BeautifulSoup(driver.page_source, 'html.parser')
    history = [link for link in soup.find_all('a')
               if any(f'>v{v}.' in str(link) or f'Patch {v}.' in str(link)
                      for v in range(1, 10))]
most_recent = history[0]
most_recent = str(most_recent)[-8:-4]
last_patch.append(most_recent)
driver.close()
print(last_patch[0:20])
for idx, patch in enumerate(last_patch):
last_patch[idx] = patch.replace('v','')
for idx, patch in enumerate(last_patch):
last_patch[idx] = patch.replace(' ', '')
print(last_patch[:20])
###Output
['9.12', '8.20', '9.11', '8.21', '9.12', '8.24', '8.17', '9.12', '9.2', '9.7', '8.17', '9.9', '9.10', '8.18', '9.12', '9.8', '9.8', '9.7', '9.13', '9.9']
###Markdown
Construct fully scraped data frame---
###Code
# Create lists from columns of scraped data frames
release_date = list(champions_df['release_date'])
win_rate = list(winrate_df['winrate'])
ban_rate = list(banrate_df['banrate'])
pick_rate = list(pickrate_df['pickrate'])
# Create full data frame
data = list(zip(names, release_date, last_patch, num_skins, win_rate, ban_rate, pick_rate))
colnames = ['champion', 'release_date', 'last_patch', 'num_skins', 'win_rate', 'ban_rate', 'pick_rate']
scraped_df = pd.DataFrame(data, columns = colnames)
scraped_df['date_data'] = date_data
scraped_df.head(20)
###Output
_____no_output_____
###Markdown
Write data frame to csv file
###Code
filename = f'scraped_data_{date_data}.csv'
filedir = f'/Users/jeremy_lehner/Documents/GitHub/metis_project2/data/raw/{filename}'
scraped_df.to_csv (filedir, index = None, header = True)
###Output
_____no_output_____
|
question_creator.ipynb
|
###Markdown
Create questions from ontology by reverse engineering questions
###Code
# Ontology structure:
'''
[
{"concept": ["relationship::RelatedConcept", relationship::RelatedConcept]},
{"concept": ["relationship1::RelatedConcept"]}
]
'''
ontology = [
{"Service_Value":
[
"has::Warranty"
]
},
{"Service_Value_Warranty":
[
"has_parameter::Availiability",
"has_parameter::Continuity",
"has_parameter::Capacity",
"has_parameter::Security",
"is_part_of::Service_Value"
]
},
# {"": []}
]
import random
# print(ontology)
def return_concept_index(concept):
concept_index = [(index, item)[0] for index, item in enumerate(ontology) if concept in item.keys()]
try:
return concept_index[0]
except IndexError:
return False
def pick_concept(ontology, concept=""):
concept_index = 0
if concept == "":
        concept_index = random.randint(0, len(ontology) - 1)
else:
concept_index = return_concept_index(concept)
# print(concept_index)
if concept_index is False:
return concept_index
return ontology[concept_index]
# return ontology[concept_index]
concept_name = "Service_Value_Warranty"
concept = pick_concept(ontology, concept_name)
if concept is False:
print(f"Concept '{concept_name}' not recognised in ontology")
exit(0)
def pick_relation(ontology, concept, related_concept="", relationship=""):
    if related_concept == "":
# for item in concept[concept_name]:
# print(item.split("::"))
# relationships = [{item.split("::")[0]: item.split("::")[1]} for item in concept[concept_name]]
temp_relationships = [item.split("::") for item in concept[concept_name]]
relationships = {}
for item in temp_relationships:
if item[0] in relationships:
relationships[item[0]].append(item[1])
else:
relationships[item[0]] = []
relationships[item[0]].append(item[1])
        if relationship == "":
return random.choice(list(relationships.items()))
else:
try:
return relationships[relationship]
except KeyError:
return False
# print(relationships)
concept_relationship = list(pick_relation(ontology, concept))
question_values = [concept_name, concept_relationship[0], concept_relationship[1]]
print(question_values)
question_templates = [
"<DT> {len()} {concept_relationship} of {concept}::{related_concept(4)}"
# The four parameters of the Warranty in Service Value are:
]
print(question_templates[0])
###Output
['Service_Value_Warranty', 'has_parameter', ['Availiability', 'Continuity', 'Capacity', 'Security']]
<DT> {len()} {concept_relationship} of {concept}::{related_concept(4)}
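###Markdown
A minimal sketch (not part of the original notebook) of how a question could be rendered from `question_values`; it assumes `<DT>` stands for a determiner such as "The", `{len()}` for the number of related concepts, and that underscores in names become spaces.
###Code
def render_question(question_values):
    # question_values = [concept, relationship, list_of_related_concepts]
    concept, relationship, related = question_values
    clean = lambda s: s.replace('_', ' ')
    stem = f"The {len(related)} {clean(relationship)} entries of {clean(concept)} are:"
    return stem, [clean(c) for c in related]
# print(render_question(question_values))
###Output
_____no_output_____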
|
Feature_learning_pipeline_ForAnge2.ipynb
|
###Markdown
GET VARIABLES
###Code
# Imports needed by the cells below (kfold_feature_learning itself is assumed
# to be defined elsewhere, e.g. in a helper script, and is not redefined here)
import pandas
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn import linear_model, model_selection

## CHANGE PATH ##
df = pandas.read_csv('/Users/jakevogel/Downloads/adni_bl_all_ica50_scores_20170922.csv')
df.head()
df.columns.tolist()[:14]
df.shape
train = df[df.MCI == 0]
train.shape
X_train = train[train.columns[14:]]
X_train.shape
test = df[df.MCI == 1]
X_test = test[test.columns[14:]]
X_test.shape
y_train = train.AD
y_test = test.conv_2_AD
###Output
_____no_output_____
###Markdown
RUN IT! This is just a bunch of random settings -- I did not tweak or optimize it. Feel free to use different input data, tweak the parameters (e.g. folds, p-cutoff, clf), or whatever else. However, I am working on an optimizer script right now that I can send when it's ready.
###Code
jnk_weights, jnk_pred, jnk_t_pred, jnk_ints = kfold_feature_learning(
X_train, X_test, y_train, y_test,
p_cutoff=0.1,problem = 'classification', folds = 10,
clf = linear_model.SGDClassifier(loss='modified_huber',penalty='l1',random_state=123),
output='light', scale=True, regcols = None, regdf = None,
keep_cols = None, save_int = True)
###Output
running ttests for fold 1 of 10
running model for fold 1 of 10
running ttests for fold 2 of 10
running model for fold 2 of 10
running ttests for fold 3 of 10
running model for fold 3 of 10
running ttests for fold 4 of 10
running model for fold 4 of 10
running ttests for fold 5 of 10
running model for fold 5 of 10
running ttests for fold 6 of 10
running model for fold 6 of 10
running ttests for fold 7 of 10
running model for fold 7 of 10
running ttests for fold 8 of 10
running model for fold 8 of 10
running ttests for fold 9 of 10
running model for fold 9 of 10
running ttests for fold 10 of 10
running model for fold 10 of 10
39 features selected
validation sensitivity: 0.6352941176470588
validation specificity: 0.8695652173913043
validation accuracy: 0.7955390334572491
testing sensitivity: 0.6078431372549019
testing specificity: 0.8789808917197452
testing accuracy: 0.8125
###Markdown
Now using a grid search embedded in the pipeline! Again, I did not mess with the parameters of the feature_learning function -- feel free to do so
###Code
params1 = {'loss': ['hinge','log', 'modified_huber', 'squared_hinge', 'perceptron'],
'penalty': ['none', 'l2', 'l1'],
'alpha': [0.000001, 0.00001, 0.0001, 0.001, 0.01, 0.1]}
params2 = {'loss': ['hinge','log', 'modified_huber', 'squared_hinge'],
'penalty': ['elasticnet'],
'alpha': [0.000001, 0.00001, 0.0001, 0.001, 0.01, 0.1],
'l1_ratio': [0.05,0.1,0.15,0.2,0.3,0.5]}
selector = model_selection.GridSearchCV(linear_model.SGDClassifier(random_state=123),[params1,params2],cv=10,
scoring = 'f1')
output = kfold_feature_learning(X_train, X_test, y_train, y_test, clf = selector, search = True,
p_cutoff=None,problem = 'classification', folds = 3, output='light',
scale=True, regcols = None, regdf = None, keep_cols = None, save_int = True)
wts = pandas.DataFrame(output[0],columns = ['Weight'])
#wts = wts.loc[abs(wts).sort_values('Weight',ascending = False).index]
wts.head()
plt.close()
sns.set(font_scale=2)
fig,ax = plt.subplots(figsize=(10,5))
fig = sns.barplot(x = wts.index,
y='Weight',
data=wts.loc[abs(wts).sort_values('Weight',ascending = False).index],palette='Blues_d')
for item in fig.get_xticklabels():
item.set_rotation(90)
plt.xlabel('Features')
plt.ylabel('Weight')
plt.show()
###Output
_____no_output_____
###Markdown
Experimenting with a random forest
###Code
from sklearn.ensemble import RandomForestClassifier
clf = RandomForestClassifier()
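# X_train_scl / X_test_scl are not defined in the cells shown in this notebook;
# a plausible definition (an assumption, mirroring the scale=True option used above) is:
# from sklearn.preprocessing import StandardScaler
# scaler = StandardScaler().fit(X_train)
# X_train_scl = scaler.transform(X_train)
# X_test_scl = scaler.transform(X_test)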
clf.fit(X_train_scl, y_train)
clf.score(X_test_scl, y_test)
rfoutput = kfold_feature_learning(X_train, X_test, y_train, y_test, clf = RandomForestClassifier(), search = False,
p_cutoff=None, problem = 'classification', folds = 3, output='light',
scale=True, regcols = None, regdf = None, keep_cols = None, save_int = True)
clf.feature_importances_
clf.predict_proba(X_train_scl)[:,1]
pandas.DataFrame(X_test).mean().values
xx.ravel().shape
yy.ravel().shape
clf.predict
###Output
_____no_output_____
|
music.ipynb
|
###Markdown
First I'll make a dictionary so that I can look up the MIDI addresses of notes.
###Code
# Imports used by the cells below (assuming the classic qiskit interface with
# Aer and execute, plus midiutil and pygame, which later cells rely on)
import numpy as np
from midiutil import MIDIFile
from qiskit import QuantumCircuit, execute, Aer
import pygame

note_ref = {'C':24,'C#':25,'D':26,'D#':27,'E':28,'F':29,'F#':30,'G':31,'G#':32,'A':33,'A#':34,'B':35}
###Output
_____no_output_____
###Markdown
Next I'll make a function that creates a list of bit strings, such that each string is only one bit different from its neighbours in the list. For example, `make_line(8)` generates ['000', '100', '110', '010', '011', '111', '101', '001']
###Code
def make_line ( length ):
# outputs `line`: a list of `length` bit strings (plus a few extra if length isn't a power of two)
# strings defined such that line[j] is Hamming distance 1 from line[j+1], for all j
n = int(np.ceil(np.log(length)/np.log(2)))
line = ['0','1']
for j in range(n-1):
line = line + line[::-1]
for j in range(int(len(line)/2)):
line[j] += '0'
for j in range(int(len(line)/2),int(len(line))):
line[j] += '1'
return line
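# For example (as described above):
# make_line(8) -> ['000', '100', '110', '010', '011', '111', '101', '001']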
###Output
_____no_output_____
###Markdown
The idea behind what follows is to take a score and represent each event using the information* note* octave* bar* beat* volumeI then create a separate subscore for each note. For each event in this, I treat (octave,bar,beat) as a coordinate.To each set of coordinates I associate a bit string, such that these strings differ by only one bit if they correspond to an event on the same beat in the same bar, but on a neighbouring octave, or the same octave and bar but a neighbouring beat, or the same octave and beat but a neighbouring bar. This can be done by using `make_line` to create a separate line for octaves, bars and beats, and then associating a bit string with each octave, beat and bar. The bit string for any (octave,bar,beat) is then chosen to be the concatenation of the octave, beat and bar strings.I then encode the subscore as a quantum state, by associating the probability of the output bit string for (octave,beat,bar) with the corresponding volume. Encoding the subscore can be done by creating a statevector for which the amplitudes correspond to the square root of the volume (and then normalized). Decoding can be done straightforwardly from the counts dictionary (given enough samples). On a simulator, it can be done by extracting the statevector and reversing the encoding.While the subscore is in the quantum program, it can be manipulated. Small angle rotations, for example, will cause events to bleed volume into neighbouring coordinates and begin to quantumly interfere with each other.The above is all the idea behind what follows. Unfortunately, I seem to have strayed from this when actually doing it. It does produce some sort of an output, though!The parameter `theta` below affects how much interference is caused. For `theta = 0`, the score comes out as it went in. For `theta = np.pi/2` it is a total keyboard mash.
###Code
theta = np.pi/3
def make_box ( lengths ):
# same as `make_line`, but outputs `box` for multidimensonal connectivity
box = ['']
for length in lengths:
new_box = []
line = make_line(length)
for bstring in box:
for lstring in line:
new_box.append( bstring + lstring)
box = new_box
return box
# set up a piece
bar_num = 12 # number of bars
octave_num = 4 # number of octaves
beat_num = 4 # number of beats per bar, (or half beats, etc depending on desired resolution and tempo)
notes_used = ['C','D','E','F','G','A'] # ['C','C#','D','D#','E','F','G#','G','G#','A','A#','B']
# set up a score
score = []
score.append({'note':'C','octave':2,'bar':0,'beat':0, 'volume':100})
score.append({'note':'C','octave':2,'bar':0,'beat':1, 'volume':100})
score.append({'note':'G','octave':2,'bar':0,'beat':2, 'volume':100})
score.append({'note':'G','octave':2,'bar':0,'beat':3, 'volume':100})
score.append({'note':'A','octave':2,'bar':1,'beat':0, 'volume':100})
score.append({'note':'A','octave':2,'bar':1,'beat':1, 'volume':100})
score.append({'note':'G','octave':2,'bar':1,'beat':2, 'volume':100})
score.append({'note':'F','octave':2,'bar':2,'beat':0, 'volume':100})
score.append({'note':'F','octave':2,'bar':2,'beat':1, 'volume':100})
score.append({'note':'E','octave':2,'bar':2,'beat':2, 'volume':100})
score.append({'note':'E','octave':2,'bar':2,'beat':3, 'volume':100})
score.append({'note':'D','octave':2,'bar':3,'beat':0, 'volume':100})
score.append({'note':'D','octave':2,'bar':3,'beat':1, 'volume':100})
score.append({'note':'C','octave':2,'bar':3,'beat':2, 'volume':100})
score.append({'note':'G','octave':2,'bar':4,'beat':0, 'volume':100})
score.append({'note':'G','octave':2,'bar':4,'beat':1, 'volume':100})
score.append({'note':'F','octave':2,'bar':4,'beat':2, 'volume':100})
score.append({'note':'F','octave':2,'bar':4,'beat':3, 'volume':100})
score.append({'note':'E','octave':2,'bar':5,'beat':0, 'volume':100})
score.append({'note':'E','octave':2,'bar':5,'beat':1, 'volume':100})
score.append({'note':'D','octave':2,'bar':5,'beat':2, 'volume':100})
score.append({'note':'G','octave':2,'bar':6,'beat':0, 'volume':100})
score.append({'note':'G','octave':2,'bar':6,'beat':1, 'volume':100})
score.append({'note':'F','octave':2,'bar':6,'beat':2, 'volume':100})
score.append({'note':'F','octave':2,'bar':6,'beat':3, 'volume':100})
score.append({'note':'E','octave':2,'bar':7,'beat':0, 'volume':100})
score.append({'note':'E','octave':2,'bar':7,'beat':1, 'volume':100})
score.append({'note':'D','octave':2,'bar':7,'beat':2, 'volume':100})
score.append({'note':'C','octave':2,'bar':8,'beat':0, 'volume':100})
score.append({'note':'C','octave':2,'bar':8,'beat':1, 'volume':100})
score.append({'note':'G','octave':2,'bar':8,'beat':2, 'volume':100})
score.append({'note':'G','octave':2,'bar':8,'beat':3, 'volume':100})
score.append({'note':'A','octave':2,'bar':9,'beat':0, 'volume':100})
score.append({'note':'A','octave':2,'bar':9,'beat':1, 'volume':100})
score.append({'note':'G','octave':2,'bar':9,'beat':2, 'volume':100})
score.append({'note':'F','octave':2,'bar':10,'beat':0, 'volume':100})
score.append({'note':'F','octave':2,'bar':10,'beat':1, 'volume':100})
score.append({'note':'E','octave':2,'bar':10,'beat':2, 'volume':100})
score.append({'note':'E','octave':2,'bar':10,'beat':3, 'volume':100})
score.append({'note':'D','octave':2,'bar':11,'beat':0, 'volume':100})
score.append({'note':'D','octave':2,'bar':11,'beat':1, 'volume':100})
score.append({'note':'C','octave':2,'bar':11,'beat':2, 'volume':100})
bars = int(np.ceil(np.log(bar_num)/np.log(2)))
octaves = int(np.ceil(np.log(octave_num)/np.log(2)))
beats = int(np.ceil(np.log(beat_num)/np.log(2)))
box = make_box([bar_num,octave_num,beat_num])
ket = {}
for note in notes_used:
ket[note] = [0]*len(box)
for event in score:
address = ''
for coord,limit in [(event['bar'],bars),(event['octave'],octaves),(event['beat'],beats)]:
address += ("{:0"+str(limit)+"b}").format(coord)
ket[event['note']][int(address,2)] = np.sqrt(event['volume'])
for note in notes_used:
N = 0
for amp in ket[note]:
N += amp*np.conj(amp)
for j,amp in enumerate(ket[note]):
ket[note][j] = amp/np.sqrt(N)
n = bars+octaves+beats
backend = Aer.get_backend('qasm_simulator')
qcs = {}
for note in notes_used:
qcs[note] = QuantumCircuit(n,n,name=note)
qcs[note].initialize(ket[note],qcs[note].qregs[0])
qcs[note].ry(theta,qcs[note].qregs[0])
qcs[note].measure(qcs[note].qregs[0],qcs[note].cregs[0])
counts = {}
for note in notes_used:
counts[note] = execute(qcs[note],backend,shots=8192).result().get_counts(note)
notes = {}
for note in notes_used:
maxvol = 0
these_notes = {}
for string in counts[note]:
beat = int(string[-beats::],2)
octave = int(string[-beats-octaves:-beats:],2)
bar = int(string[0:bars],2)
vol = np.log( counts[note][string] )
these_notes[(note,beat,octave,bar)] = vol
maxvol = max(maxvol,vol)
for coords in these_notes:
these_notes[coords] = int(these_notes[coords]*100/maxvol)
for note in these_notes:
notes[note] = these_notes[note]
track = 0
channel = 0
tempo = 60
MyMIDI = MIDIFile(1)
MyMIDI.addTempo(track, 0, tempo)
MyMIDI.addProgramChange(0,0,0,1)
for (note,beat,octave,bar) in notes:
degree = note_ref[note] + octave*12
time = beat + beat_num*bar
duration = 1
volume = notes[(note,beat,octave,bar)]
MyMIDI.addNote(track, channel, degree, time, duration, volume)
with open("music.mid", "wb") as output_file:
MyMIDI.writeFile(output_file)
pygame.init()
pygame.mixer.music.load("music.mid")
pygame.mixer.music.play()
###Output
_____no_output_____
###Markdown
Music Creation using LSTM **Installing Libraries**h5py will be required later while training the model in order to store trained model weights after every epoch
###Code
# h5py will be required later while training the model in order to store the
# trained model weights after every epoch
!pip install h5py
# Importing required classes
from pydub import AudioSegment
import os
import glob
import numpy as np
from music21 import converter, instrument, note, chord
# importing Keras libraries
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Dropout
from keras.layers import LSTM
from keras.layers import Activation
from keras.layers import BatchNormalization as BatchNorm
from keras.utils import np_utils
from keras.callbacks import ModelCheckpoint
###Output
_____no_output_____
###Markdown
The dataset is uploaded from the system into the midi_songs directory. This directory contains 92 .mid extension files.
###Code
#os.mkdir('midi_songs')
#%cd ../
os.getcwd()
# Making Notes list of all notes and chords using file path
notes = []
for file in glob.glob("midi_songs/*.mid"):
midi = converter.parse(file)
notes_to_parse = None
parts = instrument.partitionByInstrument(midi)
if parts: # file has instrument parts
notes_to_parse = parts.parts[0].recurse()
else: # file has notes in a flat structure
notes_to_parse = midi.flat.notes
for element in notes_to_parse:
if isinstance(element, note.Note):
notes.append(str(element.pitch))
elif isinstance(element, chord.Chord):
notes.append('.'.join(str(n) for n in element.normalOrder))
len(notes)
seq_length = 100
# sorting all notes and chords pitches
pitch_names = sorted(set(item for item in notes))
# total unique notes/combination_of_notes in the dataset
len(pitch_names)
# Creating note_to_int dictionary
note_to_indx = dict((note,i) for i,note in enumerate(pitch_names))
# Creating inputs and corresponding outputs for LSTM model
input_seq = []
output_seq = []
for i in range(0,len(notes)-seq_length,1):
seq_in = notes[i:i+seq_length]
seq_out = notes[i+seq_length]
input_seq.append([note_to_indx[key] for key in seq_in])
output_seq.append(note_to_indx[seq_out])
n_patterns = len(input_seq)
# reshape the input into a format compatible with LSTM layers
input_seq = np.reshape(input_seq, (n_patterns, seq_length, 1))
input_seq = input_seq/len(pitch_names)
output_seq = np_utils.to_categorical(output_seq)
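# input_seq now has shape (n_patterns, seq_length, 1) with values scaled to [0, 1);
# output_seq holds the one-hot encoded next note for each input window.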
input_seq.shape
###Output
_____no_output_____
###Markdown
Defining the model using the Keras Sequential class, after which it is trained and the weights are saved.
###Code
model = Sequential()
model.add(LSTM(256,input_shape=(input_seq.shape[1],input_seq.shape[2]),return_sequences=True))
model.add(Dropout(0.13))
model.add(LSTM(512))
model.add(Dense(256))
model.add(Dropout(0.3))
model.add(Dense(len(pitch_names)))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
filepath = "weights-improvement-{epoch:02d}-{loss:.4f}-bigger.hdf5"
checkpoint = ModelCheckpoint(
filepath, monitor='loss',
verbose=0,
save_best_only=True,
mode='min'
)
callbacks_list = [checkpoint]
model.fit(input_seq, output_seq, epochs=40, batch_size=100, callbacks=callbacks_list)
###Output
_____no_output_____
###Markdown
Building the same structure again, but this time, instead of training it, loading it with the weights obtained during training
###Code
model = Sequential()
model.add(LSTM(
256,
input_shape=(input_seq.shape[1], input_seq.shape[2]),
return_sequences=True
))
model.add(Dropout(0.3))
model.add(LSTM(512))
model.add(Dense(256))
model.add(Dropout(0.3))
model.add(Dense(358))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
# Load the weights to each node
model.load_weights('/content/weights-improvement-40-0.5465-bigger.hdf5')
###Output
_____no_output_____
###Markdown
Making predictions
###Code
# prediction
start = np.random.randint(0, len(input_seq)-1)
int_to_note = dict((number, note) for number, note in enumerate(pitch_names))
pattern = input_seq[start]
prediction_output = []
pattern = np.array(pattern)
# generate 500 notes
for note_index in range(500):
prediction_input = np.reshape(pattern, (1, len(pattern), 1))
prediction_input = prediction_input / float(len(pitch_names))
prediction = model.predict(prediction_input, verbose=0)
prediction = np.reshape(prediction,(358,1))
index = np.argmax(prediction)
result = int_to_note[index]
#making index size as that of pattern to concat.
index = [[index]]
prediction_output.append(result)
pattern = np.concatenate((pattern,index))
pattern = pattern[1:len(pattern)+1]
# see the last predicted note (the full generated sequence is in prediction_output)
print(result)
###Output
_____no_output_____
###Markdown
obtaining notes
###Code
offset = 0
output_notes = []
# create note and chord objects based on the values generated by the model
for pattern in prediction_output:
# pattern is a chord
if ('.' in pattern) or pattern.isdigit():
notes_in_chord = pattern.split('.')
notes = []
for current_note in notes_in_chord:
new_note = note.Note(int(current_note))
new_note.storedInstrument = instrument.Piano()
notes.append(new_note)
new_chord = chord.Chord(notes)
new_chord.offset = offset
output_notes.append(new_chord)
# pattern is a note
else:
new_note = note.Note(pattern)
new_note.offset = offset
new_note.storedInstrument = instrument.Piano()
output_notes.append(new_note)
# increase offset each iteration so that notes do not stack
offset += 0.5
###Output
_____no_output_____
###Markdown
Creating the MIDI file. The file can later be converted to .mp3 format from [here](https://www.zamzar.com/) to listen to your predicted music.
###Code
from music21 import stream
midi_stream = stream.Stream(output_notes)
midi_stream.write('midi', fp='test_output.mid')
###Output
_____no_output_____
###Markdown
Some quantum musicWe will be using maths and Midi files, so let's start with some imports.
###Code
import numpy as np
from midiutil import MIDIFile
###Output
_____no_output_____
###Markdown
We'll be manipulating the music in the Midi files using some quantum software. For that we'll be using [Quantum Blur](https://github.com/qiskit-community/QuantumBlur/blob/master/README.md) (which has Qiskit under the hood).
###Code
import quantumblur as qb
###Output
_____no_output_____
###Markdown
The process will be to start with an initial piece of music and then modify it. We'll do this using a structure such as the following (which represents 'Twinkle twinkle little star').
###Code
score = [{'note': 'C', 'octave': 0, 'bar': 0, 'beat': 0, 'volume': 100}, {'note': 'C', 'octave': 0, 'bar': 0, 'beat': 1, 'volume': 100}, {'note': 'G', 'octave': 0, 'bar': 0, 'beat': 2, 'volume': 100}, {'note': 'G', 'octave': 0, 'bar': 0, 'beat': 3, 'volume': 100}, {'note': 'A', 'octave': 0, 'bar': 1, 'beat': 0, 'volume': 100}, {'note': 'A', 'octave': 0, 'bar': 1, 'beat': 1, 'volume': 100}, {'note': 'G', 'octave': 0, 'bar': 1, 'beat': 2, 'volume': 100}, {'note': 'F', 'octave': 0, 'bar': 2, 'beat': 0, 'volume': 100}, {'note': 'F', 'octave': 0, 'bar': 2, 'beat': 1, 'volume': 100}, {'note': 'E', 'octave': 0, 'bar': 2, 'beat': 2, 'volume': 100}, {'note': 'E', 'octave': 0, 'bar': 2, 'beat': 3, 'volume': 100}, {'note': 'D', 'octave': 0, 'bar': 3, 'beat': 0, 'volume': 100}, {'note': 'D', 'octave': 0, 'bar': 3, 'beat': 1, 'volume': 100}, {'note': 'C', 'octave': 0, 'bar': 3, 'beat': 2, 'volume': 100}, {'note': 'G', 'octave': 0, 'bar': 4, 'beat': 0, 'volume': 100}, {'note': 'G', 'octave': 0, 'bar': 4, 'beat': 1, 'volume': 100}, {'note': 'F', 'octave': 0, 'bar': 4, 'beat': 2, 'volume': 100}, {'note': 'F', 'octave': 0, 'bar': 4, 'beat': 3, 'volume': 100}, {'note': 'E', 'octave': 0, 'bar': 5, 'beat': 0, 'volume': 100}, {'note': 'E', 'octave': 0, 'bar': 5, 'beat': 1, 'volume': 100}, {'note': 'D', 'octave': 0, 'bar': 5, 'beat': 2, 'volume': 100}, {'note': 'G', 'octave': 0, 'bar': 6, 'beat': 0, 'volume': 100}, {'note': 'G', 'octave': 0, 'bar': 6, 'beat': 1, 'volume': 100}, {'note': 'F', 'octave': 0, 'bar': 6, 'beat': 2, 'volume': 100}, {'note': 'F', 'octave': 0, 'bar': 6, 'beat': 3, 'volume': 100}, {'note': 'E', 'octave': 0, 'bar': 7, 'beat': 0, 'volume': 100}, {'note': 'E', 'octave': 0, 'bar': 7, 'beat': 1, 'volume': 100}, {'note': 'D', 'octave': 0, 'bar': 7, 'beat': 2, 'volume': 100}, {'note': 'C', 'octave': 0, 'bar': 8, 'beat': 0, 'volume': 100}, {'note': 'C', 'octave': 0, 'bar': 8, 'beat': 1, 'volume': 100}, {'note': 'G', 'octave': 0, 'bar': 8, 'beat': 2, 'volume': 100}, {'note': 'G', 'octave': 0, 'bar': 8, 'beat': 3, 'volume': 100}, {'note': 'A', 'octave': 0, 'bar': 9, 'beat': 0, 'volume': 100}, {'note': 'A', 'octave': 0, 'bar': 9, 'beat': 1, 'volume': 100}, {'note': 'G', 'octave': 0, 'bar': 9, 'beat': 2, 'volume': 100}, {'note': 'F', 'octave': 0, 'bar': 10, 'beat': 0, 'volume': 100}, {'note': 'F', 'octave': 0, 'bar': 10, 'beat': 1, 'volume': 100}, {'note': 'E', 'octave': 0, 'bar': 10, 'beat': 2, 'volume': 100}, {'note': 'E', 'octave': 0, 'bar': 10, 'beat': 3, 'volume': 100}, {'note': 'D', 'octave': 0, 'bar': 11, 'beat': 0, 'volume': 100}, {'note': 'D', 'octave': 0, 'bar': 11, 'beat': 1, 'volume': 100}, {'note': 'C', 'octave': 0, 'bar': 11, 'beat': 2, 'volume': 100}]
###Output
_____no_output_____
###Markdown
To play the music in these 'scores', we will turn them into MIDI files. The function below does this.
###Code
midi_C = 36
beats_per_bar = 4
note_ref = {'C':0,'C#':1,'D':2,'D#':3,'E':4,'F':5,'F#':6,'G':7,'G#':8,'A':9,'A#':10,'B':11}
def score2midi(score,filename):
track = 0
channel = 0
tempo = 60
MyMIDI = MIDIFile(1)
MyMIDI.addTempo(track, 0, tempo)
MyMIDI.addProgramChange(0,0,0,1)
for note in score:
n = note_ref[note['note']]
beat = note['beat']
octave = note['octave']
bar = note['bar']
degree = n + octave*12 + midi_C
time = beat + beats_per_bar*bar
duration = 1
volume = note['volume']
MyMIDI.addNote(track, channel, degree, time, duration, volume)
with open(filename, 'wb') as output_file:
MyMIDI.writeFile(output_file)
###Output
_____no_output_____
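###Markdown
As a quick usage check (an addition to this notebook, with an illustrative file name), the original score can be written straight to a MIDI file with the function just defined.
###Code
score2midi(score, 'twinkle_original.mid')
###Output
_____no_output_____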
###Markdown
To see what the music looks like, we can turn it into an image. First we turn it into what we call a heightmap, which assigns a single value to a set of coordinates. This value could be interpreted as either a height (hence the name) or a brightness (for a monochrome image). The coordinates we will use for our music will represent the semitone of the note (on the y axis) and the beat at which it occurs (on the x axis). To make the music easier to encode in a quantum computer, we'll also rescale from the usual 12 semitones per octave to a power of 2. When converting from the heightmap back to a score, we'll rescale it back again. The following function turns scores into heightmaps.
###Code
semis_per_octave = 16
def score2height(score):
h = {}
for note in score:
n = int( np.round(note_ref[note['note']] * semis_per_octave/12) + semis_per_octave*note['octave'] )
t = note['beat'] + beats_per_bar*note['bar']
if (t,n) not in h:
h[t,n] = note['volume']
else:
h[t,n] += note['volume']
h_max = max(h.values())
for t,n in h:
h[t,n] /= h_max
return h
###Output
_____no_output_____
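###Markdown
To make the rescaling concrete, here is a small sketch (an addition to this notebook) that maps a few note names from the 12-semitone scale onto the 16-slot scale used above and back again; the round trip recovers the original notes.
###Code
import numpy as np
for name in ['C', 'E', 'B']:
    n = int(np.round(note_ref[name] * semis_per_octave / 12))            # 12 semitones -> 16 slots
    n0 = int(np.round((n % semis_per_octave) * 12 / semis_per_octave))   # 16 slots -> 12 semitones
    back = list(note_ref.keys())[list(note_ref.values()).index(n0)]
    print(name, '->', n, '->', back)
###Output
_____no_output_____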
###Markdown
The next one turns heightmaps into images that can be easily rendered here in a Jupyter notebook.
###Code
def score2image(score, scale=10):
    height = score2height(score)
    img = qb.height2image(height)
    img = img.resize((img.size[0]*scale, img.size[1]*scale), 0)
    return img
###Output
_____no_output_____
###Markdown
Here's our score as an image.
###Code
score2image(score)
###Output
_____no_output_____
###Markdown
The final step is to turn heightmaps back into scores. The function has a `max_only=True` option: when two notes are played at the same time, it removes all but those with the maximum value, and collects the volume of the rejected notes into the ones that remain.
###Code
def height2score(height,max_only=False):
score = []
max_h = max(height.values())
notes4beats = [[] for _ in range(10000) ]
for (t,n) in height:
note = {}
note['beat'] = t%beats_per_bar
note['bar'] = int((t-note['beat'])/beats_per_bar)
n0 = np.round((n%semis_per_octave)*12/semis_per_octave)
note['note'] = list(note_ref.keys())[list(note_ref.values()).index(int(n0))]
note['octave'] = int((n - n%semis_per_octave)/semis_per_octave)
note['volume'] = int(100*height[t,n]/max_h)
notes4beats[t].append(note.copy())
for t,notes in enumerate(notes4beats):
if max_only:
max_vol = 0
total_vol = 0
for note in notes:
max_vol = max( note['volume'], max_vol )
total_vol += note['volume']
for note in notes:
if note['volume']==max_vol:
note['volume'] = min(total_vol,100)
score.append(note)
else:
for note in notes:
score.append(note)
return score
###Output
_____no_output_____
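###Markdown
A tiny illustration of the `max_only` option (an addition to this notebook, using made-up coordinates): two notes at the same time step, where only the louder one survives and absorbs the other's volume.
###Code
toy_height = {(0, 0): 0.8, (0, 4): 0.4}            # two notes at beat 0 of bar 0
print(height2score(toy_height, max_only=False))    # both notes kept
print(height2score(toy_height, max_only=True))     # only the louder note, with combined volume
###Output
_____no_output_____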
###Markdown
Now we can actually do something! Quantum Blur has been designed to encode heightmaps as quantum circuits, and to turn quantum circuits back into heightmaps. This allows the heightmaps to be manipulated by changing the circuit. First, let's just turn the score into a circuit and back again, to see that it comes back in one piece.
###Code
height = score2height(score)
qc = qb.height2circuit(height)
new_height = qb.circuit2height(qc)
new_score = height2score(new_height,max_only=False)
score2image(new_score)
###Output
_____no_output_____
###Markdown
Now let's add some `rx` rotations on all qubits, which is the simplest way to manipulate the height map.
###Code
theta = np.pi/8
height = score2height(score)
qc = qb.height2circuit(height)
qc.rx(theta,range(qc.num_qubits))
new_height = qb.circuit2height(qc)
new_score = height2score(new_height,max_only=False)
score2image(new_score)
###Output
_____no_output_____
###Markdown
Here the angle of rotation was quite small, so it didn't change the score much. In fact, with `max_only=True` we find that it hardly changed at all.
###Code
new_score = height2score(new_height,max_only=True)
score2image(new_score)
###Output
_____no_output_____
###Markdown
The effect is more pronounced with a larger angle.
###Code
theta = np.pi*0.41
height = score2height(score)
qc = qb.height2circuit(height)
qc.rx(theta,range(qc.num_qubits))
new_height = qb.circuit2height(qc)
new_score = height2score(new_height,max_only=False)
score2image(new_score)
###Output
_____no_output_____
###Markdown
And still results in something very different when we take `max_only=True`.
###Code
new_score = height2score(new_height,max_only=True)
score2image(new_score)
###Output
_____no_output_____
###Markdown
When we convert back to a score, and then to a midi file, we can listen to what results. It turns out to be very different to what we started with, but still arguably music.
###Code
score2midi(new_score,'music.mid')
###Output
_____no_output_____
###Markdown
Table of Contents
###Code
from algo_gen.classes import Population
from algo_gen.tools.plot import show_stats
from midi_utils import convert_to_midi
def function_each_turn(population):
if population.parameters['individual'] == ['algo_gen.individuals.music', 'IndividualMusic']:
for i, indiv in enumerate(population.individuals):
convert_to_midi(indiv[0], str(i) + "ORI" + ".mid")
def function_end(population):
if population.parameters['individual'] == ['algo_gen.individuals.music', 'IndividualMusic']:
for i, indiv in enumerate(population.individuals):
convert_to_midi(indiv[0], str(i) + "ORI" + ".mid")
# if self.parameters['individual'] == ['gen_algo.individuals.music', 'IndividualMusic']:
# from algo_gen.tools.midi_utils import convert_to_midi
# for i, indiv in enumerate(self.individuals):
# convert_to_midi(indiv[0], str(i) + ".mid")
# # play_midi_file(str(i) + ".mid")
parameters = {
'configuration name': 'music',
'individual': ['algo_gen.individuals.music', 'IndividualMusic'],
'population size': 50, # 100 200 500
'chromosome size': 4, # 5 10 50 100
'function_each_turn': function_each_turn,
'function_end': function_end,
'nb turn max': 100,
'stop after no change': 10000, # int(config['nb turn max']*0.10),
# ('select_random',)
# ('select_best',)
# ('select_tournament', 2, 5)
# ('select_wheel',)
# ('adaptative' ,
# 'fixed roulette wheel' 'adaptive roulette wheel' 'adaptive pursuit' 'UCB' 'DMAB'
# [(0.25, 'select_random'), (0.25, 'select_best'),
# (0.25, 'select_tournament', 2 , 5), (0.25, 'select_wheel')])
'selection': ('select_best',),
'proportion selection': 0.04, # 2 / config['population size']
'crossover': 'uniforme', # 'mono-point' 'uniforme'
'proportion crossover': 1,
# ['n-flip', 1] ['n-flip', 3] ['n-flip', 5] ['bit-flip']
'mutation': ['bit-flip'],
'proportion mutation': 0.2, # 0.1 0.2 0.5 0.8
'insertion': 'age', # 'age' 'fitness'
'properties': [("gen_algo.fitness.properties", "SAScoreProperty2DDecorator"),
("gen_algo.fitness.properties", "CycleProperty2DDecorator"),
("gen_algo.fitness.properties", "LogPProperty2DDecorator"),
("gen_algo.fitness.properties", "DFTPropertyDecorator"),
]
}
population = Population(parameters)
stats = population.start()
show_stats(stats)
# import music21
#
# fp = '/home/user/Documents/m2/7 projet/MuGen/MuGen/midi/midi_short/'
# mf = music21.midi.MidiFile()
# mf.open(str(fp))
# mf.read()
# mf.close()
# len(mf.tracks)
#
# s = music21.midi.translate.midiFileToStream(mf)
# print(s)
#
# # print(len(s.flat.notesAndRests))
#
# s.plot('scatter', 'offset', 'pitchClass')
#
#
# # s.show()
# s.features()
#
# from pprint import pprint
#
# from music21.features import jSymbolic, native
# f = list(jSymbolic.featureExtractors) + list(native.featureExtractors)
#
# for f, v in zip(f,music21.features.allFeaturesAsList(s)):
# print(f"{f} : {v}")
# https://en.wikipedia.org/wiki/Musical_similarity
# import music21
# from glob import glob
# from tqdm.notebook import tqdm
#
# data = []
#
# for fp in tqdm(glob("/home/user/Documents/m2/7 projet/MuGen/MuGen/midi/*/*.mid")[:50]):
# mf = music21.midi.MidiFile()
# mf.open(str(fp))
# mf.read()
# mf.close()
#
# s = music21.midi.translate.midiFileToStream(mf)
# try:
# data.append(music21.features.allFeaturesAsList(s))
# except:
# print(fp)
# d = []
#
# for a in data:
# a = a[0]
# if len(a[0]) == 1:
# print("coucou")
# d.append(a[0][0])
# else:
# d.append(a[0])
#
#
# import pandas as pd
#
# dfObj = pd.DataFrame(d)
# dfObj
# su = 0
# for i in dfObj[1]:
# su += i[0]
#
# su / 49
###Output
_____no_output_____
|
2-AutoML.ipynb
|
###Markdown
AutoML. When to use AutoML: * We have a problem that involves one of these options: * Images * Classification (image level) * Object detection (region level) * Segmentation (pixel level) * Text * Text classification * Entity extraction * Sentiment analysis * Tabular data (CSV) * Regression * Classification * Forecasting * Video * Classification * Action recognition * Object tracking * The problem is basic. * The team is small or has no experience. * A quick prototype or proof of concept is wanted. * The model would not need to be improved further. Example with tabular data: * The first line of the CSV must be the header; these are the column names. * Columns may contain alphanumeric characters and the underscore "_" (but cannot start with it). * Each CSV cannot exceed 10 GB; if it is larger than 10 GB you can split it across several CSVs, up to a maximum of 100 GB. * The delimiter must be the comma ",". * At least 1000 rows for tabular data, 100 images per class for Vision AI. There is no need to declare the CSV schema (whether the columns are integers, floats, strings, etc.); Vertex AI does it for you. The data can be split between training, validation and test automatically or manually. Automatic: example with unassigned rows: "John","Doe","555-55-5555"; "Jane","Doe","444-44-4444"; "Roger","Rogers","123-45-6789"; "Sarah","Smith","333-33-3333". Manual: example with the split assigned: "TRAIN","John","Doe","555-55-5555"; "TEST","Jane","Doe","444-44-4444"; "TRAIN","Roger","Rogers","123-45-6789"; "VALIDATE","Sarah","Smith","333-33-3333". Manual/automatic: "UNASSIGNED","John","Doe","555-55-5555"; "TEST","Jane","Doe","444-44-4444"; "UNASSIGNED","Roger","Rogers","123-45-6789"; "UNASSIGNED","Sarah","Smith","333-33-3333". Chronological: rows are assigned in the order in which they appear in the CSV. For example, an 80/10/10 split would give: * The first 80% of the CSV lines assigned to training (TRAIN). * The next 10% assigned to validation (VALIDATION). * The last 10% assigned to test (TEST).
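The short sketch below is an addition to this notebook: it shows one way to write a CSV with a manually assigned split column using pandas. The column and file names are illustrative choices, not requirements.
###Code
import pandas as pd

# A tiny table with an explicit split assignment per row (TRAIN / VALIDATE / TEST).
rows = [
    ("TRAIN", "John", "Doe", "555-55-5555"),
    ("TEST", "Jane", "Doe", "444-44-4444"),
    ("TRAIN", "Roger", "Rogers", "123-45-6789"),
    ("VALIDATE", "Sarah", "Smith", "333-33-3333"),
]
split_df = pd.DataFrame(rows, columns=["ml_use", "first_name", "last_name", "phone"])

# Header row and comma delimiter, as required above.
split_df.to_csv("data_with_split.csv", index=False)
print(split_df)
###Output
_____no_output_____
###Markdown
The cell below builds a small tabular classification example with scikit-learn.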
###Code
from sklearn.datasets import fetch_covtype
import pandas as pd
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import confusion_matrix, f1_score
df = fetch_covtype(return_X_y=False, as_frame=True)['frame']
df = df[df.Cover_Type.isin([5,3])].sample(2000).reset_index(drop=True)
#df.to_csv("cover.csv", index=False)
#df = df[["Wilderness_Area_3", "Wilderness_Area_0","Elevation", "Cover_Type"]]
rfc = RandomForestClassifier(warm_start=True, n_estimators=30)
train = df.head(1500)
test = df.tail(500)
X_train, y_train = train.iloc[:, :-1].values, train.iloc[:, -1:].values.reshape(-1)
X_test, y_test = test.iloc[:, :-1].values, test.iloc[:, -1:].values.reshape(-1)
rfc.fit(X_train, y_train)
y_pred = rfc.predict(X_test)
confusion_matrix(y_test, y_pred)
import matplotlib.pyplot as plt
import numpy as np
fig, ax = plt.subplots(dpi=144)
order = np.argsort(rfc.feature_importances_)[::-1]
names = df.columns[:-1][order]
values = rfc.feature_importances_[order]
_ = plt.barh(y=names[:10], width=values[:10])
_ = plt.yticks(fontsize=4)
ax.xaxis.label.set_size(10)
import seaborn as sns
sns.pairplot(df.head(1000)[["Elevation", "Wilderness_Area_3", "Wilderness_Area_2", "Cover_Type"]], diag_kind='kde')
###Output
_____no_output_____
|
experiments/n_instances/visualization.ipynb
|
###Markdown
SGIMC
###Code
PATH_ARCH = os.path.join(PATH_DATA, 'arch_sgimc')
filename_results = []
filename_results.append(os.path.join(PATH_DATA, 'results_sgimc.gz'))
for fn in os.listdir(PATH_ARCH):
filename_results.append(os.path.join(PATH_ARCH, fn))
results = []
for fn in filename_results:
results.append(load(fn))
results_final = []
for result in results:
results_final.append(get_final_results(result))
results_final = np.array(results_final)
results_sgimc = np.mean(results_final, axis=0)
###Output
_____no_output_____
###Markdown
IMC
###Code
PATH_ARCH = os.path.join(PATH_DATA, 'arch_imc')
filename_results = []
filename_results.append(os.path.join(PATH_DATA, 'results_imc.gz'))
for fn in os.listdir(PATH_ARCH):
filename_results.append(os.path.join(PATH_ARCH, fn))
results = []
for fn in filename_results:
results.append(load(fn))
results_final = []
for result in results:
results_final.append(get_final_results(result, hyp_param='lamb'))
results_final = np.array(results_final)
results_imc = np.mean(results_final, axis=0)
###Output
_____no_output_____
###Markdown
Plots
###Code
plt.figure(figsize=(10,8))
# lines
sgimc_line = mlines.Line2D([], [], color='blue', marker='o', linewidth=2.5,
markersize=10, label='SGIMC')
imc_line = mlines.Line2D([], [], color='red', marker='s', linewidth=2.5,
markersize=10, label='IMC')
# curves
plt.plot(results_sgimc[0], results_sgimc[1], 'b-', label='SGIMC', linewidth=2.5)
plt.plot(results_imc[0], results_imc[1], 'r-', label='IMC', linewidth=2.5)
# dots
plt.plot(results_sgimc[0], results_sgimc[1], 'bo', label='SGIMC', linewidth=2.5)
plt.plot(results_imc[0], results_imc[1], 'rs', label='IMC', linewidth=2.5)
# specify axis limits and labels
#plt.ylim((0.4,1.02))
plt.xlabel('Train to all elements ratio', fontsize=20)
plt.ylabel('Relative error', fontsize=20)
# ticks
plt.xticks(fontsize=16)
plt.yticks(fontsize=16)
# legend
plt.legend(handles=[sgimc_line, imc_line], fontsize=20, loc=1)
# save figure
# plt.savefig('/cobrain/home/Dustpelt/SGIMC/plots/n_instances.pdf', dpi=200, bbox_inches='tight')
plt.show()
###Output
_____no_output_____
|
pickle_embedding.ipynb
|
###Markdown
Improper usage of pickle rendered numpy [vulnerable to arbitrary code execution](https://snyk.io/vuln/SNYK-PYTHON-NUMPY-73513); this was demonstrated through CVE-2019-6446. The following code has been modified from the original proof of concept.
###Code
# PoC from https://snyk.io/vuln/SNYK-PYTHON-NUMPY-73513 (CVE-2019-6446)
import numpy
from numpy import __version__
print(__version__)
import os
import pickle
class Test(object):
def __init__(self):
self.a = 1
def __reduce__(self):
return (os.system,('ls',))
tmpdaa = Test()
with open("a-file.pickle",'wb') as f:
pickle.dump(tmpdaa,f)
# The original PoC used an earlier version where allow_pickle was True by default
numpy.load('a-file.pickle', allow_pickle=True)
###Output
1.18.5
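###Markdown
By contrast (a note added to this notebook), plain numeric arrays saved with `numpy.save` do not need pickle at all, and keeping `allow_pickle=False` (the current default) prevents `numpy.load` from executing arbitrary code.
###Code
import numpy as np
arr = np.arange(5)
np.save("a-plain-array.npy", arr)
print(np.load("a-plain-array.npy", allow_pickle=False))
###Output
_____no_output_____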
###Markdown
A similar vulnerability can be found in PyTorch. Unlike NumPy, PyTorch still relies on pickle, [only issuing a warning](https://github.com/pytorch/pytorch/pull/32593).
###Code
import torch
import os
import pickle
from torch import __version__
print(__version__)
class Test(object):
def __init__(self):
self.a = 1
def __reduce__(self):
return (os.system,('ls',))
tmpdaa = Test()
with open("a-file.pickle",'wb') as f:
torch.save(tmpdaa,f)
torch.load('a-file.pickle')
###Output
1.7.0+cu101
|
apphub/NLP/neural_machine_translation/transformer.ipynb
|
###Markdown
Neural Machine Translation Using Transformer. In this tutorial we will look at a sequence to sequence task: translating one language into another. The architecture used for the task is the famous `Transformer`. The transformer architecture was first proposed by this [paper](https://arxiv.org/abs/1706.03762). The general idea behind the architecture is the `attention` mechanism that can perform a re-weighting of the features throughout the network. Another advantage brought by the transformer architecture is that it breaks the temporal dependency of the data, allowing more efficient parallelization of training. We will implement every detail of the transformer in this tutorial. Let's get started! First let's define some hyper-parameters that we will use later.
###Code
data_dir = None
epochs=20
em_dim=128
batch_size=64
train_steps_per_epoch=None
eval_steps_per_epoch=None
###Output
_____no_output_____
###Markdown
Dataset. In this machine translation task, we will use the [TED translation dataset](https://github.com/neulab/word-embeddings-for-nmt). The dataset consists of 14 different translation tasks, such as Portuguese to English (`pt_to_en`), Russian to English (`ru_to_en`), and many others. In this tutorial, we will translate Portuguese to English. You can access this dataset through our dataset API - `tednmt`. Feel free to check the docstring of the API for other translation options.
###Code
from fastestimator.dataset.data import tednmt
train_ds, eval_ds, test_ds = tednmt.load_data(data_dir, translate_option="pt_to_en")
###Output
_____no_output_____
###Markdown
Now that the dataset is downloaded, let's check what the dataset looks like:
###Code
print("example source language:")
print(train_ds[0]["source"])
print("")
print("example target language:")
print(train_ds[0]["target"])
###Output
example source language:
entre todas as grandes privações com que nos debatemos hoje — pensamos em financeiras e económicas primeiro — aquela que mais me preocupa é a falta de diálogo político — a nossa capacidade de abordar conflitos modernos como eles são , de ir à raiz do que eles são e perceber os agentes-chave e lidar com eles .
example target language:
amongst all the troubling deficits we struggle with today — we think of financial and economic primarily — the ones that concern me most is the deficit of political dialogue — our ability to address modern conflicts as they are , to go to the source of what they 're all about and to understand the key players and to deal with them .
###Markdown
Preprocessing the languages. Since the text by itself cannot be recognized by computers, we need to perform a series of transformations to the text. Here are the steps: 1. Split the sentence into words or sub-words. For example, "I love apple" can be split into ["I", "love", "apple"]. Sometimes, in order to represent more words, a word is further reduced into sub-words. For example, `tokenization` can be split into `token` and `_ization`. As a result, a word like "civilization" doesn't require extra space when both `civil` and `_ization` are already in the dictionary. 2. Map the tokens into a discrete index according to the dictionary. In this task, we are loading a pretrained tokenizer with a built-in dictionary already. 3. Add a [start] and [end] token around every index. This is mainly to help the network identify the beginning and end of each sentence. 4. When creating a batch of multiple sentences, pad the shorter sentences with 0 so that each sentence in the batch has the same length.
###Code
import fastestimator as fe
from transformers import BertTokenizer
from fastestimator.op.numpyop import Batch, NumpyOp
import numpy as np
class Encode(NumpyOp):
def __init__(self, tokenizer, inputs, outputs, mode=None):
super().__init__(inputs=inputs, outputs=outputs, mode=mode)
self.tokenizer = tokenizer
def forward(self, data, state):
return np.array(self.tokenizer.encode(data))
pt_tokenizer = BertTokenizer.from_pretrained("neuralmind/bert-base-portuguese-cased")
en_tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
pipeline = fe.Pipeline(
train_data=train_ds,
eval_data=eval_ds,
test_data=test_ds,
ops=[
Encode(inputs="source", outputs="source", tokenizer=pt_tokenizer),
Encode(inputs="target", outputs="target", tokenizer=en_tokenizer),
Batch(batch_size=batch_size, pad_value=0)
])
###Output
_____no_output_____
###Markdown
In the above code, `tokenizer.encode` takes the sentence and executes steps 1-3. The padding step is done by providing `pad_value=0` in the `Batch` Op.
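As a quick illustration (an addition to this tutorial, using a made-up sentence), the cell below runs the English tokenizer on a short sentence so that the sub-word split and the added special tokens are visible.
###Code
example = "I love apple"
print(en_tokenizer.tokenize(example))  # step 1: split into (sub-)word tokens
print(en_tokenizer.encode(example))    # steps 2-3: map to ids and add [CLS]/[SEP]
###Output
_____no_output_____
###Markdown
Preprocessing Results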
###Code
data = pipeline.get_results()
print("source after processing:")
print(data["source"])
print("source batch shape:")
print(data["source"].shape)
print("---------------------------------------------------")
print("target after processing:")
print(data["target"])
print("target batch shape:")
print(data["target"].shape)
###Output
source after processing:
tensor([[ 101, 420, 1485, ..., 1061, 119, 102],
[ 101, 538, 179, ..., 0, 0, 0],
[ 101, 122, 21174, ..., 0, 0, 0],
...,
[ 101, 607, 230, ..., 0, 0, 0],
[ 101, 123, 10186, ..., 0, 0, 0],
[ 101, 11865, 3072, ..., 0, 0, 0]])
source batch shape:
torch.Size([64, 72])
---------------------------------------------------
target after processing:
tensor([[ 101, 5921, 2035, ..., 2068, 1012, 102],
[ 101, 2057, 2040, ..., 0, 0, 0],
[ 101, 1998, 1045, ..., 0, 0, 0],
...,
[ 101, 2045, 1005, ..., 0, 0, 0],
[ 101, 1996, 5424, ..., 0, 0, 0],
[ 101, 2009, 2097, ..., 0, 0, 0]])
target batch shape:
torch.Size([64, 70])
###Markdown
Transformer Architecture: Attention Unit. The basic form of the attention unit is defined in `scaled_dot_product_attention`. Given a set of queries (Q), keys (K), and values (V), it first performs the matrix multiplication of Q and K. The output of this multiplication gives the matching score between various elements of Q and K. Then all the weights are normalized across the keys dimension. Finally, the normalized score will be multiplied by the V to get the final result. The intuition behind the attention unit is essentially a dictionary look-up with interpolation.
###Code
import tensorflow as tf
def scaled_dot_product_attention(q, k, v, mask):
matmul_qk = tf.matmul(q, k, transpose_b=True)
dk = tf.cast(tf.shape(k)[-1], tf.float32)
scaled_attention_logits = matmul_qk / tf.math.sqrt(dk)
num_heads, inp_length = tf.shape(scaled_attention_logits)[1], tf.shape(scaled_attention_logits)[2]
num_heads_mask, inp_length_mask = tf.shape(mask)[1], tf.shape(mask)[2]
    # This manual tiling fixes an auto-broadcasting issue with tensorflow
scaled_attention_logits += tf.tile(mask * -1e9, [1, num_heads // num_heads_mask, inp_length // inp_length_mask, 1])
attention_weights = tf.nn.softmax(scaled_attention_logits, axis=-1)
output = tf.matmul(attention_weights, v)
return output
def point_wise_feed_forward_network(em_dim, dff):
return tf.keras.Sequential([
tf.keras.layers.Dense(dff, activation='relu'), # (batch_size, seq_len, dff)
tf.keras.layers.Dense(em_dim) # (batch_size, seq_len, em_dim)
])
###Output
_____no_output_____
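###Markdown
To make the tensor shapes concrete, here is a small check (an addition to this tutorial) that feeds random tensors and an all-zero mask through the attention unit defined above; each query position gets back one re-weighted value vector.
###Code
q = tf.random.normal((1, 1, 3, 4))   # (batch, heads, seq_len, depth)
k = tf.random.normal((1, 1, 3, 4))
v = tf.random.normal((1, 1, 3, 4))
no_mask = tf.zeros((1, 1, 1, 3))     # zero means "do not mask"
print(scaled_dot_product_attention(q, k, v, no_mask).shape)  # (1, 1, 3, 4)
###Output
_____no_output_____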
###Markdown
Multi-head Attention. There are two drawbacks of the attention unit above: 1. The complexity of the matrix multiplication is O(N^3); when the batch size or embedding dimension increases, the computation will not scale well. 2. A single attention head is limited in expressing local correlation between two words, because it calculates correlation by normalizing across all embedding dimensions. Sometimes this overall normalization will remove interesting local patterns. A good analogy is to consider a single attention unit as globally averaging a signal, whereas a moving average is preferred to preserve certain information. Multi-head attention is used to overcome the issues above. It breaks the embedding dimension into multiple heads. As a result, each head's embedding dimension is divided by the number of heads, reducing the computation complexity. Moreover, each head only takes a fraction of the embedding and can be viewed as a specialized expert for a specific context. The final results can be combined using another dense layer.
###Code
from tensorflow.keras import layers
class MultiHeadAttention(layers.Layer):
def __init__(self, em_dim, num_heads):
super().__init__()
assert em_dim % num_heads == 0, "model dimension must be multiply of number of heads"
self.num_heads = num_heads
self.em_dim = em_dim
self.depth = em_dim // self.num_heads
self.wq = layers.Dense(em_dim)
self.wk = layers.Dense(em_dim)
self.wv = layers.Dense(em_dim)
self.dense = layers.Dense(em_dim)
def split_heads(self, x, batch_size):
x = tf.reshape(x, (batch_size, -1, self.num_heads, self.depth))
return tf.transpose(x, perm=[0, 2, 1, 3]) # B, num_heads, seq_len, depth
def call(self, v, k, q, mask):
batch_size = tf.shape(q)[0]
q = self.wq(q) # B, seq_len, em_dim
k = self.wk(k) # B, seq_len, em_dim
v = self.wv(v) # B, seq_len, em_dim
q = self.split_heads(q, batch_size)
k = self.split_heads(k, batch_size)
v = self.split_heads(v, batch_size)
scaled_attention = scaled_dot_product_attention(q, k, v, mask)
scaled_attention = tf.transpose(scaled_attention, perm=[0, 2, 1, 3]) #B, seq_len, num_heads, depth
concat_attention = tf.reshape(scaled_attention, (batch_size, -1, self.em_dim)) # B, seq_len, em_dim
output = self.dense(concat_attention)
return output
###Output
_____no_output_____
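###Markdown
A quick shape check of the multi-head layer (an addition to this tutorial, with small illustrative sizes): the input and output have the same shape, with the re-weighting done per head internally.
###Code
mha = MultiHeadAttention(em_dim=8, num_heads=2)
x = tf.random.normal((1, 5, 8))      # (batch, seq_len, em_dim)
no_mask = tf.zeros((1, 1, 1, 5))     # nothing masked
print(mha(x, x, x, no_mask).shape)   # (1, 5, 8)
###Output
_____no_output_____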
###Markdown
Encoder and Decoder Layer. Both the encoder and decoder layers go through multi-head attention. The decoder layer uses another multi-head attention module to bridge the encoder outputs and the targets. Specifically, in the decoder's second multi-head attention module, the encoded output is used as both values and keys, whereas the target embedding is used as a query to "look up" encoder information. In the end, there is a feed-forward neural network to transform the looked-up value into something useful.
###Code
class EncoderLayer(layers.Layer):
def __init__(self, em_dim, num_heads, dff, rate=0.1):
super().__init__()
self.mha = MultiHeadAttention(em_dim, num_heads)
self.ffn = point_wise_feed_forward_network(em_dim, dff)
self.layernorm1 = layers.LayerNormalization(epsilon=1e-6)
self.layernorm2 = layers.LayerNormalization(epsilon=1e-6)
self.dropout1 = layers.Dropout(rate)
self.dropout2 = layers.Dropout(rate)
def call(self, x, training, mask):
attn_output = self.mha(x, x, x, mask)
attn_output = self.dropout1(attn_output, training=training)
out1 = self.layernorm1(x + attn_output)
ffn_output = self.ffn(out1)
ffn_output = self.dropout2(ffn_output, training=training)
out2 = self.layernorm2(out1 + ffn_output)
return out2
class DecoderLayer(layers.Layer):
def __init__(self, em_dim, num_heads, diff, rate=0.1):
super().__init__()
self.mha1 = MultiHeadAttention(em_dim, num_heads)
self.mha2 = MultiHeadAttention(em_dim, num_heads)
self.ffn = point_wise_feed_forward_network(em_dim, diff)
self.layernorm1 = layers.LayerNormalization(epsilon=1e-6)
self.layernorm2 = layers.LayerNormalization(epsilon=1e-6)
self.layernorm3 = layers.LayerNormalization(epsilon=1e-6)
self.dropout1 = layers.Dropout(rate)
self.dropout2 = layers.Dropout(rate)
self.dropout3 = layers.Dropout(rate)
def call(self, x, enc_out, training, decode_mask, padding_mask):
attn1 = self.mha1(x, x, x, decode_mask)
attn1 = self.dropout1(attn1, training=training)
out1 = self.layernorm1(attn1 + x)
attn2 = self.mha2(enc_out, enc_out, out1, padding_mask)
attn2 = self.dropout2(attn2, training=training)
out2 = self.layernorm2(attn2 + out1)
ffn_output = self.ffn(out2)
ffn_output = self.dropout3(ffn_output, training=training)
out3 = self.layernorm3(ffn_output + out2)
return out3
###Output
_____no_output_____
###Markdown
Putting Everything Together. A transformer consists of an Encoder and a Decoder, which in turn consist of multiple stacked encoder/decoder layers. One interesting property of transformers is that they do not have an intrinsic awareness of the position dimension. Therefore, a positional encoding is usually added to the embedding matrix to give the embedding position context. A nice tutorial about positional encoding can be found [here](https://kazemnejad.com/blog/transformer_architecture_positional_encoding/).
###Code
def get_angles(pos, i, em_dim):
angle_rates = 1 / np.power(10000, (2 * (i // 2)) / np.float32(em_dim))
return pos * angle_rates
def positional_encoding(position, em_dim):
angle_rads = get_angles(np.arange(position)[:, np.newaxis], np.arange(em_dim)[np.newaxis, :], em_dim)
# apply sin to even indices in the array; 2i
angle_rads[:, 0::2] = np.sin(angle_rads[:, 0::2])
# apply cos to odd indices in the array; 2i+1
angle_rads[:, 1::2] = np.cos(angle_rads[:, 1::2])
pos_encoding = angle_rads[np.newaxis, ...]
return tf.cast(pos_encoding, dtype=tf.float32)
class Encoder(layers.Layer):
def __init__(self, num_layers, em_dim, num_heads, dff, input_vocab, max_pos_enc, rate=0.1):
super().__init__()
self.em_dim = em_dim
self.num_layers = num_layers
self.embedding = layers.Embedding(input_vocab, em_dim)
self.pos_encoding = positional_encoding(max_pos_enc, self.em_dim)
self.enc_layers = [EncoderLayer(em_dim, num_heads, dff, rate) for _ in range(num_layers)]
self.dropout = layers.Dropout(rate)
def call(self, x, mask, training=None):
seq_len = tf.shape(x)[1]
x = self.embedding(x)
x *= tf.math.sqrt(tf.cast(self.em_dim, tf.float32))
x += self.pos_encoding[:, :seq_len, :]
x = self.dropout(x, training=training)
for i in range(self.num_layers):
x = self.enc_layers[i](x, training, mask)
return x
class Decoder(layers.Layer):
def __init__(self, num_layers, em_dim, num_heads, dff, target_vocab, max_pos_enc, rate=0.1):
super().__init__()
self.em_dim = em_dim
self.num_layers = num_layers
self.embedding = layers.Embedding(target_vocab, em_dim)
self.pos_encoding = positional_encoding(max_pos_enc, em_dim)
self.dec_layers = [DecoderLayer(em_dim, num_heads, dff, rate) for _ in range(num_layers)]
self.dropout = layers.Dropout(rate)
def call(self, x, enc_output, decode_mask, padding_mask, training=None):
seq_len = tf.shape(x)[1]
x = self.embedding(x)
x *= tf.math.sqrt(tf.cast(self.em_dim, tf.float32))
x += self.pos_encoding[:, :seq_len, :]
x = self.dropout(x, training=training)
for i in range(self.num_layers):
x = self.dec_layers[i](x, enc_output, training, decode_mask, padding_mask)
return x
def transformer(num_layers, em_dim, num_heads, dff, input_vocab, target_vocab, max_pos_enc, max_pos_dec, rate=0.1):
inputs = layers.Input(shape=(None, ))
targets = layers.Input(shape=(None, ))
encode_mask = layers.Input(shape=(None, None, None))
decode_mask = layers.Input(shape=(None, None, None))
x = Encoder(num_layers, em_dim, num_heads, dff, input_vocab, max_pos_enc, rate=rate)(inputs, encode_mask)
x = Decoder(num_layers, em_dim, num_heads, dff, target_vocab, max_pos_dec, rate=rate)(targets,
x,
decode_mask,
encode_mask)
x = layers.Dense(target_vocab)(x)
model = tf.keras.Model(inputs=[inputs, targets, encode_mask, decode_mask], outputs=x)
return model
model = fe.build(
model_fn=lambda: transformer(num_layers=4,
em_dim=em_dim,
num_heads=8,
dff=512,
input_vocab=pt_tokenizer.vocab_size,
target_vocab=en_tokenizer.vocab_size,
max_pos_enc=1000,
max_pos_dec=1000),
optimizer_fn="adam")
###Output
_____no_output_____
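###Markdown
Before moving on, a quick look (an addition to this tutorial) at the positional encoding defined above: it produces one sinusoidal vector per position, which is added to the token embeddings.
###Code
pe = positional_encoding(position=10, em_dim=em_dim)
print(pe.shape)                  # (1, 10, em_dim)
print(pe[0, :2, :6].numpy())     # the first two positions, first few dimensions
###Output
_____no_output_____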
###Markdown
Network Operations. Now that we have defined the transformer architecture, another thing worth mentioning is the mask. A mask is a boolean array that we create to tell the network to **ignore** certain words within the sentence. For example, a padding mask is used to tell the network to ignore the words we padded, and a look-ahead mask is needed so that the answer is not given away when predicting the next word. The loss function of the transformer is simply a masked cross-entropy loss, as it only considers predictions that are not masked.
###Code
from fastestimator.op.tensorop import TensorOp
from fastestimator.op.tensorop.loss import LossOp
from fastestimator.op.tensorop.model import ModelOp, UpdateOp
class CreateMasks(TensorOp):
def forward(self, data, state):
inp, tar = data
encode_mask = self.create_padding_mask(inp)
dec_look_ahead_mask = self.create_look_ahead_mask(tf.shape(tar)[1])
dec_target_padding_mask = self.create_padding_mask(tar)
decode_mask = tf.maximum(dec_target_padding_mask, dec_look_ahead_mask)
return encode_mask, decode_mask
@staticmethod
def create_padding_mask(seq):
seq = tf.cast(tf.math.equal(seq, 0), tf.float32)
return seq[:, tf.newaxis, tf.newaxis, :] # (batch_size, 1, 1, seq_len)
@staticmethod
def create_look_ahead_mask(size):
mask = 1 - tf.linalg.band_part(tf.ones((size, size)), -1, 0)
return mask # (seq_len, seq_len)
class ShiftData(TensorOp):
def forward(self, data, state):
target = data
return target[:, :-1], target[:, 1:]
class MaskedCrossEntropy(LossOp):
def __init__(self, inputs, outputs, mode=None):
super().__init__(inputs=inputs, outputs=outputs, mode=mode)
self.loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True, reduction='none')
def forward(self, data, state):
y_pred, y_true = data
mask = tf.cast(tf.math.logical_not(tf.math.equal(y_true, 0)), tf.float32)
loss = self.loss_fn(y_true, y_pred) * mask
loss = tf.reduce_sum(loss) / tf.reduce_sum(mask)
return loss
network = fe.Network(ops=[
ShiftData(inputs="target", outputs=("target_inp", "target_real")),
CreateMasks(inputs=("source", "target_inp"), outputs=("encode_mask", "decode_mask")),
ModelOp(model=model, inputs=("source", "target_inp", "encode_mask", "decode_mask"), outputs="pred"),
MaskedCrossEntropy(inputs=("pred", "target_real"), outputs="ce"),
UpdateOp(model=model, loss_name="ce")
])
###Output
_____no_output_____
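###Markdown
A small demonstration of the two masks (an addition to this tutorial, on a made-up 4-token batch): the padding mask flags the padded positions, and the look-ahead mask flags every future position.
###Code
toy = tf.constant([[7, 5, 0, 0]])                       # two real tokens followed by padding
print(CreateMasks.create_padding_mask(toy).numpy())     # 1.0 where the token id is 0
print(CreateMasks.create_look_ahead_mask(4).numpy())    # 1.0 above the diagonal (future tokens)
###Output
_____no_output_____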
###Markdown
Metrics and Learning Rate Scheduling. The metric used to evaluate the model is a masked accuracy, which is simply accuracy computed over the unmasked predictions and ground truths. The learning rate scheduler uses a warm-up phase followed by an inverse square-root decay.
###Code
import tempfile
from fastestimator.trace.adapt import LRScheduler
from fastestimator.trace.io import BestModelSaver
from fastestimator.trace.metric.bleu_score import BleuScore
from fastestimator.trace.trace import Trace
model_dir=tempfile.mkdtemp()
def lr_fn(step, em_dim, warmupstep=4000):
lr = em_dim**-0.5 * min(step**-0.5, step * warmupstep**-1.5)
return lr
class MaskedAccuracy(Trace):
def on_epoch_begin(self, data):
self.correct = 0
self.total = 0
def on_batch_end(self, data):
y_pred, y_true = data["pred"].numpy(), data["target_real"].numpy()
mask = np.logical_not(y_true == 0)
matches = np.logical_and(y_true == np.argmax(y_pred, axis=2), mask)
self.correct += np.sum(matches)
self.total += np.sum(mask)
def on_epoch_end(self, data):
data.write_with_log(self.outputs[0], self.correct / self.total)
traces = [
MaskedAccuracy(inputs=("pred", "target_real"), outputs="masked_acc", mode="!train"),
BleuScore(true_key="target_real", pred_key ="pred", output_name="bleu_score", n_gram=2, mode="!train"),
BestModelSaver(model=model, save_dir=model_dir, metric="masked_acc", save_best_mode="max"),
LRScheduler(model=model, lr_fn=lambda step: lr_fn(step, em_dim))]
estimator = fe.Estimator(pipeline=pipeline,
network=network,
traces=traces,
epochs=epochs,
train_steps_per_epoch=train_steps_per_epoch,
eval_steps_per_epoch=eval_steps_per_epoch)
###Output
_____no_output_____
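###Markdown
To see the schedule (an addition to this tutorial): the learning rate rises linearly during the first 4000 warm-up steps and then decays proportionally to 1/sqrt(step).
###Code
for step in [1, 1000, 4000, 16000]:
    print(step, lr_fn(step, em_dim))
###Output
_____no_output_____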
###Markdown
Start the training. The training will take around 30 minutes on a single V100 GPU
###Code
estimator.fit()
###Output
_____no_output_____
###Markdown
Let's translate something!
###Code
def token_to_words(sample, tokenizer):
words = tokenizer.decode(sample)
if '[CLS]' in words:
words = words[words.index('[CLS]')+5:]
if '[SEP]' in words:
words = words[:words.index('[SEP]')]
return words
sample_test_data = pipeline.get_results(mode="test")
sample_test_data = network.transform(data=sample_test_data, mode="test")
source = sample_test_data["source"].numpy()
predicted = sample_test_data["pred"].numpy()
predicted = np.argmax(predicted, axis=-1)
ground_truth = sample_test_data["target_real"].numpy()
index = np.random.randint(0, source.shape[0])
sample_source, sample_predicted, sample_ground_truth = source[index], predicted[index], ground_truth[index]
print("Source Language: ")
print(token_to_words(sample_source, pt_tokenizer))
print("")
print("Translation Ground Truth: ")
print(token_to_words(sample_ground_truth, en_tokenizer))
print("")
print("Machine Translation: ")
print(token_to_words(sample_predicted, en_tokenizer))
###Output
Source Language:
muito obrigado.
Translation Ground Truth:
thank you very much.
Machine Translation:
thank you very much.
###Markdown
Neural Machine Translation Using Transformer. In this tutorial we will look at a sequence to sequence task: translating one language into another. The architecture used for the task is the famous `Transformer`. The transformer architecture was first proposed by this [paper](https://arxiv.org/abs/1706.03762). The general idea behind the architecture is the `attention` mechanism that can perform a re-weighting of the features throughout the network. Another advantage brought by the transformer architecture is that it breaks the temporal dependency of the data, allowing more efficient parallelization of training. We will implement every detail of the transformer in this tutorial. Let's get started! First let's define some hyper-parameters that we will use later.
###Code
data_dir = None
epochs=20
em_dim=128
batch_size=64
max_train_steps_per_epoch=None
max_eval_steps_per_epoch=None
###Output
_____no_output_____
###Markdown
Dataset. In this machine translation task, we will use the [TED translation dataset](https://github.com/neulab/word-embeddings-for-nmt). The dataset consists of 14 different translation tasks, such as Portuguese to English (`pt_to_en`), Russian to English (`ru_to_en`), and many others. In this tutorial, we will translate Portuguese to English. You can access this dataset through our dataset API - `tednmt`. Feel free to check the docstring of the API for other translation options.
###Code
from fastestimator.dataset.data import tednmt
train_ds, eval_ds, test_ds = tednmt.load_data(data_dir, translate_option="pt_to_en")
###Output
_____no_output_____
###Markdown
Now that the dataset is downloaded, let's check what the dataset looks like:
###Code
print("example source language:")
print(train_ds[0]["source"])
print("")
print("example target language:")
print(train_ds[0]["target"])
###Output
example source language:
entre todas as grandes privações com que nos debatemos hoje — pensamos em financeiras e económicas primeiro — aquela que mais me preocupa é a falta de diálogo político — a nossa capacidade de abordar conflitos modernos como eles são , de ir à raiz do que eles são e perceber os agentes-chave e lidar com eles .
example target language:
amongst all the troubling deficits we struggle with today — we think of financial and economic primarily — the ones that concern me most is the deficit of political dialogue — our ability to address modern conflicts as they are , to go to the source of what they 're all about and to understand the key players and to deal with them .
###Markdown
Preprocessing the languages. Since the text by itself cannot be recognized by computers, we need to perform a series of transformations to the text. Here are the steps: 1. Split the sentence into words or sub-words. For example, "I love apple" can be split into ["I", "love", "apple"]. Sometimes, in order to represent more words, a word is further reduced into sub-words. For example, `tokenization` can be split into `token` and `_ization`. As a result, a word like "civilization" doesn't require extra space when both `civil` and `_ization` are already in the dictionary. 2. Map the tokens into a discrete index according to the dictionary. In this task, we are loading a pretrained tokenizer with a built-in dictionary already. 3. Add a [start] and [end] token around every index. This is mainly to help the network identify the beginning and end of each sentence. 4. When creating a batch of multiple sentences, pad the shorter sentences with 0 so that each sentence in the batch has the same length.
###Code
import fastestimator as fe
from transformers import BertTokenizer
from fastestimator.op.numpyop import NumpyOp
import numpy as np
class Encode(NumpyOp):
def __init__(self, tokenizer, inputs, outputs, mode=None):
super().__init__(inputs=inputs, outputs=outputs, mode=mode)
self.tokenizer = tokenizer
def forward(self, data, state):
return np.array(self.tokenizer.encode(data))
pt_tokenizer = BertTokenizer.from_pretrained("neuralmind/bert-base-portuguese-cased")
en_tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
pipeline = fe.Pipeline(
train_data=train_ds,
eval_data=eval_ds,
test_data=test_ds,
batch_size=batch_size,
ops=[
Encode(inputs="source", outputs="source", tokenizer=pt_tokenizer),
Encode(inputs="target", outputs="target", tokenizer=en_tokenizer)
],
pad_value=0)
###Output
_____no_output_____
###Markdown
In the above code, `tokenizer.encode` takes the sentence and executes steps 1-3. The padding step is done by providing `pad_value=0` in `Pipeline`. Preprocessing Results
###Code
data = pipeline.get_results()
print("source after processing:")
print(data["source"])
print("source batch shape:")
print(data["source"].shape)
print("---------------------------------------------------")
print("target after processing:")
print(data["target"])
print("target batch shape:")
print(data["target"].shape)
###Output
source after processing:
tensor([[ 101, 420, 1485, ..., 1061, 119, 102],
[ 101, 5220, 179, ..., 0, 0, 0],
[ 101, 122, 21174, ..., 0, 0, 0],
...,
[ 101, 1307, 230, ..., 0, 0, 0],
[ 101, 123, 10186, ..., 0, 0, 0],
[ 101, 5733, 3072, ..., 0, 0, 0]])
source batch shape:
torch.Size([64, 67])
---------------------------------------------------
target after processing:
tensor([[ 101, 5921, 2035, ..., 2068, 1012, 102],
[ 101, 2057, 2040, ..., 0, 0, 0],
[ 101, 1998, 1045, ..., 0, 0, 0],
...,
[ 101, 2045, 1005, ..., 0, 0, 0],
[ 101, 1996, 5424, ..., 0, 0, 0],
[ 101, 2009, 2097, ..., 0, 0, 0]])
target batch shape:
torch.Size([64, 70])
###Markdown
Transformer Architecture: Attention Unit. The basic form of the attention unit is defined in `scaled_dot_product_attention`. Given a set of queries (Q), keys (K), and values (V), it first performs the matrix multiplication of Q and K. The output of this multiplication gives the matching score between various elements of Q and K. Then all the weights are normalized across the keys dimension. Finally, the normalized score will be multiplied by the V to get the final result. The intuition behind the attention unit is essentially a dictionary look-up with interpolation.
###Code
import tensorflow as tf
def scaled_dot_product_attention(q, k, v, mask):
matmul_qk = tf.matmul(q, k, transpose_b=True)
dk = tf.cast(tf.shape(k)[-1], tf.float32)
scaled_attention_logits = matmul_qk / tf.math.sqrt(dk)
num_heads, inp_length = tf.shape(scaled_attention_logits)[1], tf.shape(scaled_attention_logits)[2]
num_heads_mask, inp_length_mask = tf.shape(mask)[1], tf.shape(mask)[2]
    # This manual tiling fixes an auto-broadcasting issue with tensorflow
scaled_attention_logits += tf.tile(mask * -1e9, [1, num_heads // num_heads_mask, inp_length // inp_length_mask, 1])
attention_weights = tf.nn.softmax(scaled_attention_logits, axis=-1)
output = tf.matmul(attention_weights, v)
return output
def point_wise_feed_forward_network(em_dim, dff):
return tf.keras.Sequential([
tf.keras.layers.Dense(dff, activation='relu'), # (batch_size, seq_len, dff)
tf.keras.layers.Dense(em_dim) # (batch_size, seq_len, em_dim)
])
###Output
_____no_output_____
###Markdown
Multi-head Attention. There are two drawbacks of the attention unit above: 1. The complexity of the matrix multiplication is O(N^3); when the batch size or embedding dimension increases, the computation will not scale well. 2. A single attention head is limited in expressing local correlation between two words, because it calculates correlation by normalizing across all embedding dimensions. Sometimes this overall normalization will remove interesting local patterns. A good analogy is to consider a single attention unit as globally averaging a signal, whereas a moving average is preferred to preserve certain information. Multi-head attention is used to overcome the issues above. It breaks the embedding dimension into multiple heads. As a result, each head's embedding dimension is divided by the number of heads, reducing the computation complexity. Moreover, each head only takes a fraction of the embedding and can be viewed as a specialized expert for a specific context. The final results can be combined using another dense layer.
###Code
from tensorflow.keras import layers
class MultiHeadAttention(layers.Layer):
def __init__(self, em_dim, num_heads):
super().__init__()
assert em_dim % num_heads == 0, "model dimension must be multiply of number of heads"
self.num_heads = num_heads
self.em_dim = em_dim
self.depth = em_dim // self.num_heads
self.wq = layers.Dense(em_dim)
self.wk = layers.Dense(em_dim)
self.wv = layers.Dense(em_dim)
self.dense = layers.Dense(em_dim)
def split_heads(self, x, batch_size):
x = tf.reshape(x, (batch_size, -1, self.num_heads, self.depth))
return tf.transpose(x, perm=[0, 2, 1, 3]) # B, num_heads, seq_len, depth
def call(self, v, k, q, mask):
batch_size = tf.shape(q)[0]
q = self.wq(q) # B, seq_len, em_dim
k = self.wk(k) # B, seq_len, em_dim
v = self.wv(v) # B, seq_len, em_dim
q = self.split_heads(q, batch_size)
k = self.split_heads(k, batch_size)
v = self.split_heads(v, batch_size)
scaled_attention = scaled_dot_product_attention(q, k, v, mask)
scaled_attention = tf.transpose(scaled_attention, perm=[0, 2, 1, 3]) #B, seq_len, num_heads, depth
concat_attention = tf.reshape(scaled_attention, (batch_size, -1, self.em_dim)) # B, seq_len, em_dim
output = self.dense(concat_attention)
return output
###Output
_____no_output_____
###Markdown
Encoder and Decoder Layer. Both the encoder and decoder layers go through multi-head attention. The decoder layer uses another multi-head attention module to bridge the encoder outputs and the targets. Specifically, in the decoder's second multi-head attention module, the encoded output is used as both values and keys, whereas the target embedding is used as a query to "look up" encoder information. In the end, there is a feed-forward neural network to transform the looked-up value into something useful.
###Code
class EncoderLayer(layers.Layer):
def __init__(self, em_dim, num_heads, dff, rate=0.1):
super().__init__()
self.mha = MultiHeadAttention(em_dim, num_heads)
self.ffn = point_wise_feed_forward_network(em_dim, dff)
self.layernorm1 = layers.LayerNormalization(epsilon=1e-6)
self.layernorm2 = layers.LayerNormalization(epsilon=1e-6)
self.dropout1 = layers.Dropout(rate)
self.dropout2 = layers.Dropout(rate)
def call(self, x, training, mask):
attn_output = self.mha(x, x, x, mask)
attn_output = self.dropout1(attn_output, training=training)
out1 = self.layernorm1(x + attn_output)
ffn_output = self.ffn(out1)
ffn_output = self.dropout2(ffn_output, training=training)
out2 = self.layernorm2(out1 + ffn_output)
return out2
class DecoderLayer(layers.Layer):
def __init__(self, em_dim, num_heads, diff, rate=0.1):
super().__init__()
self.mha1 = MultiHeadAttention(em_dim, num_heads)
self.mha2 = MultiHeadAttention(em_dim, num_heads)
self.ffn = point_wise_feed_forward_network(em_dim, diff)
self.layernorm1 = layers.LayerNormalization(epsilon=1e-6)
self.layernorm2 = layers.LayerNormalization(epsilon=1e-6)
self.layernorm3 = layers.LayerNormalization(epsilon=1e-6)
self.dropout1 = layers.Dropout(rate)
self.dropout2 = layers.Dropout(rate)
self.dropout3 = layers.Dropout(rate)
def call(self, x, enc_out, training, decode_mask, padding_mask):
attn1 = self.mha1(x, x, x, decode_mask)
attn1 = self.dropout1(attn1, training=training)
out1 = self.layernorm1(attn1 + x)
attn2 = self.mha2(enc_out, enc_out, out1, padding_mask)
attn2 = self.dropout2(attn2, training=training)
out2 = self.layernorm2(attn2 + out1)
ffn_output = self.ffn(out2)
ffn_output = self.dropout3(ffn_output, training=training)
out3 = self.layernorm3(ffn_output + out2)
return out3
###Output
_____no_output_____
###Markdown
Putting Everything Together. A transformer consists of an Encoder and a Decoder, which in turn consist of multiple stacked encoder/decoder layers. One interesting property of transformers is that they do not have an intrinsic awareness of the position dimension. Therefore, a positional encoding is usually added to the embedding matrix to give the embedding position context. A nice tutorial about positional encoding can be found [here](https://kazemnejad.com/blog/transformer_architecture_positional_encoding/).
###Code
def get_angles(pos, i, em_dim):
angle_rates = 1 / np.power(10000, (2 * (i // 2)) / np.float32(em_dim))
return pos * angle_rates
def positional_encoding(position, em_dim):
angle_rads = get_angles(np.arange(position)[:, np.newaxis], np.arange(em_dim)[np.newaxis, :], em_dim)
# apply sin to even indices in the array; 2i
angle_rads[:, 0::2] = np.sin(angle_rads[:, 0::2])
# apply cos to odd indices in the array; 2i+1
angle_rads[:, 1::2] = np.cos(angle_rads[:, 1::2])
pos_encoding = angle_rads[np.newaxis, ...]
return tf.cast(pos_encoding, dtype=tf.float32)
class Encoder(layers.Layer):
def __init__(self, num_layers, em_dim, num_heads, dff, input_vocab, max_pos_enc, rate=0.1):
super().__init__()
self.em_dim = em_dim
self.num_layers = num_layers
self.embedding = layers.Embedding(input_vocab, em_dim)
self.pos_encoding = positional_encoding(max_pos_enc, self.em_dim)
self.enc_layers = [EncoderLayer(em_dim, num_heads, dff, rate) for _ in range(num_layers)]
self.dropout = layers.Dropout(rate)
def call(self, x, mask, training=None):
seq_len = tf.shape(x)[1]
x = self.embedding(x)
x *= tf.math.sqrt(tf.cast(self.em_dim, tf.float32))
x += self.pos_encoding[:, :seq_len, :]
x = self.dropout(x, training=training)
for i in range(self.num_layers):
x = self.enc_layers[i](x, training, mask)
return x
class Decoder(layers.Layer):
def __init__(self, num_layers, em_dim, num_heads, dff, target_vocab, max_pos_enc, rate=0.1):
super().__init__()
self.em_dim = em_dim
self.num_layers = num_layers
self.embedding = layers.Embedding(target_vocab, em_dim)
self.pos_encoding = positional_encoding(max_pos_enc, em_dim)
self.dec_layers = [DecoderLayer(em_dim, num_heads, dff, rate) for _ in range(num_layers)]
self.dropout = layers.Dropout(rate)
def call(self, x, enc_output, decode_mask, padding_mask, training=None):
seq_len = tf.shape(x)[1]
x = self.embedding(x)
x *= tf.math.sqrt(tf.cast(self.em_dim, tf.float32))
x += self.pos_encoding[:, :seq_len, :]
x = self.dropout(x, training=training)
for i in range(self.num_layers):
x = self.dec_layers[i](x, enc_output, training, decode_mask, padding_mask)
return x
def transformer(num_layers, em_dim, num_heads, dff, input_vocab, target_vocab, max_pos_enc, max_pos_dec, rate=0.1):
inputs = layers.Input(shape=(None, ))
targets = layers.Input(shape=(None, ))
encode_mask = layers.Input(shape=(None, None, None))
decode_mask = layers.Input(shape=(None, None, None))
x = Encoder(num_layers, em_dim, num_heads, dff, input_vocab, max_pos_enc, rate=rate)(inputs, encode_mask)
x = Decoder(num_layers, em_dim, num_heads, dff, target_vocab, max_pos_dec, rate=rate)(targets,
x,
decode_mask,
encode_mask)
x = layers.Dense(target_vocab)(x)
model = tf.keras.Model(inputs=[inputs, targets, encode_mask, decode_mask], outputs=x)
return model
model = fe.build(
model_fn=lambda: transformer(num_layers=4,
em_dim=em_dim,
num_heads=8,
dff=512,
input_vocab=pt_tokenizer.vocab_size,
target_vocab=en_tokenizer.vocab_size,
max_pos_enc=1000,
max_pos_dec=1000),
optimizer_fn="adam")
###Output
_____no_output_____
###Markdown
Network Operations. Now that we have defined the transformer architecture, another thing worth mentioning is the mask. A mask is a boolean array that we create to tell the network to **ignore** certain words within the sentence. For example, a padding mask is used to tell the network to ignore the words we padded, and a look-ahead mask is needed so that the answer is not given away when predicting the next word. The loss function of the transformer is simply a masked cross-entropy loss, as it only considers predictions that are not masked.
###Code
from fastestimator.op.tensorop import TensorOp
from fastestimator.op.tensorop.loss import LossOp
from fastestimator.op.tensorop.model import ModelOp, UpdateOp
class CreateMasks(TensorOp):
def forward(self, data, state):
inp, tar = data
encode_mask = self.create_padding_mask(inp)
dec_look_ahead_mask = self.create_look_ahead_mask(tf.shape(tar)[1])
dec_target_padding_mask = self.create_padding_mask(tar)
decode_mask = tf.maximum(dec_target_padding_mask, dec_look_ahead_mask)
return encode_mask, decode_mask
@staticmethod
def create_padding_mask(seq):
seq = tf.cast(tf.math.equal(seq, 0), tf.float32)
return seq[:, tf.newaxis, tf.newaxis, :] # (batch_size, 1, 1, seq_len)
@staticmethod
def create_look_ahead_mask(size):
mask = 1 - tf.linalg.band_part(tf.ones((size, size)), -1, 0)
return mask # (seq_len, seq_len)
class ShiftData(TensorOp):
def forward(self, data, state):
target = data
return target[:, :-1], target[:, 1:]
class MaskedCrossEntropy(LossOp):
def __init__(self, inputs, outputs, mode=None):
super().__init__(inputs=inputs, outputs=outputs, mode=mode)
self.loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True, reduction='none')
def forward(self, data, state):
y_pred, y_true = data
mask = tf.cast(tf.math.logical_not(tf.math.equal(y_true, 0)), tf.float32)
loss = self.loss_fn(y_true, y_pred) * mask
loss = tf.reduce_sum(loss) / tf.reduce_sum(mask)
return loss
network = fe.Network(ops=[
ShiftData(inputs="target", outputs=("target_inp", "target_real")),
CreateMasks(inputs=("source", "target_inp"), outputs=("encode_mask", "decode_mask")),
ModelOp(model=model, inputs=("source", "target_inp", "encode_mask", "decode_mask"), outputs="pred"),
MaskedCrossEntropy(inputs=("pred", "target_real"), outputs="ce"),
UpdateOp(model=model, loss_name="ce")
])
###Output
_____no_output_____
###Markdown
Metrics and Learning Rate Scheduling. The metric used to evaluate the model is a masked accuracy, which is simply accuracy computed over the unmasked predictions and ground truths. The learning rate scheduler uses a warm-up phase followed by an inverse square-root decay.
###Code
import tempfile
from fastestimator.trace.adapt import LRScheduler
from fastestimator.trace.io import BestModelSaver
from fastestimator.trace.trace import Trace
model_dir=tempfile.mkdtemp()
def lr_fn(step, em_dim, warmupstep=4000):
lr = em_dim**-0.5 * min(step**-0.5, step * warmupstep**-1.5)
return lr
class MaskedAccuracy(Trace):
def on_epoch_begin(self, data):
self.correct = 0
self.total = 0
def on_batch_end(self, data):
y_pred, y_true = data["pred"].numpy(), data["target_real"].numpy()
mask = np.logical_not(y_true == 0)
matches = np.logical_and(y_true == np.argmax(y_pred, axis=2), mask)
self.correct += np.sum(matches)
self.total += np.sum(mask)
def on_epoch_end(self, data):
data.write_with_log(self.outputs[0], self.correct / self.total)
traces = [
MaskedAccuracy(inputs=("pred", "target_real"), outputs="masked_acc", mode="!train"),
BestModelSaver(model=model, save_dir=model_dir, metric="masked_acc", save_best_mode="max"),
LRScheduler(model=model, lr_fn=lambda step: lr_fn(step, em_dim))
]
estimator = fe.Estimator(pipeline=pipeline,
network=network,
traces=traces,
epochs=epochs,
max_train_steps_per_epoch=max_train_steps_per_epoch,
max_eval_steps_per_epoch=max_eval_steps_per_epoch)
###Output
_____no_output_____
###Markdown
Start the trainingThe training will take around 30 minutes on a single V100 GPU.
###Code
estimator.fit()
###Output
_____no_output_____
###Markdown
Let's translate something!
###Code
def token_to_words(sample, tokenizer):
words = tokenizer.decode(sample)
if '[CLS]' in words:
words = words[words.index('[CLS]')+5:]
if '[SEP]' in words:
words = words[:words.index('[SEP]')]
return words
sample_test_data = pipeline.get_results(mode="test")
sample_test_data = network.transform(data=sample_test_data, mode="test")
source = sample_test_data["source"].numpy()
predicted = sample_test_data["pred"].numpy()
predicted = np.argmax(predicted, axis=-1)
ground_truth = sample_test_data["target_real"].numpy()
index = np.random.randint(0, source.shape[0])
sample_source, sample_predicted, sample_ground_truth = source[index], predicted[index], ground_truth[index]
print("Source Language: ")
print(token_to_words(sample_source, pt_tokenizer))
print("")
print("Translation Ground Truth: ")
print(token_to_words(sample_ground_truth, en_tokenizer))
print("")
print("Machine Translation: ")
print(token_to_words(sample_predicted, en_tokenizer))
###Output
Source Language:
muito obrigada.
Translation Ground Truth:
thank you so much.
Machine Translation:
thank you very much.
###Markdown
Neural Machine Translation Using TransformerIn this tutorial we will look at a sequence-to-sequence task: translating one language into another. The architecture used for the task is the famous `Transformer`. The transformer architecture was first proposed in this [paper](https://arxiv.org/abs/1706.03762). The general idea behind the architecture is the `attention` mechanism, which re-weights the features throughout the network. Another advantage of the transformer architecture is that it removes the sequential dependency of the data, allowing more efficient parallelization of training. We will implement every detail of the transformer in this tutorial. Let's get started!First let's define some hyper-parameters that we will use later.
###Code
data_dir = None
epochs=20
em_dim=128
batch_size=64
max_train_steps_per_epoch=None
max_eval_steps_per_epoch=None
###Output
_____no_output_____
###Markdown
DatasetIn this machine translation task, we will use the [TED translation dataset](https://github.com/neulab/word-embeddings-for-nmt). The dataset consists of 14 different translation tasks, such as Portuguese to English (`pt_to_en`), Russian to English (`ru_to_en`), and many others. In this tutorial, we will translate Portuguese to English. You can access this dataset through our dataset API - `tednmt`. Feel free to check the docstring of the API for other translation options.
###Code
from fastestimator.dataset.data import tednmt
train_ds, eval_ds, test_ds = tednmt.load_data(data_dir, translate_option="pt_to_en")
###Output
_____no_output_____
###Markdown
Now that the dataset is downloaded, let's check what the dataset looks like:
###Code
print("example source language:")
print(train_ds[0]["source"])
print("")
print("example target language:")
print(train_ds[0]["target"])
###Output
example source language:
entre todas as grandes privações com que nos debatemos hoje — pensamos em financeiras e económicas primeiro — aquela que mais me preocupa é a falta de diálogo político — a nossa capacidade de abordar conflitos modernos como eles são , de ir à raiz do que eles são e perceber os agentes-chave e lidar com eles .
example target language:
amongst all the troubling deficits we struggle with today — we think of financial and economic primarily — the ones that concern me most is the deficit of political dialogue — our ability to address modern conflicts as they are , to go to the source of what they 're all about and to understand the key players and to deal with them .
###Markdown
Preprocessing the languagesSince the text by itself cannot be recognized by computers, we need to perform a series of transformations to the text. Here are the steps:1. Split the sentence into words or sub-words. For example, "I love apple" can be split into ["I", "love", "apple"]. Sometimes, in order to represent more words, a word is further reduced into sub-words. For example, `tokenization` can be split into `token` and `_ization`. As a result, a word like "civilization" doesn't require extra space when both `civil` and `_ization` are already in the dictionary.2. Map the tokens into a discrete index according to the dictionary. In this task, we are loading a pretrained tokenizer with a built-in dictionary already.3. Add a [start] and [end] token to the beginning and end of each sentence's index sequence. This mainly helps the network identify the beginning and end of each sentence.4. When creating a batch of multiple sentences, pad the shorter sentences with 0 so that each sentence in the batch has the same length.
###Code
import fastestimator as fe
from transformers import BertTokenizer
from fastestimator.op.numpyop import NumpyOp
import numpy as np
class Encode(NumpyOp):
def __init__(self, tokenizer, inputs, outputs, mode=None):
super().__init__(inputs=inputs, outputs=outputs, mode=mode)
self.tokenizer = tokenizer
def forward(self, data, state):
return np.array(self.tokenizer.encode(data))
pt_tokenizer = BertTokenizer.from_pretrained("neuralmind/bert-base-portuguese-cased")
en_tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
pipeline = fe.Pipeline(
train_data=train_ds,
eval_data=eval_ds,
test_data=test_ds,
batch_size=batch_size,
ops=[
Encode(inputs="source", outputs="source", tokenizer=pt_tokenizer),
Encode(inputs="target", outputs="target", tokenizer=en_tokenizer)
],
pad_value=0)
###Output
_____no_output_____
###Markdown
In the above code, `tokenizer.encode` takes the sentence and executes steps 1 - 3. The padding step is done by providing `pad_value=0` in `Pipeline`. Preprocessing Results
###Code
data = pipeline.get_results()
print("source after processing:")
print(data["source"])
print("source batch shape:")
print(data["source"].shape)
print("---------------------------------------------------")
print("target after processing:")
print(data["target"])
print("target batch shape:")
print(data["target"].shape)
###Output
source after processing:
tensor([[ 101, 420, 1485, ..., 1061, 119, 102],
[ 101, 5220, 179, ..., 0, 0, 0],
[ 101, 122, 21174, ..., 0, 0, 0],
...,
[ 101, 1307, 230, ..., 0, 0, 0],
[ 101, 123, 10186, ..., 0, 0, 0],
[ 101, 5733, 3072, ..., 0, 0, 0]])
source batch shape:
torch.Size([64, 67])
---------------------------------------------------
target after processing:
tensor([[ 101, 5921, 2035, ..., 2068, 1012, 102],
[ 101, 2057, 2040, ..., 0, 0, 0],
[ 101, 1998, 1045, ..., 0, 0, 0],
...,
[ 101, 2045, 1005, ..., 0, 0, 0],
[ 101, 1996, 5424, ..., 0, 0, 0],
[ 101, 2009, 2097, ..., 0, 0, 0]])
target batch shape:
torch.Size([64, 70])
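###Markdown
The 101 and 102 ids at the start and end of each row are the tokenizer's [CLS] and [SEP] special tokens. As a quick illustration (a sketch added for this write-up; the exact ids depend on the pretrained vocabulary), encoding a single sentence shows steps 1 - 3 directly:
###Code
# Hypothetical check of what the tokenizer produces for one sentence.
example_ids = pt_tokenizer.encode("muito obrigada.")
print(example_ids)                       # starts with 101 ([CLS]) and ends with 102 ([SEP])
print(pt_tokenizer.decode(example_ids))  # decodes back to the (sub-)tokenized sentence
###Output
_____no_output_____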
###Markdown
Transformer Architecture Attention UnitThe basic form of the attention unit is defined in `scaled_dot_product_attention`. Given a set of queries (Q), keys (K), and values (V), it first performs the matrix multiplication of Q and K. The output of this multiplication gives the matching score between elements of Q and K. The scores are then scaled by the square root of the key dimension and normalized with a softmax across the keys dimension. Finally, the normalized scores are multiplied by V to get the final result. The intuition behind the attention unit is essentially a dictionary look-up with interpolation.
###Code
import tensorflow as tf
def scaled_dot_product_attention(q, k, v, mask):
matmul_qk = tf.matmul(q, k, transpose_b=True)
dk = tf.cast(tf.shape(k)[-1], tf.float32)
scaled_attention_logits = matmul_qk / tf.math.sqrt(dk)
scaled_attention_logits += (mask * -1e9) # this is to make the softmax of masked cells to be 0
attention_weights = tf.nn.softmax(scaled_attention_logits, axis=-1)
output = tf.matmul(attention_weights, v)
return output
def point_wise_feed_forward_network(em_dim, dff):
return tf.keras.Sequential([
tf.keras.layers.Dense(dff, activation='relu'), # (batch_size, seq_len, dff)
tf.keras.layers.Dense(em_dim) # (batch_size, seq_len, em_dim)
])
###Output
_____no_output_____
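###Markdown
As a quick sanity check (a sketch added for this write-up, not part of the original notebook), the attention unit preserves the (batch, heads, seq_len, depth) shape of the values:
###Code
# Hypothetical shape check with random toy tensors.
q = tf.random.uniform((1, 2, 4, 8))  # (batch, heads, seq_len, depth)
k = tf.random.uniform((1, 2, 4, 8))
v = tf.random.uniform((1, 2, 4, 8))
no_mask = tf.zeros((1, 1, 1, 4))     # an all-zero mask leaves the logits unchanged
print(scaled_dot_product_attention(q, k, v, no_mask).shape)  # -> (1, 2, 4, 8)
###Output
_____no_output_____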
###Markdown
Multi-head AttentionThere are two drawbacks of the attention unit above:1. The complexity of the matrix multiplication is O(N^3); as the batch size or embedding dimension increases, the computation does not scale well. 2. A single attention head is limited in expressing local correlation between two words, because it calculates correlation by normalizing across all embedding dimensions. Sometimes this overall normalization removes interesting local patterns. A good analogy is to consider a single attention unit as globally averaging a signal, whereas a moving average is preferred to preserve certain information.Multi-head attention is used to overcome the issues above. It breaks the embedding dimension into multiple heads. As a result, each head's embedding dimension is divided by the number of heads, reducing the computation complexity. Moreover, each head only takes a fraction of the embedding and can be viewed as a specialized expert for a specific context. The final results are combined using another dense layer.
###Code
from tensorflow.keras import layers
class MultiHeadAttention(layers.Layer):
def __init__(self, em_dim, num_heads):
super().__init__()
assert em_dim % num_heads == 0, "model dimension must be a multiple of the number of heads"
self.num_heads = num_heads
self.em_dim = em_dim
self.depth = em_dim // self.num_heads
self.wq = layers.Dense(em_dim)
self.wk = layers.Dense(em_dim)
self.wv = layers.Dense(em_dim)
self.dense = layers.Dense(em_dim)
def split_heads(self, x, batch_size):
x = tf.reshape(x, (batch_size, -1, self.num_heads, self.depth))
return tf.transpose(x, perm=[0, 2, 1, 3]) # B, num_heads, seq_len, depth
def call(self, v, k, q, mask):
batch_size = tf.shape(q)[0]
q = self.wq(q) # B, seq_len, em_dim
k = self.wk(k) # B, seq_len, em_dim
v = self.wv(v) # B, seq_len, em_dim
q = self.split_heads(q, batch_size)
k = self.split_heads(k, batch_size)
v = self.split_heads(v, batch_size)
scaled_attention = scaled_dot_product_attention(q, k, v, mask)
scaled_attention = tf.transpose(scaled_attention, perm=[0, 2, 1, 3]) #B, seq_len, num_heads, depth
concat_attention = tf.reshape(scaled_attention, (batch_size, -1, self.em_dim)) # B, seq_len, em_dim
output = self.dense(concat_attention)
return output
###Output
_____no_output_____
###Markdown
Encoder and Decoder layerBoth the encoder and decoder layers go through multi-head attention. The decoder layer uses a second multi-head attention module to bridge the encoder outputs and the targets. Specifically, in the decoder's second multi-head attention module, the encoded output is used as both values and keys, whereas the target embedding is used as a query to "look up" encoder information. In the end, there is a feed-forward neural network to transform the looked-up value into something useful.
###Code
class EncoderLayer(layers.Layer):
def __init__(self, em_dim, num_heads, dff, rate=0.1):
super().__init__()
self.mha = MultiHeadAttention(em_dim, num_heads)
self.ffn = point_wise_feed_forward_network(em_dim, dff)
self.layernorm1 = layers.LayerNormalization(epsilon=1e-6)
self.layernorm2 = layers.LayerNormalization(epsilon=1e-6)
self.dropout1 = layers.Dropout(rate)
self.dropout2 = layers.Dropout(rate)
def call(self, x, training, mask):
attn_output = self.mha(x, x, x, mask)
attn_output = self.dropout1(attn_output, training=training)
out1 = self.layernorm1(x + attn_output)
ffn_output = self.ffn(out1)
ffn_output = self.dropout2(ffn_output, training=training)
out2 = self.layernorm2(out1 + ffn_output)
return out2
class DecoderLayer(layers.Layer):
def __init__(self, em_dim, num_heads, diff, rate=0.1):
super().__init__()
self.mha1 = MultiHeadAttention(em_dim, num_heads)
self.mha2 = MultiHeadAttention(em_dim, num_heads)
self.ffn = point_wise_feed_forward_network(em_dim, diff)
self.layernorm1 = layers.LayerNormalization(epsilon=1e-6)
self.layernorm2 = layers.LayerNormalization(epsilon=1e-6)
self.layernorm3 = layers.LayerNormalization(epsilon=1e-6)
self.dropout1 = layers.Dropout(rate)
self.dropout2 = layers.Dropout(rate)
self.dropout3 = layers.Dropout(rate)
def call(self, x, enc_out, training, decode_mask, padding_mask):
attn1 = self.mha1(x, x, x, decode_mask)
attn1 = self.dropout1(attn1, training=training)
out1 = self.layernorm1(attn1 + x)
attn2 = self.mha2(enc_out, enc_out, out1, padding_mask)
attn2 = self.dropout2(attn2, training=training)
out2 = self.layernorm2(attn2 + out1)
ffn_output = self.ffn(out2)
ffn_output = self.dropout3(ffn_output, training=training)
out3 = self.layernorm3(ffn_output + out2)
return out3
###Output
_____no_output_____
###Markdown
Putting Everything TogetherA transformer consists of an Encoder and Decoder, which in turn consist of multiple stacked encoder/decoder layers. One interesting property of transformers is that they do not have an intrinsic awareness of the position dimension. Therefore, a positional encoding is usually added to the embedding matrix to give the embedding position context. A nice tutorial about positional encoding can be found [here](https://kazemnejad.com/blog/transformer_architecture_positional_encoding/).
###Code
def get_angles(pos, i, em_dim):
angle_rates = 1 / np.power(10000, (2 * (i // 2)) / np.float32(em_dim))
return pos * angle_rates
def positional_encoding(position, em_dim):
angle_rads = get_angles(np.arange(position)[:, np.newaxis], np.arange(em_dim)[np.newaxis, :], em_dim)
# apply sin to even indices in the array; 2i
angle_rads[:, 0::2] = np.sin(angle_rads[:, 0::2])
# apply cos to odd indices in the array; 2i+1
angle_rads[:, 1::2] = np.cos(angle_rads[:, 1::2])
pos_encoding = angle_rads[np.newaxis, ...]
return tf.cast(pos_encoding, dtype=tf.float32)
class Encoder(layers.Layer):
def __init__(self, num_layers, em_dim, num_heads, dff, input_vocab, max_pos_enc, rate=0.1):
super().__init__()
self.em_dim = em_dim
self.num_layers = num_layers
self.embedding = layers.Embedding(input_vocab, em_dim)
self.pos_encoding = positional_encoding(max_pos_enc, self.em_dim)
self.enc_layers = [EncoderLayer(em_dim, num_heads, dff, rate) for _ in range(num_layers)]
self.dropout = layers.Dropout(rate)
def call(self, x, mask, training=None):
seq_len = tf.shape(x)[1]
x = self.embedding(x)
x *= tf.math.sqrt(tf.cast(self.em_dim, tf.float32))
x += self.pos_encoding[:, :seq_len, :]
x = self.dropout(x, training=training)
for i in range(self.num_layers):
x = self.enc_layers[i](x, training, mask)
return x
class Decoder(layers.Layer):
def __init__(self, num_layers, em_dim, num_heads, dff, target_vocab, max_pos_enc, rate=0.1):
super().__init__()
self.em_dim = em_dim
self.num_layers = num_layers
self.embedding = layers.Embedding(target_vocab, em_dim)
self.pos_encoding = positional_encoding(max_pos_enc, em_dim)
self.dec_layers = [DecoderLayer(em_dim, num_heads, dff, rate) for _ in range(num_layers)]
self.dropout = layers.Dropout(rate)
def call(self, x, enc_output, decode_mask, padding_mask, training=None):
seq_len = tf.shape(x)[1]
x = self.embedding(x)
x *= tf.math.sqrt(tf.cast(self.em_dim, tf.float32))
x += self.pos_encoding[:, :seq_len, :]
x = self.dropout(x, training=training)
for i in range(self.num_layers):
x = self.dec_layers[i](x, enc_output, training, decode_mask, padding_mask)
return x
def transformer(num_layers, em_dim, num_heads, dff, input_vocab, target_vocab, max_pos_enc, max_pos_dec, rate=0.1):
inputs = layers.Input(shape=(None, ))
targets = layers.Input(shape=(None, ))
encode_mask = layers.Input(shape=(None, None, None))
decode_mask = layers.Input(shape=(None, None, None))
x = Encoder(num_layers, em_dim, num_heads, dff, input_vocab, max_pos_enc, rate=rate)(inputs, encode_mask)
x = Decoder(num_layers, em_dim, num_heads, dff, target_vocab, max_pos_dec, rate=rate)(targets,
x,
decode_mask,
encode_mask)
x = layers.Dense(target_vocab)(x)
model = tf.keras.Model(inputs=[inputs, targets, encode_mask, decode_mask], outputs=x)
return model
model = fe.build(
model_fn=lambda: transformer(num_layers=4,
em_dim=em_dim,
num_heads=8,
dff=512,
input_vocab=pt_tokenizer.vocab_size,
target_vocab=en_tokenizer.vocab_size,
max_pos_enc=1000,
max_pos_dec=1000),
optimizer_fn="adam")
###Output
_____no_output_____
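###Markdown
Before moving on, a small check (added for this write-up) of the positional encoding defined above: it returns one vector per position, and its values stay within [-1, 1] because they are sines and cosines.
###Code
# Hypothetical sanity check of positional_encoding.
pe = positional_encoding(position=50, em_dim=em_dim)
print(pe.shape)                                            # -> (1, 50, em_dim)
print(float(tf.reduce_min(pe)), float(tf.reduce_max(pe)))  # both within [-1, 1]
###Output
_____no_output_____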
###Markdown
Network OperationsNow that we have defined the transformer architecture, another thing worth mentioning is the mask. A mask is a boolean array that tells the network to **ignore** certain positions within the sentence. For example, a padding mask tells the network to ignore the tokens we padded, and a look-ahead mask prevents the decoder from seeing future words so that it cannot give away the answer while predicting the next token. The loss function of the transformer is simply a masked cross entropy loss: it only considers predictions at positions that are not masked.
###Code
from fastestimator.op.tensorop import TensorOp
from fastestimator.op.tensorop.loss import LossOp
from fastestimator.op.tensorop.model import ModelOp, UpdateOp
class CreateMasks(TensorOp):
def forward(self, data, state):
inp, tar = data
encode_mask = self.create_padding_mask(inp)
dec_look_ahead_mask = self.create_look_ahead_mask(tf.shape(tar)[1])
dec_target_padding_mask = self.create_padding_mask(tar)
decode_mask = tf.maximum(dec_target_padding_mask, dec_look_ahead_mask)
return encode_mask, decode_mask
@staticmethod
def create_padding_mask(seq):
seq = tf.cast(tf.math.equal(seq, 0), tf.float32)
return seq[:, tf.newaxis, tf.newaxis, :] # (batch_size, 1, 1, seq_len)
@staticmethod
def create_look_ahead_mask(size):
mask = 1 - tf.linalg.band_part(tf.ones((size, size)), -1, 0)
return mask # (seq_len, seq_len)
class ShiftData(TensorOp):
def forward(self, data, state):
target = data
return target[:, :-1], target[:, 1:]
class MaskedCrossEntropy(LossOp):
def __init__(self, inputs, outputs, mode=None):
super().__init__(inputs=inputs, outputs=outputs, mode=mode)
self.loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True, reduction='none')
def forward(self, data, state):
y_pred, y_true = data
mask = tf.cast(tf.math.logical_not(tf.math.equal(y_true, 0)), tf.float32)
loss = self.loss_fn(y_true, y_pred) * mask
loss = tf.reduce_sum(loss) / tf.reduce_sum(mask)
return loss
network = fe.Network(ops=[
ShiftData(inputs="target", outputs=("target_inp", "target_real")),
CreateMasks(inputs=("source", "target_inp"), outputs=("encode_mask", "decode_mask")),
ModelOp(model=model, inputs=("source", "target_inp", "encode_mask", "decode_mask"), outputs="pred"),
MaskedCrossEntropy(inputs=("pred", "target_real"), outputs="ce"),
UpdateOp(model=model, loss_name="ce")
])
###Output
_____no_output_____
###Markdown
Metrics and Learning Rate SchedulingThe metric used to evaluate the model is a masked accuracy: ordinary accuracy computed only over the unmasked (non-padded) positions. The learning rate scheduler uses a linear warm-up followed by an inverse-square-root decay, as in the original transformer paper.
###Code
import tempfile
from fastestimator.trace.adapt import LRScheduler
from fastestimator.trace.io import BestModelSaver
from fastestimator.trace.trace import Trace
model_dir=tempfile.mkdtemp()
def lr_fn(step, em_dim, warmupstep=4000):
lr = em_dim**-0.5 * min(step**-0.5, step * warmupstep**-1.5)
return lr
class MaskedAccuracy(Trace):
def on_epoch_begin(self, data):
self.correct = 0
self.total = 0
def on_batch_end(self, data):
y_pred, y_true = data["pred"].numpy(), data["target_real"].numpy()
mask = np.logical_not(y_true == 0)
matches = np.logical_and(y_true == np.argmax(y_pred, axis=2), mask)
self.correct += np.sum(matches)
self.total += np.sum(mask)
def on_epoch_end(self, data):
data.write_with_log(self.outputs[0], self.correct / self.total)
traces = [
MaskedAccuracy(inputs=("pred", "target_real"), outputs="masked_acc", mode="!train"),
BestModelSaver(model=model, save_dir=model_dir, metric="masked_acc", save_best_mode="max"),
LRScheduler(model=model, lr_fn=lambda step: lr_fn(step, em_dim))
]
estimator = fe.Estimator(pipeline=pipeline,
network=network,
traces=traces,
epochs=epochs,
max_train_steps_per_epoch=max_train_steps_per_epoch,
max_eval_steps_per_epoch=max_eval_steps_per_epoch)
###Output
_____no_output_____
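###Markdown
The masked accuracy logic is easy to verify on a toy example (added for this write-up; the arrays are invented): with two real tokens, one of which is predicted correctly, the score is 0.5.
###Code
# Hypothetical toy check of the masked accuracy computed in the trace above.
toy_true = np.array([[5, 3, 0, 0]])      # two real tokens followed by padding
toy_pred = np.zeros((1, 4, 10))          # logits over a made-up vocabulary of 10
toy_pred[0, 0, 5] = 1.0                  # correct prediction for the first token
toy_pred[0, 1, 7] = 1.0                  # wrong prediction for the second token
mask = np.logical_not(toy_true == 0)
matches = np.logical_and(toy_true == np.argmax(toy_pred, axis=2), mask)
print(np.sum(matches) / np.sum(mask))    # -> 0.5; padded positions never count
###Output
_____no_output_____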
###Markdown
Start the trainingThe training will take around 30 minutes on a single V100 GPU.
###Code
estimator.fit()
###Output
_____no_output_____
###Markdown
Let's translate something!
###Code
def token_to_words(sample, tokenizer):
words = tokenizer.decode(sample)
if '[CLS]' in words:
words = words[words.index('[CLS]')+5:]
if '[SEP]' in words:
words = words[:words.index('[SEP]')]
return words
sample_test_data = pipeline.get_results(mode="test")
sample_test_data = network.transform(data=sample_test_data, mode="test")
source = sample_test_data["source"].numpy()
predicted = sample_test_data["pred"].numpy()
predicted = np.argmax(predicted, axis=-1)
ground_truth = sample_test_data["target_real"].numpy()
index = np.random.randint(0, source.shape[0])
sample_source, sample_predicted, sample_ground_truth = source[index], predicted[index], ground_truth[index]
print("Source Language: ")
print(token_to_words(sample_source, pt_tokenizer))
print("")
print("Translation Ground Truth: ")
print(token_to_words(sample_ground_truth, en_tokenizer))
print("")
print("Machine Translation: ")
print(token_to_words(sample_predicted, en_tokenizer))
###Output
Source Language:
muito obrigada.
Translation Ground Truth:
thank you so much.
Machine Translation:
thank you very much.
###Markdown
Neural Machine Translation Using TransformerIn this tutorial we will look at a sequence-to-sequence task: translating one language into another. The architecture used for the task is the famous `Transformer`. The transformer architecture was first proposed in this [paper](https://arxiv.org/abs/1706.03762). The general idea behind the architecture is the `attention` mechanism, which re-weights the features throughout the network. Another advantage of the transformer architecture is that it removes the sequential dependency of the data, allowing more efficient parallelization of training. We will implement every detail of the transformer in this tutorial. Let's get started!First let's define some hyper-parameters that we will use later.
###Code
data_dir = None
epochs=20
em_dim=128
batch_size=64
train_steps_per_epoch=None
eval_steps_per_epoch=None
###Output
_____no_output_____
###Markdown
DatasetIn this machine translation task, we will use the [TED translation dataset](https://github.com/neulab/word-embeddings-for-nmt). The dataset consists of 14 different translation tasks, such as Portuguese to English (`pt_to_en`), Russian to English (`ru_to_en`), and many others. In this tutorial, we will translate Portuguese to English. You can access this dataset through our dataset API - `tednmt`. Feel free to check the docstring of the API for other translation options.
###Code
from fastestimator.dataset.data import tednmt
train_ds, eval_ds, test_ds = tednmt.load_data(data_dir, translate_option="pt_to_en")
###Output
_____no_output_____
###Markdown
Now that the dataset is downloaded, let's check what the dataset looks like:
###Code
print("example source language:")
print(train_ds[0]["source"])
print("")
print("example target language:")
print(train_ds[0]["target"])
###Output
example source language:
entre todas as grandes privações com que nos debatemos hoje — pensamos em financeiras e económicas primeiro — aquela que mais me preocupa é a falta de diálogo político — a nossa capacidade de abordar conflitos modernos como eles são , de ir à raiz do que eles são e perceber os agentes-chave e lidar com eles .
example target language:
amongst all the troubling deficits we struggle with today — we think of financial and economic primarily — the ones that concern me most is the deficit of political dialogue — our ability to address modern conflicts as they are , to go to the source of what they 're all about and to understand the key players and to deal with them .
###Markdown
Preprocessing the languagesSince the text by itself cannot be recognized by computers, we need to perform a series of transformations to the text. Here are the steps:1. Split the sentence into words or sub-words. For example, "I love apple" can be split into ["I", "love", "apple"]. Sometimes, in order to represent more words, a word is further reduced into sub-words. For example, `tokenization` can be split into `token` and `_ization`. As a result, a word like "civilization" doesn't require extra space when both `civil` and `_ization` are already in the dictionary.2. Map the tokens into a discrete index according to the dictionary. In this task, we are loading a pretrained tokenizer with a built-in dictionary already.3. Add a [start] and [end] token to the beginning and end of each sentence's index sequence. This mainly helps the network identify the beginning and end of each sentence.4. When creating a batch of multiple sentences, pad the shorter sentences with 0 so that each sentence in the batch has the same length.
###Code
import fastestimator as fe
from transformers import BertTokenizer
from fastestimator.op.numpyop import NumpyOp
import numpy as np
class Encode(NumpyOp):
def __init__(self, tokenizer, inputs, outputs, mode=None):
super().__init__(inputs=inputs, outputs=outputs, mode=mode)
self.tokenizer = tokenizer
def forward(self, data, state):
return np.array(self.tokenizer.encode(data))
pt_tokenizer = BertTokenizer.from_pretrained("neuralmind/bert-base-portuguese-cased")
en_tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
pipeline = fe.Pipeline(
train_data=train_ds,
eval_data=eval_ds,
test_data=test_ds,
batch_size=batch_size,
ops=[
Encode(inputs="source", outputs="source", tokenizer=pt_tokenizer),
Encode(inputs="target", outputs="target", tokenizer=en_tokenizer)
],
pad_value=0)
###Output
_____no_output_____
###Markdown
In the above code, `tokenizer.encode` takes the sentence and executes steps 1 - 3. The padding step is done by providing `pad_value=0` in `Pipeline`. Preprocessing Results
###Code
data = pipeline.get_results()
print("source after processing:")
print(data["source"])
print("source batch shape:")
print(data["source"].shape)
print("---------------------------------------------------")
print("target after processing:")
print(data["target"])
print("target batch shape:")
print(data["target"].shape)
###Output
source after processing:
tensor([[ 101, 420, 1485, ..., 1061, 119, 102],
[ 101, 5220, 179, ..., 0, 0, 0],
[ 101, 122, 21174, ..., 0, 0, 0],
...,
[ 101, 1307, 230, ..., 0, 0, 0],
[ 101, 123, 10186, ..., 0, 0, 0],
[ 101, 5733, 3072, ..., 0, 0, 0]])
source batch shape:
torch.Size([64, 67])
---------------------------------------------------
target after processing:
tensor([[ 101, 5921, 2035, ..., 2068, 1012, 102],
[ 101, 2057, 2040, ..., 0, 0, 0],
[ 101, 1998, 1045, ..., 0, 0, 0],
...,
[ 101, 2045, 1005, ..., 0, 0, 0],
[ 101, 1996, 5424, ..., 0, 0, 0],
[ 101, 2009, 2097, ..., 0, 0, 0]])
target batch shape:
torch.Size([64, 70])
###Markdown
Transformer Architecture Attention UnitThe basic form of the attention unit is defined in `scaled_dot_product_attention`. Given a set of queries (Q), keys (K), and values (V), it first performs the matrix multiplication of Q and K. The output of this multiplication gives the matching score between elements of Q and K. The scores are then scaled by the square root of the key dimension and normalized with a softmax across the keys dimension. Finally, the normalized scores are multiplied by V to get the final result. The intuition behind the attention unit is essentially a dictionary look-up with interpolation.
###Code
import tensorflow as tf
def scaled_dot_product_attention(q, k, v, mask):
matmul_qk = tf.matmul(q, k, transpose_b=True)
dk = tf.cast(tf.shape(k)[-1], tf.float32)
scaled_attention_logits = matmul_qk / tf.math.sqrt(dk)
num_heads, inp_length = tf.shape(scaled_attention_logits)[1], tf.shape(scaled_attention_logits)[2]
num_heads_mask, inp_length_mask = tf.shape(mask)[1], tf.shape(mask)[2]
# This manual tiling is to fix an auto-broadcasting issue with tensorflow
scaled_attention_logits += tf.tile(mask * -1e9, [1, num_heads // num_heads_mask, inp_length // inp_length_mask, 1])
attention_weights = tf.nn.softmax(scaled_attention_logits, axis=-1)
output = tf.matmul(attention_weights, v)
return output
def point_wise_feed_forward_network(em_dim, dff):
return tf.keras.Sequential([
tf.keras.layers.Dense(dff, activation='relu'), # (batch_size, seq_len, dff)
tf.keras.layers.Dense(em_dim) # (batch_size, seq_len, em_dim)
])
###Output
_____no_output_____
###Markdown
Multi-head AttentionThere are two drawbacks of the attention unit above:1. The complexity of the matrix multiplication is O(N^3); as the batch size or embedding dimension increases, the computation does not scale well. 2. A single attention head is limited in expressing local correlation between two words, because it calculates correlation by normalizing across all embedding dimensions. Sometimes this overall normalization removes interesting local patterns. A good analogy is to consider a single attention unit as globally averaging a signal, whereas a moving average is preferred to preserve certain information.Multi-head attention is used to overcome the issues above. It breaks the embedding dimension into multiple heads. As a result, each head's embedding dimension is divided by the number of heads, reducing the computation complexity. Moreover, each head only takes a fraction of the embedding and can be viewed as a specialized expert for a specific context. The final results are combined using another dense layer.
###Code
from tensorflow.keras import layers
class MultiHeadAttention(layers.Layer):
def __init__(self, em_dim, num_heads):
super().__init__()
assert em_dim % num_heads == 0, "model dimension must be a multiple of the number of heads"
self.num_heads = num_heads
self.em_dim = em_dim
self.depth = em_dim // self.num_heads
self.wq = layers.Dense(em_dim)
self.wk = layers.Dense(em_dim)
self.wv = layers.Dense(em_dim)
self.dense = layers.Dense(em_dim)
def split_heads(self, x, batch_size):
x = tf.reshape(x, (batch_size, -1, self.num_heads, self.depth))
return tf.transpose(x, perm=[0, 2, 1, 3]) # B, num_heads, seq_len, depth
def call(self, v, k, q, mask):
batch_size = tf.shape(q)[0]
q = self.wq(q) # B, seq_len, em_dim
k = self.wk(k) # B, seq_len, em_dim
v = self.wv(v) # B, seq_len, em_dim
q = self.split_heads(q, batch_size)
k = self.split_heads(k, batch_size)
v = self.split_heads(v, batch_size)
scaled_attention = scaled_dot_product_attention(q, k, v, mask)
scaled_attention = tf.transpose(scaled_attention, perm=[0, 2, 1, 3]) #B, seq_len, num_heads, depth
concat_attention = tf.reshape(scaled_attention, (batch_size, -1, self.em_dim)) # B, seq_len, em_dim
output = self.dense(concat_attention)
return output
###Output
_____no_output_____
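###Markdown
As a quick shape check (a sketch added for this write-up, not part of the original notebook), the layer maps a (batch, seq_len, em_dim) input back to the same shape:
###Code
# Hypothetical toy check of the multi-head attention layer above.
mha = MultiHeadAttention(em_dim=em_dim, num_heads=8)
toy = tf.random.uniform((2, 6, em_dim))    # (batch, seq_len, em_dim)
toy_mask = tf.zeros((2, 1, 1, 6))          # all-zero mask: nothing is hidden
print(mha(toy, toy, toy, toy_mask).shape)  # -> (2, 6, em_dim)
###Output
_____no_output_____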
###Markdown
Encoder and Decoder layerBoth the encoder and decoder layers go through multi-head attention. The decoder layer uses a second multi-head attention module to bridge the encoder outputs and the targets. Specifically, in the decoder's second multi-head attention module, the encoded output is used as both values and keys, whereas the target embedding is used as a query to "look up" encoder information. In the end, there is a feed-forward neural network to transform the looked-up value into something useful.
###Code
class EncoderLayer(layers.Layer):
def __init__(self, em_dim, num_heads, dff, rate=0.1):
super().__init__()
self.mha = MultiHeadAttention(em_dim, num_heads)
self.ffn = point_wise_feed_forward_network(em_dim, dff)
self.layernorm1 = layers.LayerNormalization(epsilon=1e-6)
self.layernorm2 = layers.LayerNormalization(epsilon=1e-6)
self.dropout1 = layers.Dropout(rate)
self.dropout2 = layers.Dropout(rate)
def call(self, x, training, mask):
attn_output = self.mha(x, x, x, mask)
attn_output = self.dropout1(attn_output, training=training)
out1 = self.layernorm1(x + attn_output)
ffn_output = self.ffn(out1)
ffn_output = self.dropout2(ffn_output, training=training)
out2 = self.layernorm2(out1 + ffn_output)
return out2
class DecoderLayer(layers.Layer):
def __init__(self, em_dim, num_heads, diff, rate=0.1):
super().__init__()
self.mha1 = MultiHeadAttention(em_dim, num_heads)
self.mha2 = MultiHeadAttention(em_dim, num_heads)
self.ffn = point_wise_feed_forward_network(em_dim, diff)
self.layernorm1 = layers.LayerNormalization(epsilon=1e-6)
self.layernorm2 = layers.LayerNormalization(epsilon=1e-6)
self.layernorm3 = layers.LayerNormalization(epsilon=1e-6)
self.dropout1 = layers.Dropout(rate)
self.dropout2 = layers.Dropout(rate)
self.dropout3 = layers.Dropout(rate)
def call(self, x, enc_out, training, decode_mask, padding_mask):
attn1 = self.mha1(x, x, x, decode_mask)
attn1 = self.dropout1(attn1, training=training)
out1 = self.layernorm1(attn1 + x)
attn2 = self.mha2(enc_out, enc_out, out1, padding_mask)
attn2 = self.dropout2(attn2, training=training)
out2 = self.layernorm2(attn2 + out1)
ffn_output = self.ffn(out2)
ffn_output = self.dropout3(ffn_output, training=training)
out3 = self.layernorm3(ffn_output + out2)
return out3
###Output
_____no_output_____
###Markdown
Putting Everything TogetherA transformer consists of an Encoder and Decoder, which in turn consist of multiple stacked encoder/decoder layers. One interesting property of transformers is that they do not have an intrinsic awareness of the position dimension. Therefore, a positional encoding is usually added to the embedding matrix to give the embedding position context. A nice tutorial about positional encoding can be found [here](https://kazemnejad.com/blog/transformer_architecture_positional_encoding/).
###Code
def get_angles(pos, i, em_dim):
angle_rates = 1 / np.power(10000, (2 * (i // 2)) / np.float32(em_dim))
return pos * angle_rates
def positional_encoding(position, em_dim):
angle_rads = get_angles(np.arange(position)[:, np.newaxis], np.arange(em_dim)[np.newaxis, :], em_dim)
# apply sin to even indices in the array; 2i
angle_rads[:, 0::2] = np.sin(angle_rads[:, 0::2])
# apply cos to odd indices in the array; 2i+1
angle_rads[:, 1::2] = np.cos(angle_rads[:, 1::2])
pos_encoding = angle_rads[np.newaxis, ...]
return tf.cast(pos_encoding, dtype=tf.float32)
class Encoder(layers.Layer):
def __init__(self, num_layers, em_dim, num_heads, dff, input_vocab, max_pos_enc, rate=0.1):
super().__init__()
self.em_dim = em_dim
self.num_layers = num_layers
self.embedding = layers.Embedding(input_vocab, em_dim)
self.pos_encoding = positional_encoding(max_pos_enc, self.em_dim)
self.enc_layers = [EncoderLayer(em_dim, num_heads, dff, rate) for _ in range(num_layers)]
self.dropout = layers.Dropout(rate)
def call(self, x, mask, training=None):
seq_len = tf.shape(x)[1]
x = self.embedding(x)
x *= tf.math.sqrt(tf.cast(self.em_dim, tf.float32))
x += self.pos_encoding[:, :seq_len, :]
x = self.dropout(x, training=training)
for i in range(self.num_layers):
x = self.enc_layers[i](x, training, mask)
return x
class Decoder(layers.Layer):
def __init__(self, num_layers, em_dim, num_heads, dff, target_vocab, max_pos_enc, rate=0.1):
super().__init__()
self.em_dim = em_dim
self.num_layers = num_layers
self.embedding = layers.Embedding(target_vocab, em_dim)
self.pos_encoding = positional_encoding(max_pos_enc, em_dim)
self.dec_layers = [DecoderLayer(em_dim, num_heads, dff, rate) for _ in range(num_layers)]
self.dropout = layers.Dropout(rate)
def call(self, x, enc_output, decode_mask, padding_mask, training=None):
seq_len = tf.shape(x)[1]
x = self.embedding(x)
x *= tf.math.sqrt(tf.cast(self.em_dim, tf.float32))
x += self.pos_encoding[:, :seq_len, :]
x = self.dropout(x, training=training)
for i in range(self.num_layers):
x = self.dec_layers[i](x, enc_output, training, decode_mask, padding_mask)
return x
def transformer(num_layers, em_dim, num_heads, dff, input_vocab, target_vocab, max_pos_enc, max_pos_dec, rate=0.1):
inputs = layers.Input(shape=(None, ))
targets = layers.Input(shape=(None, ))
encode_mask = layers.Input(shape=(None, None, None))
decode_mask = layers.Input(shape=(None, None, None))
x = Encoder(num_layers, em_dim, num_heads, dff, input_vocab, max_pos_enc, rate=rate)(inputs, encode_mask)
x = Decoder(num_layers, em_dim, num_heads, dff, target_vocab, max_pos_dec, rate=rate)(targets,
x,
decode_mask,
encode_mask)
x = layers.Dense(target_vocab)(x)
model = tf.keras.Model(inputs=[inputs, targets, encode_mask, decode_mask], outputs=x)
return model
model = fe.build(
model_fn=lambda: transformer(num_layers=4,
em_dim=em_dim,
num_heads=8,
dff=512,
input_vocab=pt_tokenizer.vocab_size,
target_vocab=en_tokenizer.vocab_size,
max_pos_enc=1000,
max_pos_dec=1000),
optimizer_fn="adam")
###Output
_____no_output_____
###Markdown
Network OperationsNow that we have defined the transformer architecture, another thing worth mentioning is the mask. A mask is a boolean array that tells the network to **ignore** certain positions within the sentence. For example, a padding mask tells the network to ignore the tokens we padded, and a look-ahead mask prevents the decoder from seeing future words so that it cannot give away the answer while predicting the next token. The loss function of the transformer is simply a masked cross entropy loss: it only considers predictions at positions that are not masked.
###Code
from fastestimator.op.tensorop import TensorOp
from fastestimator.op.tensorop.loss import LossOp
from fastestimator.op.tensorop.model import ModelOp, UpdateOp
class CreateMasks(TensorOp):
def forward(self, data, state):
inp, tar = data
encode_mask = self.create_padding_mask(inp)
dec_look_ahead_mask = self.create_look_ahead_mask(tf.shape(tar)[1])
dec_target_padding_mask = self.create_padding_mask(tar)
decode_mask = tf.maximum(dec_target_padding_mask, dec_look_ahead_mask)
return encode_mask, decode_mask
@staticmethod
def create_padding_mask(seq):
seq = tf.cast(tf.math.equal(seq, 0), tf.float32)
return seq[:, tf.newaxis, tf.newaxis, :] # (batch_size, 1, 1, seq_len)
@staticmethod
def create_look_ahead_mask(size):
mask = 1 - tf.linalg.band_part(tf.ones((size, size)), -1, 0)
return mask # (seq_len, seq_len)
class ShiftData(TensorOp):
def forward(self, data, state):
target = data
return target[:, :-1], target[:, 1:]
class MaskedCrossEntropy(LossOp):
def __init__(self, inputs, outputs, mode=None):
super().__init__(inputs=inputs, outputs=outputs, mode=mode)
self.loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True, reduction='none')
def forward(self, data, state):
y_pred, y_true = data
mask = tf.cast(tf.math.logical_not(tf.math.equal(y_true, 0)), tf.float32)
loss = self.loss_fn(y_true, y_pred) * mask
loss = tf.reduce_sum(loss) / tf.reduce_sum(mask)
return loss
network = fe.Network(ops=[
ShiftData(inputs="target", outputs=("target_inp", "target_real")),
CreateMasks(inputs=("source", "target_inp"), outputs=("encode_mask", "decode_mask")),
ModelOp(model=model, inputs=("source", "target_inp", "encode_mask", "decode_mask"), outputs="pred"),
MaskedCrossEntropy(inputs=("pred", "target_real"), outputs="ce"),
UpdateOp(model=model, loss_name="ce")
])
###Output
_____no_output_____
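###Markdown
To see the masking in the loss (a toy check added for this write-up; the tensors are invented), padded positions contribute nothing and the loss is averaged only over the real tokens:
###Code
# Hypothetical toy check of MaskedCrossEntropy defined above.
toy_true = tf.constant([[5, 3, 0, 0]])      # two real tokens, two padded
toy_logits = tf.random.uniform((1, 4, 10))  # logits over a made-up vocabulary of 10
mce = MaskedCrossEntropy(inputs=("pred", "target_real"), outputs="ce")
print(float(mce.forward((toy_logits, toy_true), state={})))  # mean loss over the 2 real tokens
###Output
_____no_output_____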
###Markdown
Metrics and Learning Rate SchedulingThe metric used to evaluate the model is a masked accuracy: ordinary accuracy computed only over the unmasked (non-padded) positions. The learning rate scheduler uses a linear warm-up followed by an inverse-square-root decay, as in the original transformer paper.
###Code
import tempfile
from fastestimator.trace.adapt import LRScheduler
from fastestimator.trace.io import BestModelSaver
from fastestimator.trace.trace import Trace
model_dir=tempfile.mkdtemp()
def lr_fn(step, em_dim, warmupstep=4000):
lr = em_dim**-0.5 * min(step**-0.5, step * warmupstep**-1.5)
return lr
class MaskedAccuracy(Trace):
def on_epoch_begin(self, data):
self.correct = 0
self.total = 0
def on_batch_end(self, data):
y_pred, y_true = data["pred"].numpy(), data["target_real"].numpy()
mask = np.logical_not(y_true == 0)
matches = np.logical_and(y_true == np.argmax(y_pred, axis=2), mask)
self.correct += np.sum(matches)
self.total += np.sum(mask)
def on_epoch_end(self, data):
data.write_with_log(self.outputs[0], self.correct / self.total)
traces = [
MaskedAccuracy(inputs=("pred", "target_real"), outputs="masked_acc", mode="!train"),
BestModelSaver(model=model, save_dir=model_dir, metric="masked_acc", save_best_mode="max"),
LRScheduler(model=model, lr_fn=lambda step: lr_fn(step, em_dim))
]
estimator = fe.Estimator(pipeline=pipeline,
network=network,
traces=traces,
epochs=epochs,
train_steps_per_epoch=train_steps_per_epoch,
eval_steps_per_epoch=eval_steps_per_epoch)
###Output
_____no_output_____
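###Markdown
A quick look at the schedule values (added for this write-up) shows the behaviour of `lr_fn`: the rate grows linearly up to the warm-up step and then falls off as step**-0.5.
###Code
# Hypothetical print-out of the learning rate at a few steps.
for s in [1, 1000, 4000, 16000, 64000]:
    print(s, lr_fn(s, em_dim))
###Output
_____no_output_____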
###Markdown
Start the trainingThe training will take around 30 minutes on a single V100 GPU.
###Code
estimator.fit()
###Output
_____no_output_____
###Markdown
Let's translate something!
###Code
def token_to_words(sample, tokenizer):
words = tokenizer.decode(sample)
if '[CLS]' in words:
words = words[words.index('[CLS]')+5:]
if '[SEP]' in words:
words = words[:words.index('[SEP]')]
return words
sample_test_data = pipeline.get_results(mode="test")
sample_test_data = network.transform(data=sample_test_data, mode="test")
source = sample_test_data["source"].numpy()
predicted = sample_test_data["pred"].numpy()
predicted = np.argmax(predicted, axis=-1)
ground_truth = sample_test_data["target_real"].numpy()
index = np.random.randint(0, source.shape[0])
sample_source, sample_predicted, sample_ground_truth = source[index], predicted[index], ground_truth[index]
print("Source Language: ")
print(token_to_words(sample_source, pt_tokenizer))
print("")
print("Translation Ground Truth: ")
print(token_to_words(sample_ground_truth, en_tokenizer))
print("")
print("Machine Translation: ")
print(token_to_words(sample_predicted, en_tokenizer))
###Output
Source Language:
muito obrigada.
Translation Ground Truth:
thank you so much.
Machine Translation:
thank you very much.
|
python/examples/ipynb/EarthEngineColabInstall.ipynb
|
###Markdown
*Copyright 2018 Google LLC.**SPDX-License-Identifier: Apache-2.0* Earth Engine Colab installationThis notebook demonstrates a simple installation of Earth Engine to a Colab notebook. Colab setupThis notebook section installs the Earth Engine Python API on your Colab virtual machine (VM) and will need to be executed each time a new Colab notebook is created. Colab VMs are recycled after they are idle for a while. Install Earth EngineThe Earth Engine Python API and command line tools can be installed using [Python's `pip` package installation tool](https://pypi.org/project/pip/). The following notebook cell starts with `!` to indicate that a shell command should be invoked.
###Code
!pip install earthengine-api
###Output
_____no_output_____
###Markdown
Authenticate to Earth EngineIn order to access Earth Engine, sign up at [signup.earthengine.google.com](https://signup.earthengine.google.com).Once you have signed up and the Earth Engine package is installed, use the `earthengine authenticate` shell command to create and store authentication credentials on the Colab VM. These credentials are used by the Earth Engine Python API and command line tools to access Earth Engine servers.You will need to follow the link to the permissions page and give this notebook access to your Earth Engine account. Once you have authorized access, paste the authorization code into the input box displayed in the cell output.
###Code
import ee
# Check if the server is authenticated. If not, display instructions that
# explain how to complete the process.
try:
ee.Initialize()
except ee.EEException:
!earthengine authenticate
###Output
_____no_output_____
###Markdown
Test the installationImport the Earth Engine library and initialize it with the authorization token stored on the notebook VM. Also import a display widget and display a thumbnail image of an Earth Engine dataset.
###Code
import ee
from IPython.display import Image
# Initialize the Earth Engine module.
ee.Initialize()
# Display a thumbnail of a sample image asset.
Image(url=ee.Image('CGIAR/SRTM90_V4').getThumbUrl({'min': 0, 'max': 3000}))
###Output
_____no_output_____
|
02_fundamentos_pandas/notebook/21_identificando_e_removendo_outliers.ipynb
|
###Markdown
Analysis Report VIII Identifying and Removing Outliers
###Code
%matplotlib inline
import pandas as pd
import matplotlib.pyplot as plt
# configure plots with the desired figure size
plt.rc('figure', figsize=(14, 6))
dados = pd.read_csv('../dados/aluguel_residencial_final.csv', sep=';')
###Output
_____no_output_____
###Markdown
Box-Plot Representation Using the Boxplot
###Code
dados.boxplot('Valor')
###Output
_____no_output_____
###Markdown
* the visualization is compromised by the presence of highly discrepant values Making a selection to inspect some of the discrepant values
###Code
dados[dados['Valor'] >= 500000]
###Output
_____no_output_____
###Markdown
Creating a Series
###Code
valor = dados['Valor']
###Output
_____no_output_____
###Markdown
Removing Outliers Looking at the Boxplot Model and Computing the Quartiles
###Code
Q1 = valor.quantile(.25)
Q3 = valor.quantile(.75)
IIQ = Q3 - Q1
limite_inferior = Q1 - 1.5 * IIQ
limite_superior = Q3 + 1.5 * IIQ
###Output
_____no_output_____
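###Markdown
As a small aside (a sketch added for this write-up, not part of the original notebook), the same rule can be wrapped in a reusable helper; it should return the same limits computed above.
###Code
# Hypothetical helper wrapping the IQR rule used above.
def iqr_limits(series, k=1.5):
    q1, q3 = series.quantile(.25), series.quantile(.75)
    iqr = q3 - q1
    return q1 - k * iqr, q3 + k * iqr
print(iqr_limits(valor))  # should match (limite_inferior, limite_superior)
###Output
_____no_output_____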
###Markdown
Removing the Outliers through a selection
###Code
selecao = (valor >= limite_inferior) & (valor <= limite_superior)
dados_new = dados[selecao]
dados_new.boxplot('Valor')
###Output
_____no_output_____
###Markdown
* the Boxplot became much more readable after removing the outliers Comparing with histograms* the histogram shows the frequency distribution of the data* looking at the two variables, the second one behaves much better after the treatment, with the outliers removed
###Code
dados.hist('Valor')
dados_new.hist('Valor')
###Output
_____no_output_____
###Markdown
Exercise Obtain the set of statistics represented in the figure above. To do this, use the file aluguel_amostra.csv and run your analysis using Valor m2 (price per square meter) as the target variable. Remembering that Q1 represents the 1st quartile and Q3 the 3rd quartile, select the item with the correct answer (consider only two decimal places):
###Code
data = pd.read_csv('../dados/aluguel_amostra.csv', sep=';')
Q1 = data['Valor m2'].quantile(.25)
Q3 = data['Valor m2'].quantile(.75)
IIQ = Q3 - Q1
limite_inferior = Q1 - 1.5 * IIQ
limite_superior = Q3 + 1.5 * IIQ
data['Valor m2'].describe().round(2)
###Output
_____no_output_____
###Markdown
Answer
###Code
print(f'[Q1] -> {Q1}')
print(f'[Q3] -> {Q3}')
print(f'[IIQ] -> {IIQ:.2f}')
print(f'[Q1 - 1.5 * IIQ] -> {limite_inferior:.2f}')
print(f'[Q3 + 1.5 * IIQ] -> {limite_superior:.2f}')
###Output
[Q1] -> 21.25
[Q3] -> 42.31
[IIQ] -> 21.06
[Q1 - 1.5 * IIQ] -> -10.34
[Q3 + 1.5 * IIQ] -> 73.90
###Markdown
Looking at the Boxplot
###Code
data.boxplot('Valor m2')
###Output
_____no_output_____
###Markdown
Excluding the outliers and looking at the boxplot
###Code
selecao = (data['Valor m2'] >= limite_inferior) & (data['Valor m2'] <= limite_superior)
data = data[selecao]
data.boxplot('Valor m2')
###Output
_____no_output_____
###Markdown
Comparing the visualization with Seaborn
###Code
import seaborn as sns
sns.boxplot(x=data['Valor m2'])
###Output
_____no_output_____
|
Parse Dailyfile.ipynb
|
###Markdown
Network Usage Statistics by MACGet detailed and fairly accurate usage statistics for every device on your network using the version of bwmon here: https://github.com/vortex-5/ddwrt-bwmon. This is a nice little collection of scripts that interrogates the network and saves the necessary information in a simple way. I wanted to be able to see changes over time, so I developed this quick and dirty python program and underlying manual process to keep things running. There is still a lot that I can do to automate this, most notably using another machine as the controller - probably an RPi, as it used to be before being repurposed for ADSB. The README.md file has more information on the hardware, but I am using a Buffalo router running DD-WRT that does not have permanent memory. So if the router goes down there is a process to reload and restart everything. Originally, this was an automatic process fired off by an RPi watching on SCP, but currently it is manual. If the router goes down, you have to do this to restore BWMON (I have this scripted in ```Router-Tools/reload_router.sh```):```scp -i ~/.ssh/router -r ~/Documents/repositories/Router-Tools/bwmon [email protected]:/var``` ```scp -i ~/.ssh/router mac-names.txt [email protected]:/tmp/var/bwmon/www/mac-names.js```Here are some handy SCPs for the manual part of the process.Backup the bwmon directory:```scp -i ~/.ssh/router -r [email protected]:/var/bwmon /Users/samuelcroker/Documents/repositories/Router-Tools/```Run the bwmon start script:```ssh -i ~/.ssh/router [email protected] /var/bwmon/start.sh```The cron jobs are saved in the dd-wrt app, which appears not to be volatile. We should check that out sometime.Grab the daily updates:```scp -i ~/.ssh/router [email protected]:/var/bwmon/data/*.dat ~/Documents/routerdata```
###Code
import pandas as pd
import numpy as np
import datetime
import os
from os import walk
import calmap
import matplotlib.pyplot as plt
import seaborn as sns
%matplotlib inline
###Output
_____no_output_____
###Markdown
Usage Note:The history is maintained for all time in the second folder listed above. The router will keep all copies up until it loses power, then they are lost. For that reason, it is essential to maintain another copy. In the past, I have used a slave RPi to perform this function, but it was needed for another project so that fell by the wayside. In the current configuration I am using another computer but this is a manual process. Obviously there are lots of optimizations that can happen to fully develop this concept, but it is good enough for what it was designed to do.
###Code
#Get user input - router ip address
router_ip = input()
#update the mac list - this will only run under Macbook
#os.system("scp -i ~/.ssh/router mac-names.txt [email protected]:/tmp/var/bwmon/www/mac-names.js")
# move the dailyfiles - look for a 0 return code
os.system("scp -i ~/.ssh/router root@" + router_ip +":/var/bwmon/data/*.dat ~/Documents/routerdata")
maccsv = '/Users/samuelcroker/Documents/repositories/Router-tools/mac-names.csv'
macdf = pd.read_csv(maccsv, names = ['mac','devicename'],quotechar="'")
dailypath = '/Users/samuelcroker/Documents/routerdata/'
dailyfiles = []
for (dirpath, dirnames, filenames) in walk(dailypath):
dailyfiles.extend(filenames)
break
macdf['MAC'] = macdf['mac']  # upper-case copy of the mac column, used in the merge below
cutoff = 1.5
idx = 0
for f in dailyfiles:
date_parser = pd.to_datetime
dtypes = {'MAC':'str','PostIN KB':'int','PostOut KB':'int','PreIn KB':'int','PreOut KB':'int','LastSeen':'str'}
headers = ['MAC','PostIN KB','PostOut KB','PreIn KB','PreOut KB','LastSeen']
parse_dates = ['LastSeen']
if f[-3:] == 'dat' and f[0:5] == 'daily':
print(f)
df = pd.read_csv(dailypath+f, names=headers, dtype=dtypes, parse_dates=parse_dates )
df['dtstamp'] = np.repeat(datetime.date(int(f[6:10]),int(f[10:12]),int(f[12:14])),len(df))
df['idx'] = idx
if idx == 0:
finaldf = df.copy()
else:
finaldf = pd.concat([finaldf,df])
idx += 1
#calculate the totalGB column. This is approximate but very close...
finaldf['totalGB'] = (finaldf['PostIN KB'] + finaldf['PostOut KB'])/1e6
finaldf['dtstamp'] = finaldf['LastSeen'].dt.date
finaldf['hour'] = finaldf['LastSeen'].dt.hour
finaldf['year'] = finaldf['LastSeen'].dt.year
finaldf['day'] = finaldf['LastSeen'].dt.day
finaldf['month'] = finaldf['LastSeen'].dt.month
#join with devicenames
finaldf = finaldf.merge(macdf, left_on='MAC', right_on='MAC', how='left')
#Make sure unknown devices are not ignored in the groupby by replacing the null devicename with
# the mac address
# finaldf.devicename.fillna(finaldf['MAC'], inplace=True)
dyear = datetime.datetime.now().year
dmonth =datetime.datetime.now().month - 2
if dmonth <= 0:
dyear += -1
dmonth = 12 + dmonth
# create summaries
Alltotal = finaldf.groupby(['MAC','devicename'])['totalGB'].sum()
all_final = finaldf
devicetotal = finaldf.groupby(['devicename'])['totalGB'].sum()
dailytotal = finaldf.groupby(['dtstamp'])['totalGB'].sum()
smalldaily = finaldf[finaldf.totalGB < cutoff].groupby(['dtstamp','devicename'])['totalGB'].sum()
if datetime.datetime.now().month == 1:
lastK = datetime.date(datetime.datetime.now().year-1, 12 , 28)
else:
lastK = datetime.date(datetime.datetime.now().year,datetime.datetime.now().month -1 , 28)
print(lastK)
recent_df = finaldf[finaldf['LastSeen']>=lastK]
nonrecentdf = finaldf[finaldf['LastSeen']<lastK]
nonrecentdf = nonrecentdf[['MAC','LastSeen','dtstamp','year','month','day','hour','totalGB','devicename']]
nonrecentdf.to_pickle(path='/Users/samuelcroker/Documents/routerdata/legacyDaily.p')
###Output
_____no_output_____
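###Markdown
The comment in the cell above says to "look for a 0 return code", but the result of `os.system` is never checked. Below is a hedged sketch (added for this write-up, reusing the same key and paths assumed above) of the same copy with an explicit exit-status check.
###Code
# Hypothetical replacement for the os.system scp call, with an explicit return-code check.
import subprocess
result = subprocess.run(
    ["scp", "-i", os.path.expanduser("~/.ssh/router"),
     "root@" + router_ip + ":/var/bwmon/data/*.dat",
     os.path.expanduser("~/Documents/routerdata")],
    capture_output=True, text=True)
# the *.dat glob is expanded on the router side, so no local shell expansion is needed
if result.returncode != 0:
    print("scp failed:", result.stderr)
###Output
_____no_output_____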
###Markdown
Plot Calendar Map
###Code
dailytotal.index = pd.to_datetime(dailytotal.index)
plt.rcParams['figure.figsize'] = 16, 8
calmap.yearplot(data=dailytotal,year=2018,cmap='YlGn')
dailytotal.to_pickle(path='/Users/samuelcroker/Documents/routerdata/legacy.p')
pp0 = devicetotal.sort_values(ascending=False).plot(kind='bar',stacked=False, colormap='Paired',figsize=(12, 5),logy=True)
pp0.set_xlabel('Date')
pp0.set_ylabel('GB')
pp0.set_title("All Devices - All Time")
#df0 = devicetotal.unstack('devicename').fillna(0)
pp0 = dailytotal[dailytotal.index.date >= datetime.date(dyear,dmonth,1)].plot(kind='bar',stacked=False, colormap='Paired',figsize=(12, 5))
pp0.set_xlabel('Date - One day ahead')
pp0.set_ylabel('GB')
pp0.set_title("All Devices - All Time")
###Output
_____no_output_____
###Markdown
Billing cycle ends on the 28th
###Code
recent_device = recent_df.groupby(['devicename'])['totalGB'].sum()
mx = recent_df.groupby(['devicename'])['totalGB'].sum().max()
tlab = [0.0001,0.1,10,100,1000,mx]
pp0 = recent_device.sort_values(ascending=False).plot(kind='bar',
stacked=False, colormap='Paired',figsize=(12, 5),logy=True,yticks=tlab)
pp0.set_yscale('symlog')
pp0.set_yticklabels(tlab)
pp0.set_xlabel('Date')
pp0.set_ylabel('GB')
pp0.set_title("All Devices - Current Billing Cycle")
# Make a list
# recent_device.sort_values(ascending=False)
devicedaily = recent_df.groupby(['dtstamp'])['totalGB'].sum()
pp0 = devicedaily.plot(kind='bar',stacked=False, colormap='Paired',figsize=(12, 5))
pp0.set_xlabel('Date')
pp0.set_ylabel('GB')
pp0.set_title("Current Billing Cycle")
devicedaily = recent_df.groupby(['hour'])['totalGB'].sum()
pp0 = devicedaily.plot(kind='bar',stacked=False, colormap='Paired',figsize=(12, 5))
pp0.set_xlabel('Hour')
pp0.set_ylabel('GB')
pp0.set_title("Current Billing Cycle - Hourly Usage")
devicehour = pd.pivot_table(data=recent_df,
index='hour',
values='totalGB',
columns='devicename')
sns.heatmap(devicehour, cmap='coolwarm',
annot=True,
fmt=".1f",
annot_kws={'size':6})
###Output
_____no_output_____
|
doc/survey_eda.ipynb
|
###Markdown
MDS Survey EDA
###Code
suppressWarnings(library(tidyverse))
# load data
suppressMessages(df <- read_csv('data/survey_results.csv'))
lu <- tibble(id = seq(0, 6, 1), question = colnames(df)) %>%
mutate(id = paste0("Q", id))
new_cols <- lu$id
df <- df %>%
rename_at(vars(colnames(df)), ~new_cols) %>%
rename("Timestamp" = "Q0")
###Output
── Attaching packages ─────────────────────────────────────── tidyverse 1.2.1 ──
✔ ggplot2 3.1.0 ✔ purrr 0.3.0
✔ tibble 2.0.1 ✔ dplyr 0.8.0.1
✔ tidyr 0.8.2 ✔ stringr 1.4.0
✔ readr 1.3.1 ✔ forcats 0.4.0
── Conflicts ────────────────────────────────────────── tidyverse_conflicts() ──
✖ dplyr::filter() masks stats::filter()
✖ dplyr::lag() masks stats::lag()
###Markdown
Glance at the data In this milestone, we collected 53 responses from MDS students and TAs. Each observation contains 7 attributes including Timestamp and 6 questions that were asked in the survey. The description of each question is shown in the lookup table below.
###Code
dim(df)
df %>% head()
###Output
_____no_output_____
###Markdown
Lookup table for Q1 -> Q6
###Code
(lu <- filter(lu, id != "Q0")) # remove timestamp
###Output
_____no_output_____
###Markdown
Summary table for each question
###Code
(df %>% group_by(Q1) %>% summarise(count = n(), percentage = n()/nrow(df)) %>% t())
(df %>% group_by(Q2) %>% summarise(count = n(), percentage = n()/nrow(df)) %>% t())
(df %>% group_by(Q3) %>% summarise(count = n(), percentage = n()/nrow(df)) %>% t())
(df %>% group_by(Q4) %>% summarise(count = n(), percentage = n()/nrow(df)) %>% t())
(df %>% group_by(Q5) %>% summarise(count = n(), percentage = n()/nrow(df)) %>% t())
(df %>% group_by(Q6) %>% summarise(count = n(), percentage = n()/nrow(df)) %>% t())
###Output
_____no_output_____
###Markdown
Reorder categorical variables
###Code
df_fact <- df %>%
select(-Timestamp) %>%
mutate_all(as_factor)
# relevel factors ...
df_fact$Q1 <- df_fact$Q1 %>%
fct_relevel("Less than 1 year", "1 - 3 years", "3 - 5 years", "More than 5 years")
df_fact$Q3 <- df_fact$Q3 %>%
fct_expand("PhD")
df_fact$Q5 <- df_fact$Q5 %>%
fct_relevel("Less than 1 year", "1 - 3 years", "3 - 5 years", "More than 5 years")
df_fact$Q6 <- df_fact$Q6 %>%
fct_expand("2 (disappointed)") %>%
fct_relevel("1 (very disappointed)", "2 (disappointed)","3 (ok)", "4 (enjoy)", "5 (enthusiastic)")
###Output
_____no_output_____
###Markdown
Plot count data
###Code
options(repr.plot.width = 8, repr.plot.height = 6)
suppressWarnings(df_fact_sum <- df_fact %>%
gather(key = "question", value = "answer") %>%
group_by(question, answer) %>%
summarise(freq = n()) %>%
ungroup() %>%
mutate(answer = factor(answer)))
ggplot(df_fact_sum, aes(reorder(answer, freq), y = freq)) +
geom_bar(stat = "identity", fill = "dodgerblue") +
facet_wrap(~question, scales = "free", ncol = 2) +
coord_flip() +
labs(x= "Response", y = "Count") +
theme_bw()
###Output
_____no_output_____
###Markdown
From these plots, we cannot say there is an obvious relationship between the number of years of full-time work experience a person has and their self-rated enjoyment of the MDS program. We need to take a deeper look at the data. Plot heatmaps of variables showing how they influence the response variable First, create function to plot heatmaps
###Code
plot_heat <- function(df, x, y, fill=freq){
group_var1 <- enquo(x)
group_var2 <- enquo(y)
fill_var <- enquo(fill)
tbl <- df_fact %>%
group_by(!!group_var1, !!group_var2) %>%
summarise(freq = n())
p <- ggplot(tbl, aes_string(x = group_var1, y = group_var2, fill = fill_var)) +
geom_tile()+
scale_y_discrete(drop = FALSE) +
scale_x_discrete(drop = FALSE) +
scale_fill_gradient(low = "#ffeda0", high = "#f03b20") +
labs(y = "", x = "", fill = "") +
theme_bw()
p
}
options(repr.plot.width = 6, repr.plot.height = 4)
###Output
_____no_output_____
###Markdown
Number of years of prior full-time work experience vs. self-rated enjoyment of MDS program
###Code
plot_heat(df_fact, x = Q1, y = Q6)
###Output
_____no_output_____
###Markdown
From this heatmap, we can see that for people with no more than 3 years of work experience, 4 (enjoy) is the rating chosen most often. For people with 3-5 years of work experience, although 5 (enthusiastic) is the most common choice, a substantial number also chose 3 (ok). People with more than 5 years of work experience tend to choose either 5 (enthusiastic) or 4 (enjoy). Field of study vs. enjoyment of MDS program
###Code
plot_heat(df_fact, x = Q2, y = Q6)
###Output
_____no_output_____
###Markdown
People who studied Science or Arts rated the program highly, most often choosing 5 (enthusiastic) or 4 (enjoy). People who studied Business tend to choose 4 (enjoy) most often, while people who studied Engineering or other fields give more varied responses. Level of education vs. enjoyment of MDS program
###Code
plot_heat(df_fact, x = Q3, y = Q6)
###Output
_____no_output_____
###Markdown
People with a Bachelor's degree tend to choose 4 (enjoy) most often, whereas people with a Master's degree gave more varied responses. Number of years of relevant prior full-time work experience vs. enjoyment of MDS program
###Code
plot_heat(df_fact, x = Q4, y = Q6)
###Output
_____no_output_____
###Markdown
People with fewer than 5 years of full-time work experience applicable to the program tend to respond 4 (enjoy) most often, and people with more than 5 years of applicable full-time work experience tend to respond 5 (enthusiastic) most often. Number of years of coding experience vs. enjoyment of MDS program
###Code
plot_heat(df_fact, x = Q5, y = Q6)
###Output
_____no_output_____
###Markdown
In general, people with fewer than 3 years of coding experience tend to choose 5 (enthusiastic) or 4 (enjoy). However, a notable number of people with less than 1 year of coding experience rated this program 1 (very disappointed). People with more than 3 years of coding experience tend to choose 4 (enjoy). Plot heatmaps of confounding variables showing how they influence the independent variable Field of study vs. number of years of full time work experience
###Code
plot_heat(df_fact, x = Q2, y = Q1)
###Output
_____no_output_____
###Markdown
It seems that people with a background in Science are more likely than people from other fields to have less than 3 years of full-time work experience before entering the MDS program. Number of years of coding experience vs. number of years of full time work experience
###Code
p <- plot_heat(df_fact, x = Q5, y = Q1)
p + labs(x = "Number of years of coding experience", y = "Number of years of full-time experience")
###Output
_____no_output_____
|
week3_map_clustering.ipynb
|
###Markdown
Import libraries
###Code
import numpy as np
import pandas as pd
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)
import requests
import json
import folium
from geopy.geocoders import Nominatim
from pandas.io.json import json_normalize
import matplotlib.cm as cm
import matplotlib.colors as colors
from sklearn.cluster import KMeans
###Output
_____no_output_____
###Markdown
Import Data
###Code
latlong = pd.read_csv('week3_postcalcode_latlong.csv',index_col=0)
latlong.head()
toronto = latlong[latlong['Borough'].str.contains('Toronto')].reset_index(drop=True)
toronto.head()
print('Number of boroughs:', len(set(toronto['Borough'])))
print('Number of neighbourhoods:', len(set(toronto['Neighbourhood'])))
###Output
Number of boroughs: 4
Number of neighbourhoods: 39
###Markdown
Map Neighbourhoods of Toronto
###Code
address = 'Toronto'
geolocator = Nominatim(user_agent="toronto-neighbourhood-explorer")  # geopy's Nominatim requires a user_agent; any descriptive identifier works
location = geolocator.geocode(address)
latitude = location.latitude
longitude = location.longitude
print('Toronto latitude:', latitude)
print('Toronto longitude:', longitude)
map_toronto = folium.Map(location=[latitude,longitude],zoom_start=12)
for bor,nei,lat,long in zip(toronto['Borough'],toronto['Neighbourhood'],toronto['Latitude'],toronto['Longitude']):
label = '{}, {}'.format(nei,bor)
    label = folium.Popup(label, parse_html=True)
folium.CircleMarker([lat,long], radius=5, popup=label, color='red',
fill=True, fill_color='#FFA500', fill_opacity=0.5,
parse_html=False).add_to(map_toronto)
map_toronto
CLIENT_ID = 'AJLIODOGAOKRUSCO0CA2YARCQ0SBZ1WPIJX3NVYOBXHUVKCG' # your Foursquare ID
CLIENT_SECRET = 'PIGHWKRFV5NPKJ1WE2J2115EYRD01M3WU4GLUCLYYQVTQR0L' # your Foursquare Secret
VERSION = '20180605' # Foursquare API version
###Output
_____no_output_____
###Markdown
Explore Neighbourhoods in Toronto Get venues in all neighbourhoods
###Code
def getNearbyVenues(names, latitudes, longitudes, radius=500, limit=100):
venues_list=[]
for name, lat, lng in zip(names, latitudes, longitudes):
print(name)
# create the API request URL
url = 'https://api.foursquare.com/v2/venues/explore?&client_id={}&client_secret={}&v={}&ll={},{}&radius={}&limit={}'.format(
CLIENT_ID,
CLIENT_SECRET,
VERSION,
lat,
lng,
radius,
limit)
# make the GET request
results = requests.get(url).json()["response"]['groups'][0]['items']
# return only relevant information for each nearby venue
venues_list.append([(
name,
lat,
lng,
v['venue']['name'],
v['venue']['location']['lat'],
v['venue']['location']['lng'],
v['venue']['categories'][0]['name']) for v in results])
nearby_venues = pd.DataFrame([item for venue_list in venues_list for item in venue_list])
nearby_venues.columns = ['Neighbourhood',
'Neighbourhood Latitude',
'Neighbourhood Longitude',
'Venue',
'Venue Latitude',
'Venue Longitude',
'Venue Category']
return(nearby_venues)
toronto_venues = getNearbyVenues(names=toronto['Neighbourhood'],
latitudes=toronto['Latitude'],
longitudes=toronto['Longitude'])
print(toronto_venues.shape)
toronto_venues.head(10)
toronto_venues.groupby('Neighbourhood').count()
print('Number of unique categories:',len(toronto_venues['Venue Category'].unique()))
###Output
Number of unique categories: 229
###Markdown
Characterize each neighbourhood One-hot encoding of venue categories
###Code
toronto_onehot = pd.get_dummies(toronto_venues[['Venue Category']], prefix='',prefix_sep='')
toronto_onehot['Neighbourhood'] = toronto_venues['Neighbourhood']
columns = [toronto_onehot.columns[-1]]+list(toronto_onehot.columns[:-1])
toronto_onehot = toronto_onehot[columns]
toronto_onehot.head()
#remove some irrelevant categories
toronto_onehot.drop(['Neighborhood','Intersection'],axis=1,inplace=True)
toronto_onehot.shape
###Output
_____no_output_____
###Markdown
Group venue categories by neighbourhood
###Code
toronto_grouped = toronto_onehot.groupby('Neighbourhood').mean().reset_index()
toronto_grouped.head()
toronto_grouped.shape
###Output
_____no_output_____
###Markdown
Find top 10 venue categories for each neighbourhood
###Code
def return_most_common_venues(row, num_top_venues):
row_categories = row.iloc[1:]
row_categories_sorted = row_categories.sort_values(ascending=False)
return row_categories_sorted.index.values[0:num_top_venues]
num_top_venues = 10
indicators = ['st', 'nd', 'rd']
# create columns according to number of top venues
columns = ['Neighbourhood']
for ind in np.arange(num_top_venues):
try:
columns.append('{}{} Most Common Venue'.format(ind+1, indicators[ind]))
except:
columns.append('{}th Most Common Venue'.format(ind+1))
# create a new dataframe
toronto_venues_sorted = pd.DataFrame(columns=columns)
toronto_venues_sorted['Neighbourhood'] = toronto_grouped['Neighbourhood']
for ind in np.arange(toronto_grouped.shape[0]):
toronto_venues_sorted.iloc[ind, 1:] = return_most_common_venues(toronto_grouped.iloc[ind, :], num_top_venues)
toronto_venues_sorted.head()
###Output
_____no_output_____
###Markdown
K-means Clustering Clustering on all venue categories
###Code
toronto_grouped_clustering = toronto_grouped.drop('Neighbourhood',axis=1)
toronto_grouped_clustering.head()
kclusters = 5
kmeans = KMeans(n_clusters=kclusters,random_state=0,n_jobs=-1,max_iter=500)
kmeans.fit(toronto_grouped_clustering)
###Output
_____no_output_____
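###Markdown
Before mapping the clusters, a quick check of how the neighbourhoods are distributed across them can be useful (a small sketch using the fitted `kmeans` object from above).
###Code
# count how many neighbourhoods were assigned to each cluster
unique_labels, counts = np.unique(kmeans.labels_, return_counts=True)
dict(zip(unique_labels, counts))
###Output
_____no_output_____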
###Markdown
Mapping clusters
###Code
toronto_venues_sorted.insert(0,'Cluster Labels',kmeans.labels_)
toronto_map = toronto[['Neighbourhood','Borough','Latitude','Longitude']]
toronto_map = toronto_map.join(toronto_venues_sorted.set_index('Neighbourhood'),on='Neighbourhood')
toronto_map
# create map
map_clusters = folium.Map(location=[latitude, longitude], zoom_start=11)
# set color scheme for the clusters
x = np.arange(kclusters)
ys = [i + x + (i*x)**2 for i in range(kclusters)]
colors_array = cm.rainbow(np.linspace(0, 1, len(ys)))
rainbow = [colors.rgb2hex(i) for i in colors_array]
# add markers to the map
markers_colors = []
for lat, lon, poi, cluster in zip(toronto_map['Latitude'], toronto_map['Longitude'], toronto_map['Neighbourhood'], toronto_map['Cluster Labels']):
label = folium.Popup(str(poi) + ' Cluster ' + str(cluster), parse_html=True)
folium.CircleMarker(
[lat, lon],
radius=5,
popup=label,
color=rainbow[cluster-1],
fill=True,
fill_color=rainbow[cluster-1],
fill_opacity=0.7).add_to(map_clusters)
map_clusters
###Output
_____no_output_____
###Markdown
Show 10 most common venue categories for each cluster
###Code
toronto_map.loc[toronto_map['Cluster Labels'] == 0, toronto_map.columns[[1] + list(range(5, toronto_map.shape[1]))]]
toronto_map.loc[toronto_map['Cluster Labels'] == 1, toronto_map.columns[[1] + list(range(5, toronto_map.shape[1]))]]
toronto_map.loc[toronto_map['Cluster Labels'] == 2, toronto_map.columns[[1] + list(range(5, toronto_map.shape[1]))]]
toronto_map.loc[toronto_map['Cluster Labels'] == 3, toronto_map.columns[[1] + list(range(5, toronto_map.shape[1]))]]
toronto_map.loc[toronto_map['Cluster Labels'] == 4, toronto_map.columns[[1] + list(range(5, toronto_map.shape[1]))]]
###Output
_____no_output_____
|
back-end/notebooks/eval_calc.ipynb
|
###Markdown
Interassessor Agreement
###Code
def extract_evaluations(sorted_list):
evals = []
for q in sorted_list:
evals.append([int(e['relevant']) for e in q['evaluations']])
return evals
st_sorted = sorted(stijn, key = lambda e: e['query'].strip())
dn_sorted = sorted(don, key = lambda e: e['query'].strip())
ws_sorted = sorted(wessel, key = lambda e: e['query'].strip())
st_evals = extract_evaluations(st_sorted)
dn_evals = extract_evaluations(dn_sorted)
ws_evals = extract_evaluations(ws_sorted)
def nr_agreed(evals1, evals2):
    # count positions where the two assessors gave the same relevance judgement
    return sum(int(a == b) for a, b in zip([v for s in evals1 for v in s],
                                           [v for s in evals2 for v in s]))
def coef(ev1, ev2):
    # flatten the per-query evaluation lists into one sequence per assessor
    all_evals_1 = [val for sublist in ev1 for val in sublist]
    all_evals_2 = [val for sublist in ev2 for val in sublist]
    # number of documents each assessor judged relevant
    rel1 = sum(all_evals_1)
    rel2 = sum(all_evals_2)
    agreed_total = [int(all_evals_1[i] == all_evals_2[i]) for i in range(len(all_evals_1))]
    agreed_nr = sum(agreed_total)
    total_evals = len(agreed_total)
    # observed agreement
    pA = agreed_nr / total_evals
    # chance agreement: both judge relevant, or both judge irrelevant, based on the marginals
    pRel = rel1 / total_evals * rel2 / total_evals
    pIrrel = (total_evals - rel1) / total_evals * (total_evals - rel2) / total_evals
    pE = pRel + pIrrel
    # Cohen's kappa: observed agreement corrected for chance agreement
    coef = (pA - pE) / (1 - pE)
    return coef
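# Minimal sanity check on hypothetical toy judgements (not from the assessment data):
# the two toy assessors agree on 4 of 5 documents, so raw agreement is 0.8, and
# coef() returns a lower value once chance agreement is discounted.
toy_a = [[1, 1, 0, 0, 1]]
toy_b = [[1, 1, 0, 1, 1]]
print(coef(toy_a, toy_b))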
a = coef(st_evals, dn_evals)
b = coef(st_evals, ws_evals)
c = coef(dn_evals, ws_evals)
print((a + b + c)/3)
###Output
0.7674039746281536
|
Limpeza de Dados (Estruturado).ipynb
|
###Markdown
Data Cleaning (Structured Data)
###Code
# libraries needed for our experiment
import numpy as np
import pandas as pd
###Output
_____no_output_____
###Markdown
Artificial Data
###Code
data = {"Birth": ["","10/02/2000","10/02/2000","10/02/1988","10/02/1990","10/02/1995","10/02/1995","10/02/2002","10/02/1970"],
"Age": [1000,20,20,32,30,25,35,18,50],
"Sex":["","m","M","Male","m","f","F",np.nan,"F"],
"Job": ["","Arts & Design","Arts & Design","Business","busines","Healthcare","Healthcare","Food","@#4"],
"Income": ["<50k","<50k","<50k",">50k",">50k",">50k","<50k","<50k",">50k"]}
###Output
_____no_output_____
###Markdown
Converting to a DataFrame
###Code
df = pd.DataFrame(data)
df
###Output
_____no_output_____
###Markdown
Filtering Variables We often need to select only the variables of interest for a specific problem, or drop some because they are redundant.
###Code
df.drop(["Birth"],axis=1,inplace=True) # apagando a coluna "Birth" no eixo=1 (por coluna). inplace significa para apagar do dataset corrente e não gerar uma cópia.
df
###Output
_____no_output_____
###Markdown
Finding Inconsistencies
###Code
df.Sex.unique()
df.Job.unique()
df.Income.unique()
###Output
_____no_output_____
###Markdown
We can see distinct formatting (Sex and Job) with the same meaning. We should fix it.
###Code
df.Sex.replace('m','M',inplace=True)
df.Sex.replace('Male','M',inplace=True)
df.Sex.replace('f','F',inplace=True)
df.Job.replace('busines','Business',inplace=True)
df
###Output
_____no_output_____
###Markdown
Error Handling We have two examples with errors: 0 and 8. Example 0 can be discarded, since it contains only a single piece of information and it is wrong. The second one (example 8) should be cleaned and kept.
###Code
df.drop(0,inplace=True)
df.Job.replace("@#4",np.nan,inplace=True) # substituindo valores incorretos por Nulo
df
###Output
_____no_output_____
###Markdown
Missing Values At this stage, we can replace null values in the categorical variables with something more indicative for future visualization. However, be careful if the number of nulls is too high; if it is, it is better not to replace them so they do not take up space. For numeric variables we can leave the nulls for now, since they will not interfere with the visualization.
###Code
df.Job.replace(np.nan,"Não Informado",inplace=True)
df.Sex.replace(np.nan,"A",inplace=True)
df
###Output
_____no_output_____
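###Markdown
Before moving on, we can confirm how many null values remain in each column, which helps decide whether any further replacement is worthwhile (a small check using pandas only).
###Code
# count the remaining null values per column
df.isna().sum()
###Output
_____no_output_____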
###Markdown
Removing duplicates (this depends on your problem) During the integration of several databases, or even within a single very large database, duplicates may appear. Here, you may or may not remove them. If you want to check how many individuals occur with the same profile, you should not remove them. For learning problems where you want, for example, to predict something based on an individual's information, then you do need to remove them.
###Code
df[df.duplicated(keep = False)] # checking which examples are duplicates
# Removing
df = df[~df.duplicated(keep="first")] # keep only the first occurrence of each duplicate; the rest are discarded
df
###Output
_____no_output_____
###Markdown
Filtering by example Once the values have been cleaned, we can now select the samples to work with according to some rule that suits the problem under investigation. For example, we want to work with people who are not in the "Food" field and who are not older than 40.
###Code
df.Age <= 40 # our filter for age
df.Job != "Food" # our filter to exclude Food
# applying the filters
df = df[df.Age <= 40]
df = df[df.Job != "Food"]
###Output
_____no_output_____
###Markdown
Now our dataset is ready for analysis
###Code
df
###Output
_____no_output_____
|
notes/.ipynb_checkpoints/example-checkpoint.ipynb
|
###Markdown
Pandas Highcharts Example * Use [Highcharts](http://highcharts.com) to plot [pandas](http://pandas.pydata.org) DataFrame* Code on Github at [pandas-highcharts](https://github.com/gtnx/pandas-highcharts) Import
###Code
%load_ext autoreload
%autoreload 2
import pandas as pd
import datetime
import os
import numpy as np
from io import StringIO  # pandas.compat.StringIO is not available in recent pandas versions
from pandas.io.common import urlopen
from IPython.display import display, display_pretty, Javascript, HTML
from pandas_highcharts.core import serialize
from pandas_highcharts.display import display_charts
import matplotlib.pyplot as plt
# Data retrieved from http://www.quandl.com/api/v1/datasets/ODA/DEU_PCPIPCH.csv?column=1
data = """Date,Value\n2019-12-31,1.7\n2018-12-31,1.7\n2017-12-31,1.7\n2016-12-31,1.5\n2015-12-31,1.247\n2014-12-31,0.896\n2013-12-31,1.601\n2012-12-31,2.13\n2011-12-31,2.498\n2010-12-31,1.158\n2009-12-31,0.226\n2008-12-31,2.738\n2007-12-31,2.285\n2006-12-31,1.784\n2005-12-31,1.92\n2004-12-31,1.799\n2003-12-31,1.022\n2002-12-31,1.346\n2001-12-31,1.904\n2000-12-31,1.418\n1999-12-31,0.626\n1998-12-31,0.593\n1997-12-31,1.542\n1996-12-31,1.19\n1995-12-31,1.733\n1994-12-31,2.717\n1993-12-31,4.476\n1992-12-31,5.046\n1991-12-31,3.474\n1990-12-31,2.687\n1989-12-31,2.778\n1988-12-31,1.274\n1987-12-31,0.242\n1986-12-31,-0.125\n1985-12-31,2.084\n1984-12-31,2.396\n1983-12-31,3.284\n1982-12-31,5.256\n1981-12-31,6.324\n1980-12-31,5.447\n"""
df = pd.read_csv(StringIO(data), index_col=0, parse_dates=True)
df = df.sort_index()
###Output
The autoreload extension is already loaded. To reload it, use:
%reload_ext autoreload
###Markdown
Basic examples
###Code
df.head()
%reload_ext autoreload
display_charts(df, chart_type="stock", title="Germany inflation rate")
display_charts(df, kind="bar", title="Germany inflation rate")
display_charts(df, kind="barh", title="Germany inflation rate")
display_charts(df, title="Germany inflation rate", legend=None, kind="bar", figsize = (400, 200))
display_charts(df, title="Germany inflation rate", kind="bar", render_to="chart5", zoom="xy")
# Data retrieved from https://www.quandl.com/api/v1/datasets/CVR/ANGEL_SECTORS.csv
data = """Year,Software,Healthcare,Hardware,Biotech,Telecom,Manufacturing,Financial Products and Services,IT Services,Industrial/Energy,Retail,Media\n2013-12-31,23.0,14.0,,11.0,,,7.0,,,7.0,16.0\n2012-12-31,23.0,14.0,,11.0,,,,,7.0,12.0,7.0\n2011-12-31,23.0,19.0,,13.0,,,,7.0,13.0,,5.0\n2010-12-31,16.0,30.0,,15.0,,,,5.0,8.0,5.0,\n2009-12-31,19.0,17.0,,8.0,,,5.0,,17.0,9.0,\n2008-12-31,13.0,16.0,,11.0,,,,,8.0,12.0,7.0\n2007-12-31,27.0,19.0,,12.0,,,,,8.0,6.0,5.0\n2006-12-31,18.0,21.0,,18.0,,,6.0,,6.0,8.0,\n2005-12-31,18.0,20.0,8.0,12.0,,,,6.0,6.0,,6.0\n2004-12-31,22.0,16.0,10.0,10.0,6.0,,8.0,8.0,,7.0,\n2003-12-31,26.0,13.0,12.0,11.0,5.0,12.0,,,,,\n2002-12-31,40.0,14.0,5.0,5.0,5.0,,,,,,\n"""
df3 = pd.read_csv(StringIO(data), index_col=0, parse_dates=True)
df3 = df3.fillna(0) / 100
df4 = pd.DataFrame(df3.mean(), columns=['ratio'])
df4['total'] = 1
display_charts(df4, kind='pie', y=['ratio'], title='Angel Deals By Sector', tooltip={'pointFormat': '{series.name}: <b>{point.percentage:.1f}%</b>'})
###Output
_____no_output_____
###Markdown
Highcharts specific
###Code
df4 = pd.DataFrame(df3.sum(), columns=['sum'])
#df4.to_dict('series').items()[0][1].tolist()
display_charts(df4, polar=True, kind='bar', ylim=(0, 2.3), title='Angel Deals By Sector')
###Output
_____no_output_____
|
demo-31.ipynb
|
###Markdown
Plot COVID-19 data on a specific department
###Code
%load_ext lab_black
%matplotlib inline
import matplotlib.pyplot as plt
import pandas as pd
import warnings
warnings.filterwarnings("ignore")
pd.plotting.register_matplotlib_converters()
dep = "31" # Haute-Garonne
figsize = (15, 10)
covid_url = (
"https://www.data.gouv.fr/fr/datasets/r/63352e38-d353-4b54-bfd1-f1b3ee1cabd7"
)
###Output
_____no_output_____
###Markdown
Load table with COVID-19 data (figures by dates)
###Code
df = pd.read_csv(covid_url, sep=";", parse_dates=True, index_col=2)
df = df.query("sexe == 0") # sum male/female
df.drop(columns=["sexe"], inplace=True)
df.sort_index(inplace=True)
df.rename(
columns={
"hosp": "Number of people hospitalized",
"rea": "Number of people in resuscitation or critical care",
"rad": "Total amount of patients that returned home",
"dc": "Total amount of deaths",
},
inplace=True,
)
df.head(2)
sdf = df[df["dep"] == dep]
f, ax = plt.subplots(figsize=figsize)
sdf.plot(ax=ax, subplots=False, sharex=True, grid=True)
ax.grid(True, which="both")
ax.set_title("Evolution of COVID-19 cases in department " + dep)
img_name = dep + ".png"
ax.xaxis.set_label_text("")
f.savefig(img_name)
plt.show()
###Output
_____no_output_____
|
slides/DataVis-06Py.ipynb
|
###Markdown
Visualization Techniques (Projections) Venustiano Soancatl Aguilar Center for Information Technology University of Groningen, the Netherlands Projections: given a table of $m$ rows and $n$ columns, - a row represents an $n$-dimensional point, - a row can be represented/mapped as a $2D$ point, - point-to-point distance (in $2D$) shows similarity of table rows (in $nD$), - coloring points by one attribute can show groups. Principal component analysis (PCA): 1. compute $n$ eigenvectors $e_i$ and eigenvalues $w_i$ of the $m$ table rows, 2. select the two eigenvectors $e_i$ with the two largest eigenvalues $w_i$, 3. project the $nD$ points onto a $2D$ plane spanned by the two largest eigenvectors. Pros: simple to compute, many tools support this (linear) method. Cons: 2D distances may not accurately reflect $nD$ distances. The data
###Code
import pandas as pd
# Data
tableRes = pd.read_csv('data/results.txt',sep=' ')
tableRes
###Output
_____no_output_____
###Markdown
Selecting columns to normalize
###Code
cols_to_norm = tableRes.columns[4:15]
cols_to_norm
###Output
_____no_output_____
###Markdown
Normalizing the data
###Code
tableRes[cols_to_norm] = tableRes[cols_to_norm].transform(lambda value: (value - value.mean()) / value.std())
tableRes.head()
###Output
_____no_output_____
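###Markdown
Before switching to R, the projection described in the slide can be sketched directly in Python. This is a minimal illustration, assuming scikit-learn is available; it uses the normalized columns `cols_to_norm` defined above and keeps the two components with the largest eigenvalues.
###Code
from sklearn.decomposition import PCA

# fit PCA on the standardized measurement columns and keep the first two components
pca = PCA(n_components=2)
scores = pca.fit_transform(tableRes[cols_to_norm])
# share of the total variance captured by PC1 and PC2
print(pca.explained_variance_ratio_)
###Output
_____no_output_____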
###Markdown
Using `R` for PCA
###Code
# activate R magic
%load_ext rpy2.ipython
%%R
# r imports …
library(ggplot2)
library(data.table)
###Output
_____no_output_____
###Markdown
`prcomp`
###Code
%%R -i tableRes
tpca <- prcomp(tableRes[,5:15])
#print(str(balance.pca))
tpca
###Output
Standard deviations (1, .., p=11):
[1] 2.41173960 1.71307470 0.98895667 0.91062388 0.37032009 0.35355737
[7] 0.26429141 0.25548704 0.15100026 0.12298269 0.08016767
Rotation (n x k) = (11 x 11):
PC1 PC2 PC3 PC4 PC5
medLrms51 0.05971084 -0.53757150 -0.31886975 0.01290679 -0.22309384
medTI51 0.26162265 -0.19597664 -0.03471394 -0.73464487 0.34953967
medK -0.36843362 0.19195554 0.02969850 -0.26195206 -0.09271083
meanK -0.35584700 0.15390292 -0.24456723 -0.37430522 -0.17410174
medSpeed 0.36050703 -0.15369588 0.28480403 0.26293426 -0.17036643
medTIms51 -0.39049317 0.12406172 -0.16758189 0.08319465 0.00148884
medLcovD51 0.10189172 0.54217342 -0.10147216 0.11932485 -0.08720354
medLsd51 0.37458176 0.02188806 -0.30760090 -0.21941535 -0.30778866
medLcov51 0.32234471 0.32630834 -0.07510445 -0.15663402 -0.57921111
medLrmslD51 -0.26605790 -0.36743670 -0.37442296 0.15458067 -0.32794638
medLsdD51 0.24037724 0.19273118 -0.69027294 0.25404795 0.46340572
PC6 PC7 PC8 PC9 PC10
medLrms51 -0.19932745 0.35061296 -0.3523983 0.26292013 -0.178449964
medTI51 -0.38852965 0.02280413 0.2698161 -0.01080795 0.007880793
medK 0.17450740 0.69869694 0.1080169 -0.43415464 -0.157085821
meanK 0.17351756 0.01394028 -0.1438506 0.43747398 0.607913551
medSpeed 0.02586282 0.47224438 0.4756079 0.25711892 0.369810093
medTIms51 -0.10293663 -0.11703397 0.6291363 0.42701592 -0.367223863
medLcovD51 -0.59053037 0.25761816 -0.2362108 0.20891579 -0.163414852
medLsd51 0.51013797 -0.01645476 0.0598884 0.14751478 -0.426465907
medLcov51 -0.15686632 -0.21662253 0.1511338 -0.26112260 0.156389822
medLrmslD51 -0.29502884 -0.12726616 0.2307516 -0.38117033 0.139956770
medLsdD51 0.12988972 0.14733363 0.1147555 -0.15345188 0.222761160
PC11
medLrms51 0.40990160
medTI51 -0.07487087
medK 0.07648161
meanK -0.08671689
medSpeed -0.11993174
medTIms51 0.24457142
medLcovD51 -0.34969644
medLsd51 -0.39240571
medLcov51 0.48676461
medLrmslD51 -0.44855680
medLsdD51 0.15618624
###Markdown
Plotting PCA https://cran.r-project.org/web/packages/ggfortify/vignettes/plot_pca.html [ggfortify](http://www.sthda.com/english/wiki/ggfortify-extension-to-ggplot2-to-handle-some-popular-packages-r-software-and-data-visualization)
###Code
%%R
install.packages("ggfortify",repos = "http://cran.us.r-project.org")
# Add this package to your docker file to have it permanently
%%R
library(ggfortify)
autoplot(tpca)
###Output
_____no_output_____
###Markdown
Additional features
###Code
%%R
tmp = as.data.table(tableRes)
tmp[,old:=ifelse(Age < 61, 'Younger', 'Older')]
fig <- autoplot(tpca, data = tmp, colour = 'old',loadings = TRUE, loadings.colour = 'blue',
loadings.label = TRUE, loadings.label.size = 5) +
theme_bw()
%%R -w 800 -h 600 -u px
fig
###Output
_____no_output_____
###Markdown
The step by step way
###Code
%%R
balancePcaPlot <- tmp[,.(iSubj,trial,Age,Decade,old)]
balancePcaPlot[,PCA1:=tpca$x[,1]]
balancePcaPlot[,PCA2:=tpca$x[,2]]
###Output
iSubj trial Age Decade old PCA1 PCA2
1: 1 1 76 7 Older -0.3940074 0.7063803
2: 1 2 76 7 Older -0.4690613 -0.3174284
3: 1 3 76 7 Older 3.3567930 0.2727497
4: 1 4 76 7 Older 3.5928590 0.5645955
5: 1 5 76 7 Older 2.5631942 0.9606240
---
396: 40 6 58 5 Younger -3.1612037 -2.1700856
397: 40 7 58 5 Younger -4.6910554 -2.4443562
398: 40 8 58 5 Younger -3.9356359 -1.9407878
399: 40 9 58 5 Younger -3.3001137 -2.3003477
400: 40 10 58 5 Younger -2.3693058 -2.2456987
###Markdown
ggplot2
###Code
%%R
bpp <- ggplot(balancePcaPlot,aes(PCA1,PCA2,group=old))
bpp <- bpp + geom_point(aes(colour=old),size=3,alpha=0.5)+
stat_ellipse(size=0.3)+
theme_bw()+
coord_fixed(ratio = 1)
###Output
_____no_output_____
###Markdown
The result
###Code
%%R -w 800 -h 600 -u px
bpp # to add the biplot, look at ./data/figures.R
###Output
_____no_output_____
###Markdown
R variables to python
###Code
%%R -o balancePcaPlot
balancePcaPlot
###Output
iSubj trial Age Decade old PCA1 PCA2
1: 1 1 76 7 Older -0.3940074 0.7063803
2: 1 2 76 7 Older -0.4690613 -0.3174284
3: 1 3 76 7 Older 3.3567930 0.2727497
4: 1 4 76 7 Older 3.5928590 0.5645955
5: 1 5 76 7 Older 2.5631942 0.9606240
---
396: 40 6 58 5 Younger -3.1612037 -2.1700856
397: 40 7 58 5 Younger -4.6910554 -2.4443562
398: 40 8 58 5 Younger -3.9356359 -1.9407878
399: 40 9 58 5 Younger -3.3001137 -2.3003477
400: 40 10 58 5 Younger -2.3693058 -2.2456987
###Markdown
Now you can use `balancePcaPlot` in python
###Code
balancePcaPlot
import plotly.graph_objects as go
fig = go.Figure()
fig.add_trace(go.Scatter(x=balancePcaPlot['PCA1'],
y=balancePcaPlot['PCA2'],
marker = dict(size=balancePcaPlot['Decade']*2),
mode='markers'))
###Output
_____no_output_____
|
Databases and SQL for Data Science/DB0201EN-Week4-2-2-PeerAssign-v5-py.ipynb
|
###Markdown
Assignment: Notebook for Peer Assignment IntroductionUsing this Python notebook you will:1. Understand 3 Chicago datasets 1. Load the 3 datasets into 3 tables in a Db2 database1. Execute SQL queries to answer assignment questions Understand the datasets To complete the assignment problems in this notebook you will be using three datasets that are available on the city of Chicago's Data Portal:1. Socioeconomic Indicators in Chicago1. Chicago Public Schools1. Chicago Crime Data 1. Socioeconomic Indicators in ChicagoThis dataset contains a selection of six socioeconomic indicators of public health significance and a “hardship index,” for each Chicago community area, for the years 2008 – 2012.For this assignment you will use a snapshot of this dataset which can be downloaded from:https://ibm.box.com/shared/static/05c3415cbfbtfnr2fx4atenb2sd361ze.csvA detailed description of this dataset and the original dataset can be obtained from the Chicago Data Portal at:https://data.cityofchicago.org/Health-Human-Services/Census-Data-Selected-socioeconomic-indicators-in-C/kn9c-c2s2 2. Chicago Public SchoolsThis dataset shows all school level performance data used to create CPS School Report Cards for the 2011-2012 school year. This dataset is provided by the city of Chicago's Data Portal.For this assignment you will use a snapshot of this dataset which can be downloaded from:https://ibm.box.com/shared/static/f9gjvj1gjmxxzycdhplzt01qtz0s7ew7.csvA detailed description of this dataset and the original dataset can be obtained from the Chicago Data Portal at:https://data.cityofchicago.org/Education/Chicago-Public-Schools-Progress-Report-Cards-2011-/9xs2-f89t 3. Chicago Crime Data This dataset reflects reported incidents of crime (with the exception of murders where data exists for each victim) that occurred in the City of Chicago from 2001 to present, minus the most recent seven days. This dataset is quite large - over 1.5GB in size with over 6.5 million rows. For the purposes of this assignment we will use a much smaller sample of this dataset which can be downloaded from:https://ibm.box.com/shared/static/svflyugsr9zbqy5bmowgswqemfpm1x7f.csvA detailed description of this dataset and the original dataset can be obtained from the Chicago Data Portal at:https://data.cityofchicago.org/Public-Safety/Crimes-2001-to-present/ijzp-q8t2 Download the datasetsIn many cases the dataset to be analyzed is available as a .CSV (comma separated values) file, perhaps on the internet. Click on the links below to download and save the datasets (.CSV files):1. __CENSUS_DATA:__ https://ibm.box.com/shared/static/05c3415cbfbtfnr2fx4atenb2sd361ze.csv1. __CHICAGO_PUBLIC_SCHOOLS__ https://ibm.box.com/shared/static/f9gjvj1gjmxxzycdhplzt01qtz0s7ew7.csv1. __CHICAGO_CRIME_DATA:__ https://ibm.box.com/shared/static/svflyugsr9zbqy5bmowgswqemfpm1x7f.csv__NOTE:__ Ensure you have downloaded the datasets using the links above instead of directly from the Chicago Data Portal. The versions linked here are subsets of the original datasets and have some of the column names modified to be more database friendly which will make it easier to complete this assignment. Store the datasets in database tablesTo analyze the data using SQL, it first needs to be stored in the database.While it is easier to read the dataset into a Pandas dataframe and then PERSIST it into the database as we saw in Week 3 Lab 3, it results in mapping to default datatypes which may not be optimal for SQL querying. 
For example, a long textual field may map to a CLOB instead of a VARCHAR. Therefore, __it is highly recommended to manually load the table using the database console LOAD tool, as indicated in Week 2 Lab 1 Part II__. The only difference with that lab is that in Step 5 of the instructions you will need to click on create "(+) New Table" and specify the name of the table you want to create and then click "Next". Now open the Db2 console, open the LOAD tool, Select / Drag the .CSV file for the first dataset, Next create a New Table, and then follow the on-screen instructions to load the data. Name the new tables as follows:1. __CENSUS_DATA__1. __CHICAGO_PUBLIC_SCHOOLS__1. __CHICAGO_CRIME_DATA__ Connect to the database Let us first load the SQL extension and establish a connection with the database
###Code
%load_ext sql
###Output
_____no_output_____
###Markdown
In the next cell enter your db2 connection string. Recall you created Service Credentials for your Db2 instance in first lab in Week 3. From the __uri__ field of your Db2 service credentials copy everything after db2:// (except the double quote at the end) and paste it in the cell below after ibm_db_sa://
###Code
# Remember the connection string is of the format:
# %sql ibm_db_sa://my-username:my-password@my-hostname:my-port/my-db-name
# Enter the connection string for your Db2 on Cloud database instance below
%sql ibm_db_sa://bdk11537:drrlf%40kcqts1v8bb@dashdb-txn-sbox-yp-dal09-04.services.dal.bluemix.net:50000/BLUDB
%sql select TABSCHEMA, TABNAME, CREATE_TIME from SYSCAT.TABLES where TABSCHEMA = 'BDK11537'
###Output
* ibm_db_sa://bdk11537:***@dashdb-txn-sbox-yp-dal09-04.services.dal.bluemix.net:50000/BLUDB
Done.
###Markdown
Problems Now write and execute SQL queries to solve assignment problems Problem 1 Find the total number of crimes recorded in the CRIME table
###Code
# Rows in Crime table
%sql select count(*) from CHICAGO_CRIME_DATE;
###Output
* ibm_db_sa://bdk11537:***@dashdb-txn-sbox-yp-dal09-04.services.dal.bluemix.net:50000/BLUDB
Done.
###Markdown
Problem 2 Retrieve first 10 rows from the CRIME table
###Code
%sql select * from CHICAGO_CRIME_DATE LIMIT 10;
###Output
* ibm_db_sa://bdk11537:***@dashdb-txn-sbox-yp-dal09-04.services.dal.bluemix.net:50000/BLUDB
Done.
###Markdown
Problem 3 How many crimes involve an arrest?
###Code
%sql SELECT COUNT(*) from CHICAGO_CRIME_DATE WHERE arrest = 'TRUE';
###Output
* ibm_db_sa://bdk11537:***@dashdb-txn-sbox-yp-dal09-04.services.dal.bluemix.net:50000/BLUDB
Done.
###Markdown
Problem 4 Which unique types of crimes have been recorded at GAS STATION locations?
###Code
%sql select distinct(primary_type) from CHICAGO_CRIME_DATE where location_description = 'GAS STATION'
###Output
* ibm_db_sa://bdk11537:***@dashdb-txn-sbox-yp-dal09-04.services.dal.bluemix.net:50000/BLUDB
Done.
###Markdown
Hint: Which column lists types of crimes e.g. THEFT? Problem 5 In the CENSUS_DATA table list all Community Areas whose names start with the letter ‘B’.
###Code
%sql select community_area_name from CENSUS_DATA where community_area_name like 'B%';
###Output
* ibm_db_sa://bdk11537:***@dashdb-txn-sbox-yp-dal09-04.services.dal.bluemix.net:50000/BLUDB
Done.
###Markdown
Problem 6 Which schools in Community Areas 10 to 15 are healthy school certified?
###Code
%%sql
select name_of_school
from SCHOOLS
WHERE (community_area_number BETWEEN 10 and 15) and healthy_school_certified = 'Yes'
###Output
* ibm_db_sa://bdk11537:***@dashdb-txn-sbox-yp-dal09-04.services.dal.bluemix.net:50000/BLUDB
Done.
###Markdown
Problem 7 What is the average school Safety Score?
###Code
%%sql
select AVG(safety_score) AS AVG_SAFETY_SCORE
from SCHOOLS
###Output
* ibm_db_sa://bdk11537:***@dashdb-txn-sbox-yp-dal09-04.services.dal.bluemix.net:50000/BLUDB
Done.
###Markdown
Problem 8 List the top 5 Community Areas by average College Enrollment [number of students]
###Code
%%sql
select community_area_name, AVG(college_enrollment) as AVG_College_Enrollment
from SCHOOLS
group by community_area_name
order by AVG(college_enrollment) desc
LIMIT 5;
###Output
* ibm_db_sa://bdk11537:***@dashdb-txn-sbox-yp-dal09-04.services.dal.bluemix.net:50000/BLUDB
Done.
###Markdown
Problem 9 Use a sub-query to determine which Community Area has the least value for school Safety Score?
###Code
%sql select community_area_name from SCHOOLS where safety_score = (select min(safety_score) from SCHOOLS)
###Output
* ibm_db_sa://bdk11537:***@dashdb-txn-sbox-yp-dal09-04.services.dal.bluemix.net:50000/BLUDB
Done.
###Markdown
Problem 10 [Without using an explicit JOIN operator] Find the Per Capita Income of the Community Area which has a school Safety Score of 1.
###Code
%%sql
select community_area_name, per_capita_income from CENSUS_DATA
where ucase(community_area_name) in (select ucase(community_area_name) from SCHOOLS where safety_score = 1)
###Output
* ibm_db_sa://bdk11537:***@dashdb-txn-sbox-yp-dal09-04.services.dal.bluemix.net:50000/BLUDB
Done.
###Markdown
Assignment: Notebook for Peer Assignment IntroductionUsing this Python notebook you will:1. Understand 3 Chicago datasets 1. Load the 3 datasets into 3 tables in a Db2 database1. Execute SQL queries to answer assignment questions Understand the datasets To complete the assignment problems in this notebook you will be using three datasets that are available on the city of Chicago's Data Portal:1. Socioeconomic Indicators in Chicago1. Chicago Public Schools1. Chicago Crime Data 1. Socioeconomic Indicators in ChicagoThis dataset contains a selection of six socioeconomic indicators of public health significance and a “hardship index,” for each Chicago community area, for the years 2008 – 2012.For this assignment you will use a snapshot of this dataset which can be downloaded from:https://ibm.box.com/shared/static/05c3415cbfbtfnr2fx4atenb2sd361ze.csvA detailed description of this dataset and the original dataset can be obtained from the Chicago Data Portal at:https://data.cityofchicago.org/Health-Human-Services/Census-Data-Selected-socioeconomic-indicators-in-C/kn9c-c2s2 2. Chicago Public SchoolsThis dataset shows all school level performance data used to create CPS School Report Cards for the 2011-2012 school year. This dataset is provided by the city of Chicago's Data Portal.For this assignment you will use a snapshot of this dataset which can be downloaded from:https://ibm.box.com/shared/static/f9gjvj1gjmxxzycdhplzt01qtz0s7ew7.csvA detailed description of this dataset and the original dataset can be obtained from the Chicago Data Portal at:https://data.cityofchicago.org/Education/Chicago-Public-Schools-Progress-Report-Cards-2011-/9xs2-f89t 3. Chicago Crime Data This dataset reflects reported incidents of crime (with the exception of murders where data exists for each victim) that occurred in the City of Chicago from 2001 to present, minus the most recent seven days. This dataset is quite large - over 1.5GB in size with over 6.5 million rows. For the purposes of this assignment we will use a much smaller sample of this dataset which can be downloaded from:https://ibm.box.com/shared/static/svflyugsr9zbqy5bmowgswqemfpm1x7f.csvA detailed description of this dataset and the original dataset can be obtained from the Chicago Data Portal at:https://data.cityofchicago.org/Public-Safety/Crimes-2001-to-present/ijzp-q8t2 Download the datasetsIn many cases the dataset to be analyzed is available as a .CSV (comma separated values) file, perhaps on the internet. Click on the links below to download and save the datasets (.CSV files):1. __CENSUS_DATA:__ https://ibm.box.com/shared/static/05c3415cbfbtfnr2fx4atenb2sd361ze.csv1. __CHICAGO_PUBLIC_SCHOOLS__ https://ibm.box.com/shared/static/f9gjvj1gjmxxzycdhplzt01qtz0s7ew7.csv1. __CHICAGO_CRIME_DATA:__ https://ibm.box.com/shared/static/svflyugsr9zbqy5bmowgswqemfpm1x7f.csv__NOTE:__ Ensure you have downloaded the datasets using the links above instead of directly from the Chicago Data Portal. The versions linked here are subsets of the original datasets and have some of the column names modified to be more database friendly which will make it easier to complete this assignment. Store the datasets in database tablesTo analyze the data using SQL, it first needs to be stored in the database.While it is easier to read the dataset into a Pandas dataframe and then PERSIST it into the database as we saw in Week 3 Lab 3, it results in mapping to default datatypes which may not be optimal for SQL querying. 
For example, a long textual field may map to a CLOB instead of a VARCHAR. Therefore, __it is highly recommended to manually load the table using the database console LOAD tool, as indicated in Week 2 Lab 1 Part II__. The only difference with that lab is that in Step 5 of the instructions you will need to click on create "(+) New Table" and specify the name of the table you want to create and then click "Next". Now open the Db2 console, open the LOAD tool, Select / Drag the .CSV file for the first dataset, Next create a New Table, and then follow the on-screen instructions to load the data. Name the new tables as follows:1. __CENSUS_DATA__1. __CHICAGO_PUBLIC_SCHOOLS__1. __CHICAGO_CRIME_DATA__ Connect to the database Let us first load the SQL extension and establish a connection with the database
###Code
%load_ext sql
###Output
_____no_output_____
###Markdown
In the next cell enter your db2 connection string. Recall you created Service Credentials for your Db2 instance in first lab in Week 3. From the __uri__ field of your Db2 service credentials copy everything after db2:// (except the double quote at the end) and paste it in the cell below after ibm_db_sa://
###Code
# Remember the connection string is of the format:
# %sql ibm_db_sa://my-username:my-password@my-hostname:my-port/my-db-name
# Enter the connection string for your Db2 on Cloud database instance below
%sql ibm_db_sa://gnn77376:5145b%5Ecqrwp9nz0t@dashdb-txn-sbox-yp-lon02-01.services.eu-gb.bluemix.net:50000/BLUDB
###Output
_____no_output_____
###Markdown
Problems Now write and execute SQL queries to solve assignment problems Problem 1 Find the total number of crimes recorded in the CRIME table
###Code
# Rows in Crime table
%sql select count(id) from CHICAGO_crime
###Output
* ibm_db_sa://gnn77376:***@dashdb-txn-sbox-yp-lon02-01.services.eu-gb.bluemix.net:50000/BLUDB
Done.
###Markdown
There are 533 cases, so 533 crimes. Problem 2 Retrieve first 10 rows from the CRIME table
###Code
%sql select * from chicago_crime limit 10
###Output
* ibm_db_sa://gnn77376:***@dashdb-txn-sbox-yp-lon02-01.services.eu-gb.bluemix.net:50000/BLUDB
Done.
###Markdown
Problem 3 How many crimes involve an arrest?
###Code
%%sql select count(distinct(id)) from chicago_crime
where arrest = TRUE
###Output
* ibm_db_sa://gnn77376:***@dashdb-txn-sbox-yp-lon02-01.services.eu-gb.bluemix.net:50000/BLUDB
Done.
###Markdown
Problem 4 Which unique types of crimes have been recorded at GAS STATION locations?
###Code
%%sql select distinct(primary_type), location_description from chicago_crime
where location_description = 'GAS STATION'
###Output
* ibm_db_sa://gnn77376:***@dashdb-txn-sbox-yp-lon02-01.services.eu-gb.bluemix.net:50000/BLUDB
Done.
###Markdown
4 unique types of crimes Hint: Which column lists types of crimes e.g. THEFT? Problem 5 In the CENSUS_DATA table list all Community Areas whose names start with the letter ‘B’.
###Code
%%sql select COMMUNITY_AREA_NAME as "Community Area Names" from CHICAGO_SOCIOECONOMIC_DATA
where COMMUNITY_AREA_NAME like 'B%'
###Output
* ibm_db_sa://gnn77376:***@dashdb-txn-sbox-yp-lon02-01.services.eu-gb.bluemix.net:50000/BLUDB
Done.
###Markdown
Problem 6 Which schools in Community Areas 10 to 15 are healthy school certified?
###Code
%%sql
select name_of_school, community_area_number, HEALTHY_SCHOOL_CERTIFIED from chicago_school
where HEALTHY_SCHOOL_CERTIFIED = 'Yes' and community_area_number between 10 and 15
###Output
* ibm_db_sa://gnn77376:***@dashdb-txn-sbox-yp-lon02-01.services.eu-gb.bluemix.net:50000/BLUDB
Done.
###Markdown
Problem 7 What is the average school Safety Score?
###Code
%sql select avg(safety_score) from chicago_school
###Output
* ibm_db_sa://gnn77376:***@dashdb-txn-sbox-yp-lon02-01.services.eu-gb.bluemix.net:50000/BLUDB
Done.
###Markdown
Problem 8 List the top 5 Community Areas by average College Enrollment [number of students]
###Code
%%sql
select COMMUNITY_AREA_NAME as "Community area name", avg(COLLEGE_ENROLLMENT) as "Average college enrollment" from chicago_school
group by COMMUNITY_AREA_NAME
order by avg(COLLEGE_ENROLLMENT) desc
limit 5
###Output
* ibm_db_sa://gnn77376:***@dashdb-txn-sbox-yp-lon02-01.services.eu-gb.bluemix.net:50000/BLUDB
Done.
###Markdown
Problem 9 Use a sub-query to determine which Community Area has the least value for school Safety Score?
###Code
%%sql
select COMMUNITY_AREA_NAME as "Community area name", SAFETY_SCORE as "Safety score" from chicago_school
where SAFETY_SCORE = (select min(SAFETY_SCORE) from chicago_school)
###Output
* ibm_db_sa://gnn77376:***@dashdb-txn-sbox-yp-lon02-01.services.eu-gb.bluemix.net:50000/BLUDB
Done.
###Markdown
Problem 10 [Without using an explicit JOIN operator] Find the Per Capita Income of the Community Area which has a school Safety Score of 1.
###Code
%%sql
select PER_CAPITA_INCOME_ as "Per Capita Income" from CHICAGO_SOCIOECONOMIC_DATA CSD, chicago_school CS
where CSD.CA = CS.COMMUNITY_AREA_NUMBER and SAFETY_SCORE = 1
###Output
* ibm_db_sa://gnn77376:***@dashdb-txn-sbox-yp-lon02-01.services.eu-gb.bluemix.net:50000/BLUDB
Done.
|
06-Built-in-Data-Structures.ipynb
|
###Markdown
*This notebook contains an excerpt from the [Whirlwind Tour of Python](http://www.oreilly.com/programming/free/a-whirlwind-tour-of-python.csp) by Jake VanderPlas; the content is available [on GitHub](https://github.com/jakevdp/WhirlwindTourOfPython).**The text and code are released under the [CC0](https://github.com/jakevdp/WhirlwindTourOfPython/blob/master/LICENSE) license; see also the companion project, the [Python Data Science Handbook](https://github.com/jakevdp/PythonDataScienceHandbook).* 組み込みのデータ構造 Built-In Data Structures We have seen Python's simple types: ``int``, ``float``, ``complex``, ``bool``, ``str``, and so on.Python also has several built-in compound types, which act as containers for other types.These compound types are:Pythonのシンプルな型を見てきました。``int``, ``float``, ``complex``, ``bool``, ``str`` などです。Python にはいくつかの複合型が組み込まれており、他の型のコンテナとして機能します。これらの複合型は以下の通りです。| Type Name | Example |Description ||-----------|---------------------------|---------------------------------------|| ``list`` | ``[1, 2, 3]`` | Ordered collection || ``tuple`` | ``(1, 2, 3)`` | Immutable ordered collection || ``dict`` | ``{'a':1, 'b':2, 'c':3}`` | Unordered (key,value) mapping || ``set`` | ``{1, 2, 3}`` | Unordered collection of unique values |As you can see, round, square, and curly brackets have distinct meanings when it comes to the type of collection produced.We'll take a quick tour of these data structures here.ご覧のように、丸括弧、四角括弧、中括弧は、生成されるコレクションの種類によって異なる意味を持っています。ここでは、これらのデータ構造について簡単に説明します。 リスト ListsLists are the basic *ordered* and *mutable* data collection type in Python.They can be defined with comma-separated values between square brackets; for example, here is a list of the first several prime numbers:リストは Python の基本的な *ordered* および *mutable* データ収集型です。これらのリストは、角括弧の間にカンマで区切られた値で定義することができます; 例えば、以下は最初の数個の素数のリストです。
###Code
L = [2, 3, 5, 7]
###Output
_____no_output_____
###Markdown
Lists have a number of useful properties and methods available to them.Here we'll take a quick look at some of the more common and useful ones:リストには多くの便利なプロパティやメソッドがあります。ここでは、より一般的で便利なプロパティを簡単に見てみましょう。
###Code
# Length of a list
len(L)
# Append a value to the end
L.append(11)
L
# Addition concatenates lists
L + [13, 17, 19]
# sort() method sorts in-place
L = [2, 5, 1, 6, 3, 4]
L.sort()
L
###Output
_____no_output_____
###Markdown
In addition, there are many more built-in list methods; they are well-covered in Python's [online documentation](https://docs.python.org/3/tutorial/datastructures.html).While we've been demonstrating lists containing values of a single type, one of the powerful features of Python's compound objects is that they can contain objects of *any* type, or even a mix of types. For example:Python の [オンラインドキュメント](https://docs.python.org/3/tutorial/datastructures.html) で十分に説明されています。ここまでは単一の型の値を含むリストのデモをしてきましたが、Pythonの複合オブジェクトの強力な機能の一つは、*any*型のオブジェクトを含むことができること、あるいは型が混在しているオブジェクトを含むことができることです。例えば、以下のようになります。
###Code
L = [1, 'two', 3.14, [0, 3, 5]]
###Output
_____no_output_____
###Markdown
This flexibility is a consequence of Python's dynamic type system.Creating such a mixed sequence in a statically-typed language like C can be much more of a headache!We see that lists can even contain other lists as elements.Such type flexibility is an essential piece of what makes Python code relatively quick and easy to write.So far we've been considering manipulations of lists as a whole; another essential piece is the accessing of individual elements.This is done in Python via *indexing* and *slicing*, which we'll explore next.この柔軟性は Python の動的型システムの結果です。C言語のような静的型付けされた言語でこのような混合シーケンスを作成するのは、頭の痛い問題です。リストは他のリストを要素として含むこともできます。このような型の柔軟性は、Python のコードを比較的速く簡単に書くための重要な要素です。ここまではリスト全体の操作について考えてきましたが、もう一つの重要な要素は個々の要素へのアクセスです。これは Python で *indexing* と *slicing* を介して行われます。 リストのインデックス作成とスライス List indexing and slicingPython provides access to elements in compound types through *indexing* for single elements, and *slicing* for multiple elements.As we'll see, both are indicated by a square-bracket syntax.Suppose we return to our list of the first several primes:Pythonは、単一の要素に対しては*indexing*、複数の要素に対しては*slicing*で複合型の要素へのアクセスを提供しています。見ての通り、両方とも角括弧の構文で示されています。最初の数個の素数のリストに戻るとしましょう。
###Code
L = [2, 3, 5, 7, 11]
###Output
_____no_output_____
###Markdown
Python uses *zero-based* indexing, so we can access the first and second element in using the following syntax:Pythonは*zero-based*インデックスを使用しているので、以下の構文を使用して1番目と2番目の要素にアクセスすることができます。
###Code
L[0]
L[1]
###Output
_____no_output_____
###Markdown
Elements at the end of the list can be accessed with negative numbers, starting from -1:リストの最後にある要素は、-1から始まる負の数でアクセスできます。
###Code
L[-1]
L[-2]
###Output
_____no_output_____
###Markdown
You can visualize this indexing scheme this way:このようにして、このインデックス化スキームを可視化することができます。  Here values in the list are represented by large numbers in the squares; list indices are represented by small numbers above and below.In this case, ``L[2]`` returns ``5``, because that is the next value at index ``2``.ここではリストの値は四角の中の大きな数字で表され、リストのインデックスは上下の小さな数字で表されます。この場合、 ``L[2]``はインデックス ``2`` の次の値である ``5`` を返します。 Where *indexing* is a means of fetching a single value from the list, *slicing* is a means of accessing multiple values in sub-lists.It uses a colon to indicate the start point (inclusive) and end point (non-inclusive) of the sub-array.For example, to get the first three elements of the list, we can write:*indexing*がリストから単一の値を取得する手段であるのに対し、*slicing*はサブリスト内の複数の値にアクセスする手段です。部分配列の開始点(包含)と終了点(非包含)を示すためにコロンを使用します。例えば、リストの最初の3つの要素を取得するには、次のように書きます。
###Code
L[0:3]
###Output
_____no_output_____
###Markdown
Notice where ``0`` and ``3`` lie in the preceding diagram, and how the slice takes just the values between the indices.If we leave out the first index, ``0`` is assumed, so we can equivalently write:上の図で ``0`` と ``3`` がどこにあるか、そしてスライスがどのようにインデックス間の値だけを取るかに注目してください。最初のインデックスを省略すると ``0`` が仮定されるので、等価的に書くことができます。
###Code
L[:3]
###Output
_____no_output_____
###Markdown
Similarly, if we leave out the last index, it defaults to the length of the list.Thus, the last three elements can be accessed as follows:同様に、最後のインデックスを省略すると、リストの長さがデフォルトになります。したがって、最後の3つの要素は以下のようにアクセスできます。
###Code
L[-3:]
###Output
_____no_output_____
###Markdown
Finally, it is possible to specify a third integer that represents the step size; for example, to select every second element of the list, we can write:最後に、ステップサイズを表す3番目の整数を指定することができます。例えば、リストの2番目の要素をすべて選択するには、次のように書きます。
###Code
L[::2] # equivalent to L[0:len(L):2]
###Output
_____no_output_____
###Markdown
A particularly useful version of this is to specify a negative step, which will reverse the array:特に便利なのは、負のステップを指定して配列を反転させることです。
###Code
L[::-1]
###Output
_____no_output_____
###Markdown
Both indexing and slicing can be used to set elements as well as access them.The syntax is as you would expect:インデックス作成とスライスの両方とも、要素の設定とアクセスに使用することができます。構文は期待通りです。
###Code
L[0] = 100
print(L)
L[1:3] = [55, 56]
print(L)
###Output
[100, 55, 56, 7, 11]
###Markdown
A very similar slicing syntax is also used in many data science-oriented packages, including NumPy and Pandas (mentioned in the introduction).Now that we have seen Python lists and how to access elements in ordered compound types, let's take a look at the other three standard compound data types mentioned earlier.非常に似たようなスライシング構文は、NumPyやPandasなどのデータサイエンス向けの多くのパッケージでも使用されています(紹介文でも触れました)。さて、Pythonのリストと、順序付き複合型の要素へのアクセス方法を見てきましたが、先に述べた他の3つの標準的な複合型データ型を見てみましょう。 タプル TuplesTuples are in many ways similar to lists, but they are defined with parentheses rather than square brackets:タプルは多くの点でリストに似ていますが、角括弧ではなく括弧で定義されています。
###Code
t = (1, 2, 3)
###Output
_____no_output_____
###Markdown
They can also be defined without any brackets at all:また、括弧を全く付けずに定義することもできます。
###Code
t = 1, 2, 3
print(t)
###Output
(1, 2, 3)
###Markdown
Like the lists discussed before, tuples have a length, and individual elements can be extracted using square-bracket indexing:前に説明したリストと同様に、タプルは長さを持ち、個々の要素は角括弧インデックスを使って抽出することができます。
###Code
len(t)
t[0]
###Output
_____no_output_____
###Markdown
The main distinguishing feature of tuples is that they are *immutable*: this means that once they are created, their size and contents cannot be changed:タプルの主な特徴は、*immutable*であるということです:これは、一度作成されたタプルはサイズや内容を変更できないことを意味します。
###Code
t[1] = 4
t.append(4)
###Output
_____no_output_____
###Markdown
Tuples are often used in a Python program; a particularly common case is in functions that have multiple return values.For example, the ``as_integer_ratio()`` method of floating-point objects returns a numerator and a denominator; this dual return value comes in the form of a tuple:Pythonのプログラムではタプルがよく使われますが、特に一般的なのは複数の戻り値を持つ関数です。例えば、浮動小数点オブジェクトの ``as_integer_ratio()`` メソッドは分子と分母を返します。
###Code
x = 0.125
x.as_integer_ratio()
###Output
_____no_output_____
###Markdown
These multiple return values can be individually assigned as follows:
###Code
numerator, denominator = x.as_integer_ratio()
print(numerator / denominator)
###Output
0.125
###Markdown
The indexing and slicing logic covered earlier for lists works for tuples as well, along with a host of other methods.Refer to the online [Python documentation](https://docs.python.org/3/tutorial/datastructures.html) for a more complete list of these.先ほどリストのために説明したインデックス作成とスライスのロジックは、他の多くのメソッドと同様にタプルでも動作します。これらのより完全なリストについては、オンラインの [Python documentation](https://docs.python.org/3/tutorial/datastructures.html) を参照してください。 辞書 DictionariesDictionaries are extremely flexible mappings of keys to values, and form the basis of much of Python's internal implementation.They can be created via a comma-separated list of ``key:value`` pairs within curly braces:辞書はキーと値の非常に柔軟なマッピングであり、Pythonの内部実装の多くの基礎を形成しています。辞書は中括弧の中にある ``key:value`` のペアをカンマで区切ったリストで作成することができます。
###Code
numbers = {'one':1, 'two':2, 'three':3}
###Output
_____no_output_____
###Markdown
Items are accessed and set via the indexing syntax used for lists and tuples, except here the index is not a zero-based order but valid key in the dictionary:リストやタプルに使用されるインデックス構文を使用してアイテムにアクセスしたり設定したりします。
###Code
# Access a value via the key
numbers['two']
###Output
_____no_output_____
###Markdown
New items can be added to the dictionary using indexing as well:インデックスを使用して辞書に新しい項目を追加することもできます。
###Code
# Set a new key:value pair
numbers['ninety'] = 90
print(numbers)
###Output
{'three': 3, 'ninety': 90, 'two': 2, 'one': 1}
###Markdown
Keep in mind that dictionaries do not maintain any sense of order for the input parameters; this is by design.This lack of ordering allows dictionaries to be implemented very efficiently, so that random element access is very fast, regardless of the size of the dictionary (if you're curious how this works, read about the concept of a *hash table*).The [python documentation](https://docs.python.org/3/library/stdtypes.html) has a complete list of the methods available for dictionaries.辞書は入力パラメータの順序を保持していないことを覚えておいてください。このように順序がないため、辞書を非常に効率的に実装することができ、辞書のサイズに関係なく、ランダムな要素へのアクセスが非常に高速になります (これがどのように動作するのか興味がある場合は、*hash table*の概念を読んでください)。The [python documentation](https://docs.python.org/3/library/stdtypes.html) に、辞書で利用可能なメソッドの完全なリストがあります。 セット SetsThe fourth basic collection is the set, which contains unordered collections of unique items.They are defined much like lists and tuples, except they use the curly brackets of dictionaries:4 番目の基本的なコレクションはセットで、ユニークなアイテムの順序なしのコレクションを含みます。これらは、辞書の中括弧を使用する以外は、リストやタプルとよく似て定義されています。
###Code
primes = {2, 3, 5, 7}
odds = {1, 3, 5, 7, 9}
###Output
_____no_output_____
###Markdown
If you're familiar with the mathematics of sets, you'll be familiar with operations like the union, intersection, difference, symmetric difference, and others.Python's sets have all of these operations built-in, via methods or operators.For each, we'll show the two equivalent methods:集合の数学に精通している人ならば、和、交点、差、対称差などの演算に精通しているでしょう。Pythonの集合には、メソッドや演算子を介してこれらの演算が組み込まれています。それぞれについて、2つの等価なメソッドを紹介します。
###Code
# union: items appearing in either
primes | odds # with an operator
primes.union(odds) # equivalently with a method
# intersection: items appearing in both
primes & odds # with an operator
primes.intersection(odds) # equivalently with a method
# difference: items in primes but not in odds
primes - odds # with an operator
primes.difference(odds) # equivalently with a method
# symmetric difference: items appearing in only one set
primes ^ odds # with an operator
primes.symmetric_difference(odds) # equivalently with a method
###Output
_____no_output_____
###Markdown
Built-in Data StructuresWe have seen Python's simple types: ``int``, ``float``, ``complex``, ``bool``, ``str``, etc.Python also has several built-in compound types, which act as containers for other types.These compound types are:| Type Name | Example |Description ||-----------|---------------------------|---------------------------------|| ``list`` | ``[1, 2, 3]`` | ordered collection || ``tuple`` | ``(1, 2, 3)`` | immutable ordered collection || ``dict`` | ``{'a':1, 'b':2, 'c':3}`` | unordered (key,value) mapping || ``set`` | ``{1, 2, 3}`` | unordered collection |As you can see, round, square, and curly brackets have distinct meanings when it comes to the type of collection produced.We'll take a quick tour of these data structures here. ListsLists are the basic *ordered* and *mutable* data collection type in Python.They can be defined with comma-separated values between square brackets; for example, here is a list of the first several prime numbers
###Code
L = [2, 3, 5, 7]
###Output
_____no_output_____
###Markdown
Lists have a number of useful properties and methods available to them.Here we'll take a quick look at some of the more common and useful ones:
###Code
# Length of a list
len(L)
# Append a value to the end
L.append(11)
L
# Addition concatenates lists
L + [13, 17, 19]
# sort() method sorts in-place
L = [2, 5, 1, 6, 3, 4]
L.sort()
L
###Output
_____no_output_____
###Markdown
In addition there are many more built-in list methods; they are well-covered in Python's [online documentation](https://docs.python.org/3/tutorial/datastructures.html).While we've been demonstrating lists containing values of a single type, one of the powerful features of Python's compound objects is that they can contain objects of *any* type, or even a mix of types. For example:
###Code
L = [1, 'two', 3.14, [0, 3, 5]]
###Output
_____no_output_____
###Markdown
This flexibility is a consequence of Python's dynamic type system.Creating such a mixed sequence in a statically-typed language like C can be much more of a headache!We see that lists can even contain other lists as elements.Such type flexibility is an essential piece of what makes Python code relatively quick and easy to write.So far we've been considering manipulations of lists as a whole; another essential piece is the accessing of individual elements.This is done in Python via *indexing* and *slicing*, which we'll explore next. List Indexing and SlicingPython provides access to elements in compound types through *indexing* for single elements, and *slicing* for multiple elements.As we'll see, both are indicated by a square-bracket syntax.Suppose we return to our list of the first several primes:
###Code
L = [2, 3, 5, 7, 11]
###Output
_____no_output_____
###Markdown
Python uses *zero-based* indexing, so we can access the first and second elements using the following syntax:
###Code
L[0]
L[1]
###Output
_____no_output_____
###Markdown
Elements at the end of the list can be accessed with negative numbers, starting from -1:
###Code
L[-1]
L[-2]
###Output
_____no_output_____
###Markdown
You can visualize this indexing scheme this way:  Here values in the list are represented by large numbers in the squares; list indices are represented by small numbers above and below.In this case ``L[2]`` returns ``5``, because that is the next value at index ``2``. Where *indexing* is a means of fetching a single value from the list, *slicing* is a means of accessing multiple values in sub-lists.It uses a colon to indicate the start point (inclusive) and end point (non-inclusive) of the sub-array.For example, to get the first three elements of the list, we can write
###Code
L[0:3]
###Output
_____no_output_____
###Markdown
Notice where ``0`` and ``3`` lie in the above diagram, and how the slice takes just the values between the indices.If we leave out the first index, ``0`` is assumed, so we can equivalently write
###Code
L[:3]
###Output
_____no_output_____
###Markdown
Similarly, if we leave out the last index, it defaults to the length of the list.Thus the last three elements can be accessed as follows
###Code
L[-3:]
###Output
_____no_output_____
###Markdown
Finally, it is possible to specify a third integer which represents the step size; for example, to select every second element of the list, we can write:
###Code
L[::2] # equivalent to L[0:len(L):2]
###Output
_____no_output_____
###Markdown
A particularly useful version of this is to specify a negative step, which will reverse the array:
###Code
L[::-1]
###Output
_____no_output_____
###Markdown
Both indexing and slicing can be used to set elements as well as access them.The syntax is as you would expect:
###Code
L[0] = 100
print(L)
L[1:3] = [55, 56]
print(L)
###Output
[100, 55, 56, 7, 11]
###Markdown
A very similar slicing syntax is also used in many data science oriented packages, including NumPy and Pandas (mentioned in the introduction).Now that we have seen Python lists and how to access elements in ordered compound types, let's take a look at the other three standard compound data types mentioned above. TuplesTuples are in many ways similar to lists, but they are defined with parentheses rather than square brackets:
###Code
t = (1, 2, 3)
###Output
_____no_output_____
###Markdown
They can also be defined without any brackets at all:
###Code
t = 1, 2, 3
print(t)
###Output
(1, 2, 3)
###Markdown
Like the lists discussed above, tuples have a length, and individual elements can be extracted using square-bracket indexing:
###Code
len(t)
t[0]
###Output
_____no_output_____
###Markdown
The main distinguishing feature of tuples is that they are **immutable**: this means that once they are created, their size and contents cannot be changed:
###Code
t[1] = 4
t.append(4)
###Output
_____no_output_____
###Markdown
Tuples are often used in a Python program; a particularly common case is in functions which have multiple return values.For example, the ``as_integer_ratio()`` method of floating-point objects returns a numerator and a denominator; this dual return value comes in the form of a tuple:
###Code
x = 0.125
x.as_integer_ratio()
###Output
_____no_output_____
###Markdown
These multiple return values can be individually assigned as follows:
###Code
numerator, denominator = x.as_integer_ratio()
print(numerator / denominator)
###Output
0.125
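###Markdown
This "tuple packing and unpacking" behaviour is also what makes the common Python idiom for swapping two variables work. A small added illustration (not part of the original text):
###Code
a, b = 1, 2    # the right-hand side is packed into a tuple, then unpacked into a and b
a, b = b, a    # swap: the tuple (b, a) is built first, then unpacked
print(a, b)    # 2 1
###Output
_____no_output_____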
###Markdown
The indexing and slicing logic covered above for lists works for tuples as well, along with a host of other methods.Refer to the online Python documentation for a more complete list of these. DictionariesDictionaries are extremely flexible mappings of keys to values, and form the basis of much of Python's internal implementation.They can be created via a comma-separated list of ``key:value`` pairs within curly braces:
###Code
numbers = {'one':1, 'two':2, 'three':3}
###Output
_____no_output_____
###Markdown
Items are accessed and set via the indexing syntax used for lists and tuples, except here the index is not a zero-based order but a valid key in the dictionary:
###Code
# Access a value via the key
numbers['two']
###Output
_____no_output_____
###Markdown
New items can be added to the dictionary using indexing as well:
###Code
# Set a new key, value pair
numbers['ninety'] = 90
print(numbers)
###Output
{'three': 3, 'ninety': 90, 'two': 2, 'one': 1}
###Markdown
Keep in mind that dictionaries do not maintain any sense of order for the input parameters; this is by design.This lack of ordering allows dictionaries to be implemented very efficiently, so that random element access is very fast, regardless of the size of the dictionary (if you're curious how this works, read about the concept of a *hash table*).The [python documentation](https://docs.python.org/3/library/stdtypes.html) has a complete list of the methods available for dictionaries. SetsThe fourth basic collection is the set, which contains unordered collections of unique items.They are defined much like lists and tuples, except they use the curly brackets of dictionaries:
###Code
primes = {2, 3, 5, 7}
odds = {1, 3, 5, 7, 9}
###Output
_____no_output_____
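###Markdown
Because sets hold only unique items, duplicates are silently dropped when a set is built; a quick added illustration:
###Code
# duplicate values collapse when constructing a set
print({2, 2, 3, 3, 5})         # {2, 3, 5}
# a common trick: remove duplicates from a list by round-tripping through set()
print(set([1, 1, 2, 3, 3]))    # {1, 2, 3}
###Output
_____no_output_____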
###Markdown
If you're familiar with the mathematics of sets, you'll be familiar with operations like the union, intersection, difference, symmetric difference, and others.Python's sets have all of these operations built-in, via methods or operators.For each, we'll show the two equivalent methods:
###Code
# union: items appearing in either
primes | odds # with an operator
primes.union(odds) # equivalently with a method
# intersection: items appearing in both
primes & odds # with an operator
primes.intersection(odds) # equivalently with a method
# difference: items in primes but not in odds
primes - odds # with an operator
primes.difference(odds) # equivalently with a method
# symmetric difference: items appearing in only one set
primes ^ odds # with an operator
primes.symmetric_difference(odds) # equivalently with a method
###Output
_____no_output_____
###Markdown
Built-In Data Structures We have seen Python's simple types: ``int``, ``float``, ``complex``, ``bool``, ``str``, and so on.Python also has several built-in compound types, which act as containers for other types.These compound types are:| Type Name | Example |Description ||-----------|---------------------------|---------------------------------------|| ``list`` | ``[1, 2, 3]`` | Ordered collection || ``tuple`` | ``(1, 2, 3)`` | Immutable ordered collection || ``dict`` | ``{'a':1, 'b':2, 'c':3}`` | Unordered (key,value) mapping || ``set`` | ``{1, 2, 3}`` | Unordered collection of unique values |As you can see, round, square, and curly brackets have distinct meanings when it comes to the type of collection produced.We'll take a quick tour of these data structures here. ListsLists are the basic *ordered* and *mutable* data collection type in Python.They can be defined with comma-separated values between square brackets; for example, here is a list of the first several prime numbers:
###Code
L = [2, 3, 5, 7]
###Output
_____no_output_____
###Markdown
Lists have a number of useful properties and methods available to them.Here we'll take a quick look at some of the more common and useful ones:
###Code
# Length of a list
len(L)
# Append a value to the end
L.append(11)
L
# Addition concatenates lists
L + [13, 17, 19]
# sort() method sorts in-place
L = [2, 5, 1, 6, 3, 4]
L.sort()
L
###Output
_____no_output_____
###Markdown
In addition, there are many more built-in list methods; they are well-covered in Python's [online documentation](https://docs.python.org/3/tutorial/datastructures.html).While we've been demonstrating lists containing values of a single type, one of the powerful features of Python's compound objects is that they can contain objects of *any* type, or even a mix of types. For example:
###Code
L = [1, 'two', 3.14, [0, 3, 5]]
###Output
_____no_output_____
###Markdown
This flexibility is a consequence of Python's dynamic type system.Creating such a mixed sequence in a statically-typed language like C can be much more of a headache!We see that lists can even contain other lists as elements.Such type flexibility is an essential piece of what makes Python code relatively quick and easy to write.So far we've been considering manipulations of lists as a whole; another essential piece is the accessing of individual elements.This is done in Python via *indexing* and *slicing*, which we'll explore next. List indexing and slicingPython provides access to elements in compound types through *indexing* for single elements, and *slicing* for multiple elements.As we'll see, both are indicated by a square-bracket syntax.Suppose we return to our list of the first several primes:
###Code
L = [2, 3, 5, 7, 11]
###Output
_____no_output_____
###Markdown
Python uses *zero-based* indexing, so we can access the first and second elements using the following syntax:
###Code
L[0]
L[1]
###Output
_____no_output_____
###Markdown
Elements at the end of the list can be accessed with negative numbers, starting from -1:
###Code
L[-1]
L[-2]
###Output
_____no_output_____
###Markdown
You can visualize this indexing scheme this way:  Here values in the list are represented by large numbers in the squares; list indices are represented by small numbers above and below.In this case, ``L[2]`` returns ``5``, because that is the next value at index ``2``. Where *indexing* is a means of fetching a single value from the list, *slicing* is a means of accessing multiple values in sub-lists.It uses a colon to indicate the start point (inclusive) and end point (non-inclusive) of the sub-array.For example, to get the first three elements of the list, we can write:
###Code
L[0:3]
###Output
_____no_output_____
###Markdown
Notice where ``0`` and ``3`` lie in the preceding diagram, and how the slice takes just the values between the indices.If we leave out the first index, ``0`` is assumed, so we can equivalently write:
###Code
L[:3]
###Output
_____no_output_____
###Markdown
Similarly, if we leave out the last index, it defaults to the length of the list.Thus, the last three elements can be accessed as follows:
###Code
L[-3:]
###Output
_____no_output_____
###Markdown
Finally, it is possible to specify a third integer that represents the step size; for example, to select every second element of the list, we can write:
###Code
L[::2] # equivalent to L[0:len(L):2]
###Output
_____no_output_____
###Markdown
A particularly useful version of this is to specify a negative step, which will reverse the array:
###Code
L[::-1]
###Output
_____no_output_____
###Markdown
Both indexing and slicing can be used to set elements as well as access them.The syntax is as you would expect:
###Code
L[0] = 100
print(L)
L[1:3] = [55, 56]
print(L)
###Output
[100, 55, 56, 7, 11]
###Markdown
A very similar slicing syntax is also used in many data science-oriented packages, including NumPy and Pandas (mentioned in the introduction).Now that we have seen Python lists and how to access elements in ordered compound types, let's take a look at the other three standard compound data types mentioned earlier. TuplesTuples are in many ways similar to lists, but they are defined with parentheses rather than square brackets:
###Code
t = (1, 2, 3)
###Output
_____no_output_____
###Markdown
They can also be defined without any brackets at all:
###Code
t = 1, 2, 3
print(t)
###Output
(1, 2, 3)
###Markdown
Like the lists discussed before, tuples have a length, and individual elements can be extracted using square-bracket indexing:
###Code
len(t)
t[0]
###Output
_____no_output_____
###Markdown
The main distinguishing feature of tuples is that they are *immutable*: this means that once they are created, their size and contents cannot be changed:
###Code
t[1] = 4
t.append(4)
###Output
_____no_output_____
###Markdown
Tuples are often used in a Python program; a particularly common case is in functions that have multiple return values.For example, the ``as_integer_ratio()`` method of floating-point objects returns a numerator and a denominator; this dual return value comes in the form of a tuple:
###Code
x = 0.125
x.as_integer_ratio()
###Output
_____no_output_____
###Markdown
These multiple return values can be individually assigned as follows:
###Code
numerator, denominator = x.as_integer_ratio()
print(numerator / denominator)
###Output
0.125
###Markdown
The indexing and slicing logic covered earlier for lists works for tuples as well, along with a host of other methods.Refer to the online [Python documentation](https://docs.python.org/3/tutorial/datastructures.html) for a more complete list of these. DictionariesDictionaries are extremely flexible mappings of keys to values, and form the basis of much of Python's internal implementation.They can be created via a comma-separated list of ``key:value`` pairs within curly braces:
###Code
numbers = {'one':1, 'two':2, 'three':3}
###Output
_____no_output_____
###Markdown
Items are accessed and set via the indexing syntax used for lists and tuples, except here the index is not a zero-based order but a valid key in the dictionary:
###Code
# Access a value via the key
numbers['two']
###Output
_____no_output_____
###Markdown
New items can be added to the dictionary using indexing as well:
###Code
# Set a new key:value pair
numbers['ninety'] = 90
print(numbers)
###Output
{'three': 3, 'ninety': 90, 'two': 2, 'one': 1}
###Markdown
Keep in mind that dictionaries do not maintain any sense of order for the input parameters; this is by design.This lack of ordering allows dictionaries to be implemented very efficiently, so that random element access is very fast, regardless of the size of the dictionary (if you're curious how this works, read about the concept of a *hash table*).The [python documentation](https://docs.python.org/3/library/stdtypes.html) has a complete list of the methods available for dictionaries. SetsThe fourth basic collection is the set, which contains unordered collections of unique items.They are defined much like lists and tuples, except they use the curly brackets of dictionaries:
###Code
primes = {2, 3, 5, 7}
odds = {1, 3, 5, 7, 9}
###Output
_____no_output_____
###Markdown
If you're familiar with the mathematics of sets, you'll be familiar with operations like the union, intersection, difference, symmetric difference, and others.Python's sets have all of these operations built-in, via methods or operators.For each, we'll show the two equivalent methods:
###Code
# union: items appearing in either
primes | odds # with an operator
primes.union(odds) # equivalently with a method
# intersection: items appearing in both
primes & odds # with an operator
primes.intersection(odds) # equivalently with a method
# difference: items in primes but not in odds
primes - odds # with an operator
primes.difference(odds) # equivalently with a method
# symmetric difference: items appearing in only one set
primes ^ odds # with an operator
primes.symmetric_difference(odds) # equivalently with a method
###Output
_____no_output_____
###Markdown
*This notebook contains an excerpt from the [Whirlwind Tour of Python](http://www.oreilly.com/programming/free/a-whirlwind-tour-of-python.csp) by Jake VanderPlas; the content is available [on GitHub](https://github.com/jakevdp/WhirlwindTourOfPython).**The text and code are released under the [CC0](https://github.com/jakevdp/WhirlwindTourOfPython/blob/master/LICENSE) license; see also the companion project, the [Python Data Science Handbook](https://github.com/jakevdp/PythonDataScienceHandbook).* Built-In Data Structures We have seen Python's simple types: ``int``, ``float``, ``complex``, ``bool``, ``str``, and so on.Python also has several built-in compound types, which act as containers for other types.These compound types are:| Type Name | Example |Description ||-----------|---------------------------|---------------------------------------|| ``list`` | ``[1, 2, 3]`` | Ordered collection || ``tuple`` | ``(1, 2, 3)`` | Immutable ordered collection || ``dict`` | ``{'a':1, 'b':2, 'c':3}`` | Unordered (key,value) mapping || ``set`` | ``{1, 2, 3}`` | Unordered collection of unique values |As you can see, round, square, and curly brackets have distinct meanings when it comes to the type of collection produced.We'll take a quick tour of these data structures here. ListsLists are the basic *ordered* and *mutable* data collection type in Python.They can be defined with comma-separated values between square brackets; for example, here is a list of the first several prime numbers:
###Code
L = [2, 3, 5, 7]
###Output
_____no_output_____
###Markdown
Lists have a number of useful properties and methods available to them.Here we'll take a quick look at some of the more common and useful ones:
###Code
# Length of a list
len(L)
# Append a value to the end
L.append(11)
L
# Addition concatenates lists
L + [13, 17, 19]
# sort() method sorts in-place
L = [2, 5, 1, 6, 3, 4]
L.sort()
L
L.sort??
###Output
_____no_output_____
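###Markdown
Note that ``list.sort()`` sorts the list in place and returns ``None``; if you want a new sorted list while leaving the original untouched, the built-in ``sorted()`` function is the usual choice. A short added comparison:
###Code
L = [2, 5, 1, 6, 3, 4]
M = sorted(L)      # sorted() returns a new sorted list
print(L)           # original list is unchanged
print(M)           # [1, 2, 3, 4, 5, 6]
print(L.sort())    # sort() mutates L and returns None
print(L)           # now [1, 2, 3, 4, 5, 6]
###Output
_____no_output_____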
###Markdown
In addition, there are many more built-in list methods; they are well-covered in Python's [online documentation](https://docs.python.org/3/tutorial/datastructures.html).While we've been demonstrating lists containing values of a single type, one of the powerful features of Python's compound objects is that they can contain objects of *any* type, or even a mix of types. For example:
###Code
L = [1, 'two', 3.14, [0, 3, 5]]
###Output
_____no_output_____
###Markdown
This flexibility is a consequence of Python's dynamic type system.Creating such a mixed sequence in a statically-typed language like C can be much more of a headache!We see that lists can even contain other lists as elements.Such type flexibility is an essential piece of what makes Python code relatively quick and easy to write.So far we've been considering manipulations of lists as a whole; another essential piece is the accessing of individual elements.This is done in Python via *indexing* and *slicing*, which we'll explore next. Shallow copy: ``` list.copy()``` - A shallow copy constructs a new compound object and then (to the extent possible) inserts references into it to the objects found in the original. - A deep copy constructs a new compound object and then, recursively, inserts copies into it of the objects found in the original. see: https://docs.python.org/3/library/copy.html
###Code
import copy
x = [1,2]
y = [x,3,4]
zs = copy.copy(y)
zd = copy.deepcopy(y)
# note that at this point the shallow and deep copies look the same
print(f'shallow copy: {zs}')
print(f'deep copy: {zd}')
# however, if we modify x, we see that the shallow copy changes but the deep copy does not
x.append(100)
print(f'shallow copy: {zs}')
print(f'deep copy: {zd}')
###Output
shallow copy: [[1, 2, 100], 3, 4]
deep copy: [[1, 2], 3, 4]
###Markdown
List indexing and slicingPython provides access to elements in compound types through *indexing* for single elements, and *slicing* for multiple elements.As we'll see, both are indicated by a square-bracket syntax.Suppose we return to our list of the first several primes:
###Code
L = [2, 3, 5, 7, 11]
###Output
_____no_output_____
###Markdown
Python uses *zero-based* indexing, so we can access the first and second elements using the following syntax:
###Code
L[0]
L[1]
###Output
_____no_output_____
###Markdown
Elements at the end of the list can be accessed with negative numbers, starting from -1:
###Code
L[-1]
L[-2]
###Output
_____no_output_____
###Markdown
You can visualize this indexing scheme this way:  Here values in the list are represented by large numbers in the squares; list indices are represented by small numbers above and below.In this case, ``L[2]`` returns ``5``, because that is the next value at index ``2``. Where *indexing* is a means of fetching a single value from the list, *slicing* is a means of accessing multiple values in sub-lists.It uses a colon to indicate the start point (inclusive) and end point (**non-inclusive**) of the sub-array.For example, to get the first three elements of the list, we can write:
###Code
L[0:3]
###Output
_____no_output_____
###Markdown
Notice where ``0`` and ``3`` lie in the preceding diagram, and how the slice takes just the values between the indices.If we leave out the first index, ``0`` is assumed, so we can equivalently write:
###Code
L[:3]
###Output
_____no_output_____
###Markdown
Similarly, if we leave out the last index, it defaults to the length of the list.Thus, the last three elements can be accessed as follows:
###Code
L[-3:]
###Output
_____no_output_____
###Markdown
Finally, it is possible to specify a third integer that represents the step size; for example, to select every second element of the list, we can write:
###Code
L[::2] # equivalent to L[0:len(L):2]
###Output
_____no_output_____
###Markdown
A particularly useful version of this is to specify a negative step, which will reverse the array:
###Code
L[::-1]
###Output
_____no_output_____
###Markdown
Both indexing and slicing can be used to set elements as well as access them.The syntax is as you would expect:
###Code
L[0] = 100
print(L)
L[1:3] = [55, 56]
print(L)
###Output
[100, 55, 56, 7, 11]
###Markdown
A very similar slicing syntax is also used in many data science-oriented packages, including NumPy and Pandas (mentioned in the introduction).Now that we have seen Python lists and how to access elements in ordered compound types, let's take a look at the other three standard compound data types mentioned earlier. TuplesTuples are in many ways similar to lists, but they are defined with parentheses rather than square brackets:
###Code
t = (1, 2, 3)
###Output
_____no_output_____
###Markdown
They can also be defined without any brackets at all:
###Code
t = 1, 2, 3
print(t)
###Output
(1, 2, 3)
###Markdown
Like the lists discussed before, tuples have a length, and individual elements can be extracted using square-bracket indexing:
###Code
len(t)
t[0]
###Output
_____no_output_____
###Markdown
The main distinguishing feature of tuples is that they are *immutable*: this means that once they are created, their size and contents cannot be changed:
###Code
t[1] = 4
t.append(4)
###Output
_____no_output_____
###Markdown
Tuples are often used in a Python program; a particularly common case is in functions that have multiple return values.For example, the ``as_integer_ratio()`` method of floating-point objects returns a numerator and a denominator; this dual return value comes in the form of a tuple:
###Code
x = 0.125658123123123
x.as_integer_ratio()
###Output
_____no_output_____
###Markdown
These multiple return values can be individually assigned as follows:
###Code
numerator, denominator = x.as_integer_ratio()
print(numerator / denominator)
###Output
0.125658123123123
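###Markdown
User-defined functions can return multiple values in the same way; the ``min_max`` helper below is a made-up example (not from the original text) whose packed tuple result can then be unpacked on assignment:
###Code
def min_max(values):
    """Return the smallest and largest element as a (min, max) tuple."""
    return min(values), max(values)    # the two values are packed into a tuple
lo, hi = min_max([3, 1, 4, 1, 5, 9])   # ...and unpacked here
print(lo, hi)                          # 1 9
###Output
_____no_output_____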
###Markdown
The indexing and slicing logic covered earlier for lists works for tuples as well, along with a host of other methods.Refer to the online [Python documentation](https://docs.python.org/3/tutorial/datastructures.html) for a more complete list of these. A list can also serve as a queue by using ```list.insert``` and ```list.pop```:
###Code
L = [1,2,3]
print(L.pop(0))
print(L)
L.insert(0,1)
print(L)
import timeit
start = 'L = list(range(1,1000000))'
op = 'L.pop(0);L.insert(0,1)'
t_list = timeit.timeit(stmt=op, setup=start,number=100)
print(t_list)
###Output
0.01463799999874027
###Markdown
Let's compare it with the same operation done via ```deque```
###Code
from collections import deque
L = deque(list(range(1,10)))
L.popleft()
print(list(L))
L.appendleft(1)
print(list(L))
start = 'from collections import deque;L = deque(list(range(1,1000000)))'
op = 'L.popleft();L.appendleft(1)'
t_deque = timeit.timeit(stmt=op, setup=start,number=100)
print(t_deque)
# the deque version is much faster; the ratio below is list time / deque time
print(t_list/t_deque)
###Output
944.3871094673706
###Markdown
DictionariesDictionaries are extremely flexible mappings of keys to values, and form the basis of much of Python's *internal implementation*.They can be created via a comma-separated list of ``key:value`` pairs within curly braces:
###Code
numbers = {'one':1, 'two':2, 'three':3}
###Output
_____no_output_____
###Markdown
Items are accessed and set via the indexing syntax used for lists and tuples, except here the index is not a zero-based order but a valid key in the dictionary:
###Code
# Access a value via the key
numbers['two']
###Output
_____no_output_____
###Markdown
New items can be added to the dictionary using indexing as well:
###Code
# Set a new key:value pair
numbers['ninety'] = 90
print(numbers)
mykey = 'two'
mykey in numbers
mykeys = ('two','one')
[x in numbers for x in mykeys]
###Output
_____no_output_____
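###Markdown
A few other dictionary operations are worth knowing about (a brief added supplement; the documentation linked below has the full list): ``get()`` looks up a key with a fallback default instead of raising ``KeyError``, and ``keys()``, ``values()``, and ``items()`` let you iterate over a dictionary's contents:
###Code
print(numbers.get('two', 0))       # 2  -- the key exists
print(numbers.get('seven', 0))     # 0  -- a missing key falls back to the default
print(list(numbers.keys()))        # all keys
print(list(numbers.values()))      # all values
for key, value in numbers.items():
    print(key, '->', value)        # iterate over (key, value) pairs
###Output
_____no_output_____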
###Markdown
Keep in mind that dictionaries do not maintain any sense of order for the input parameters; this is by design.This lack of ordering allows dictionaries to be implemented very efficiently, so that random element access is very fast, regardless of the size of the dictionary (if you're curious how this works, read about the concept of a *hash table*).The [python documentation](https://docs.python.org/3/library/stdtypes.html) has a complete list of the methods available for dictionaries. SetsThe fourth basic collection is the set, which contains unordered collections of unique items.They are defined much like lists and tuples, except they use the curly brackets of dictionaries:
###Code
primes = {2, 3, 5, 7}
odds = {1, 3, 5, 7, 9}
###Output
_____no_output_____
###Markdown
If you're familiar with the mathematics of sets, you'll be familiar with operations like the union, intersection, difference, symmetric difference, and others.Python's sets have all of these operations built-in, via methods or operators.For each, we'll show the two equivalent methods:
###Code
# union: items appearing in either
primes | odds # with an operator
primes.union(odds) # equivalently with a method
# intersection: items appearing in both
primes & odds # with an operator
primes.intersection(odds) # equivalently with a method
# difference: items in primes but not in odds
primes - odds # with an operator
primes.difference(odds) # equivalently with a method
# symmetric difference: items appearing in only one set
primes ^ odds # with an operator
primes.symmetric_difference(odds) # equivalently with a method
###Output
_____no_output_____
###Markdown
*This notebook contains an excerpt from the [Whirlwind Tour of Python](http://www.oreilly.com/programming/free/a-whirlwind-tour-of-python.csp) by Jake VanderPlas; the content is available [on GitHub](https://github.com/jakevdp/WhirlwindTourOfPython).**The text and code are released under the [CC0](https://github.com/jakevdp/WhirlwindTourOfPython/blob/master/LICENSE) license; see also the companion project, the [Python Data Science Handbook](https://github.com/jakevdp/PythonDataScienceHandbook).* Built-In Data Structures We have seen Python's simple types: ``int``, ``float``, ``complex``, ``bool``, ``str``, and so on.Python also has several built-in compound types, which act as containers for other types.These compound types are:| Type Name | Example |Description ||-----------|---------------------------|---------------------------------------|| ``list`` | ``[1, 2, 3]`` | Ordered collection || ``tuple`` | ``(1, 2, 3)`` | Immutable ordered collection || ``dict`` | ``{'a':1, 'b':2, 'c':3}`` | Unordered (key,value) mapping || ``set`` | ``{1, 2, 3}`` | Unordered collection of unique values |As you can see, round, square, and curly brackets have distinct meanings when it comes to the type of collection produced.We'll take a quick tour of these data structures here. ListsLists are the basic *ordered* and *mutable* data collection type in Python.They can be defined with comma-separated values between square brackets; for example, here is a list of the first several prime numbers:
###Code
L = [2, 3, 5, 7]
###Output
_____no_output_____
###Markdown
Lists have a number of useful properties and methods available to them.Here we'll take a quick look at some of the more common and useful ones:
###Code
# Length of a list
len(L)
# Append a value to the end
L.append(11)
L
# Addition concatenates lists
L + L
L.extend(L) # in-place equivalent of L = L + L
L
# sort() method sorts in-place
L = [2, 5, 1, 6, 3, 4]
L.sort()
L
###Output
_____no_output_____
###Markdown
In addition, there are many more built-in list methods; they are well-covered in Python's [online documentation](https://docs.python.org/3/tutorial/datastructures.html).While we've been demonstrating lists containing values of a single type, one of the powerful features of Python's compound objects is that they can contain objects of *any* type, or even a mix of types. For example:
###Code
L = [1, 'two', 3.14, [0, 3, 5]]
###Output
_____no_output_____
###Markdown
This flexibility is a consequence of Python's dynamic type system.Creating such a mixed sequence in a statically-typed language like C can be much more of a headache!We see that lists can even contain other lists as elements.Such type flexibility is an essential piece of what makes Python code relatively quick and easy to write.So far we've been considering manipulations of lists as a whole; another essential piece is the accessing of individual elements.This is done in Python via *indexing* and *slicing*, which we'll explore next. List indexing and slicingPython provides access to elements in compound types through *indexing* for single elements, and *slicing* for multiple elements.As we'll see, both are indicated by a square-bracket syntax.Suppose we return to our list of the first several primes:
###Code
L = [2, 3, 5, 7, 11]
###Output
_____no_output_____
###Markdown
Python uses *zero-based* indexing, so we can access the first and second elements using the following syntax:
###Code
L[0]
L[1]
###Output
_____no_output_____
###Markdown
Elements at the end of the list can be accessed with negative numbers, starting from -1:
###Code
L[-1]
L[-2]
###Output
_____no_output_____
###Markdown
You can visualize this indexing scheme this way:  Here values in the list are represented by large numbers in the squares; list indices are represented by small numbers above and below.In this case, ``L[2]`` returns ``5``, because that is the next value at index ``2``. Where *indexing* is a means of fetching a single value from the list, *slicing* is a means of accessing multiple values in sub-lists.It uses a colon to indicate the start point (inclusive) and end point (non-inclusive) of the sub-array.For example, to get the first three elements of the list, we can write:
###Code
print(list(range(0, 11, 3)))
###Output
[0, 3, 6, 9]
###Markdown
Notice where ``0`` and ``3`` lie in the preceding diagram, and how the slice takes just the values between the indices.If we leave out the first index, ``0`` is assumed, so we can equivalently write:
###Code
L[:3]
###Output
_____no_output_____
###Markdown
Similarly, if we leave out the last index, it defaults to the length of the list.Thus, the last three elements can be accessed as follows:
###Code
L[-3:]
###Output
_____no_output_____
###Markdown
Finally, it is possible to specify a third integer that represents the step size; for example, to select every second element of the list, we can write:
###Code
L[2:7:2] # every second element from index 2 up to (but not including) index 7
###Output
_____no_output_____
###Markdown
A particularly useful version of this is to specify a negative step, which will reverse the array:
###Code
L[::-1]
L.reverse() # reverse() reverses the list in place and returns None
###Output
_____no_output_____
###Markdown
Both indexing and slicing can be used to set elements as well as access them.The syntax is as you would expect:
###Code
L[0] = 100
L[-1] = 42
print(L)
print(L[1:3])
L[1:3] = [55, 56]
print(L)
###Output
[1, 2]
[100, 55, 56, 3, 4, 5, 6, 7, 8, 9, 42]
###Markdown
A very similar slicing syntax is also used in many data science-oriented packages, including NumPy and Pandas (mentioned in the introduction).Now that we have seen Python lists and how to access elements in ordered compound types, let's take a look at the other three standard compound data types mentioned earlier. TuplesTuples are in many ways similar to lists, but they are defined with parentheses rather than square brackets:
###Code
t = (1, 2, 3)
###Output
_____no_output_____
###Markdown
They can also be defined without any brackets at all:
###Code
t = 1, 2, 3
print(t)
###Output
(1, 2, 3)
###Markdown
Like the lists discussed before, tuples have a length, and individual elements can be extracted using square-bracket indexing:
###Code
len(t)
t[0]
t + t
###Output
_____no_output_____
###Markdown
The main distinguishing feature of tuples is that they are *immutable*: this means that once they are created, their size and contents cannot be changed:
###Code
t[1] = 4
(t[0],) + (4,) + t[2:]  # workaround: build a new tuple with the second element replaced
t.append(4)
###Output
_____no_output_____
###Markdown
Tuples are often used in a Python program; a particularly common case is in functions that have multiple return values.For example, the ``as_integer_ratio()`` method of floating-point objects returns a numerator and a denominator; this dual return value comes in the form of a tuple:
###Code
x = 0.125
x.as_integer_ratio()
###Output
_____no_output_____
###Markdown
These multiple return values can be individually assigned as follows:
###Code
numerator, denominator = x.as_integer_ratio()
print(numerator / denominator)
###Output
0.125
###Markdown
The indexing and slicing logic covered earlier for lists works for tuples as well, along with a host of other methods.Refer to the online [Python documentation](https://docs.python.org/3/tutorial/datastructures.html) for a more complete list of these. DictionariesDictionaries are extremely flexible mappings of keys to values, and form the basis of much of Python's internal implementation.They can be created via a comma-separated list of ``key:value`` pairs within curly braces:
###Code
numbers = {'one':1, 'two':2, 'three':3}
d = {}
d['uno'] = 1
d['dos'] = 2
d
###Output
_____no_output_____
###Markdown
Items are accessed and set via the indexing syntax used for lists and tuples, except here the index is not a zero-based order but a valid key in the dictionary:
###Code
# Access a value via the key
numbers['two']
###Output
_____no_output_____
###Markdown
New items can be added to the dictionary using indexing as well:
###Code
# Set a new key:value pair
numbers['ninety'] = 90
print(numbers)
###Output
{'three': 3, 'ninety': 90, 'two': 2, 'one': 1}
###Markdown
Keep in mind that dictionaries do not maintain any sense of order for the input parameters; this is by design.This lack of ordering allows dictionaries to be implemented very efficiently, so that random element access is very fast, regardless of the size of the dictionary (if you're curious how this works, read about the concept of a *hash table*).The [python documentation](https://docs.python.org/3/library/stdtypes.html) has a complete list of the methods available for dictionaries. SetsThe fourth basic collection is the set, which contains unordered collections of unique items.They are defined much like lists and tuples, except they use the curly brackets of dictionaries:
###Code
primes = {2, 3, 5, 7}
odds = {1, 3, 5, 7, 9}
###Output
_____no_output_____
###Markdown
If you're familiar with the mathematics of sets, you'll be familiar with operations like the union, intersection, difference, symmetric difference, and others.Python's sets have all of these operations built-in, via methods or operators.For each, we'll show the two equivalent methods:
###Code
# union: items appearing in either
primes | odds # with an operator
primes.union(odds) # equivalently with a method
# intersection: items appearing in both
primes & odds # with an operator
primes.intersection(odds) # equivalently with a method
# difference: items in primes but not in odds
primes - odds # with an operator
primes.difference(odds) # equivalently with a method
# symmetric difference: items appearing in only one set
primes ^ odds # with an operator
primes.symmetric_difference(odds) # equivalently with a method
###Output
_____no_output_____
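###Markdown
Besides the four operations shown above, sets also support comparison-style tests such as subset, superset, and disjointness; a short added illustration using the ``primes`` set defined earlier:
###Code
print({3, 5} <= primes)             # subset test with an operator
print(primes.issuperset({3, 5}))    # superset test with a method
print(primes.isdisjoint({4, 6}))    # True: the two sets share no elements
###Output
_____no_output_____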
###Markdown
Many more set methods and operations are available.You've probably already guessed what I'll say next: refer to Python's [online documentation](https://docs.python.org/3/library/stdtypes.html) for a complete reference. More Specialized Data StructuresPython contains several other data structures that you might find useful; these can generally be found in the built-in ``collections`` module.The collections module is fully-documented in [Python's online documentation](https://docs.python.org/3/library/collections.html), and you can read more about the various objects available there.In particular, I've found the following very useful on occasion:- ``collections.namedtuple``: Like a tuple, but each value has a name- ``collections.defaultdict``: Like a dictionary, but unspecified keys have a user-specified default value- ``collections.OrderedDict``: Like a dictionary, but the order of keys is maintainedOnce you've seen the standard built-in collection types, the use of these extended functionalities is very intuitive, and I'd suggest [reading about their use](https://docs.python.org/3/library/collections.html).
###Code
import collections
dir(collections)
###Output
_____no_output_____
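###Markdown
As a brief added illustration of the three containers mentioned above (the ``Point`` fields and example keys are made up for demonstration):
###Code
from collections import namedtuple, defaultdict, OrderedDict
# namedtuple: like a tuple, but each field is accessible by name
Point = namedtuple('Point', ['x', 'y'])
p = Point(1, 2)
print(p.x, p.y)
# defaultdict: missing keys get a default value from the supplied factory
counts = defaultdict(int)
for word in ['spam', 'egg', 'spam']:
    counts[word] += 1
print(dict(counts))
# OrderedDict: remembers the order in which keys were inserted
od = OrderedDict()
od['first'] = 1
od['second'] = 2
print(list(od.keys()))
###Output
_____no_output_____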
###Markdown
Built-In Data Structures 内建数据结构> We have seen Python's simple types: ``int``, ``float``, ``complex``, ``bool``, ``str``, and so on.Python also has several built-in compound types, which act as containers for other types.These compound types are:> | Type Name | Example |Description ||-----------|---------------------------|---------------------------------------|| ``list`` | ``[1, 2, 3]`` | Ordered collection || ``tuple`` | ``(1, 2, 3)`` | Immutable ordered collection || ``dict`` | ``{'a':1, 'b':2, 'c':3}`` | Unordered (key,value) mapping || ``set`` | ``{1, 2, 3}`` | Unordered collection of unique values |我们已经看过了Python的简单类型:``int``, ``float``, ``complex``, ``bool``, ``str`` 等等。Python同样有许多内建的复杂类型,作为提供给其他类型的容器。这些复杂类型是:| 名称 | 例子 |描述 ||-----------|---------------------------|---------------------------------------|| ``list`` | ``[1, 2, 3]`` | 有序集合 || ``tuple`` | ``(1, 2, 3)`` | 不可变的有序集合 || ``dict`` | ``{'a':1, 'b':2, 'c':3}`` | 无序的映射 || ``set`` | ``{1, 2, 3}`` | 无序且不重复的集合 |> As you can see, round, square, and curly brackets have distinct meanings when it comes to the type of collection produced.We'll take a quick tour of these data structures here.你将会看到,当我们使用这些复合类型时,小括号、中括号和大括号有着它们独特的意义。我们接下来会快速的浏览一遍这些数据结构。 Lists 列表> Lists are the basic *ordered* and *mutable* data collection type in Python.They can be defined with comma-separated values between square brackets; for example, here is a list of the first several prime numbers:列表是基本的*有序*及*可变*的数据集合类型。列表可以使用中括号中的逗号分隔的元素来定义;例如,下面定义了一个列表含有一些质数:
###Code
L = [2, 3, 5, 7]
###Output
_____no_output_____
###Markdown
> Lists have a number of useful properties and methods available to them.Here we'll take a quick look at some of the more common and useful ones:列表有许多有用的属性和方法。下面我们快速地看看其中一些最常用和有用的:
###Code
# Length of a list
# 列表的长度
len(L)
# Append a value to the end
# 在列表末尾添加元素
L.append(11)
L
# Addition concatenates lists
# 列表的连接
L + [13, 17, 19]
# sort() method sorts in-place
# 排序
L = [2, 5, 1, 6, 3, 4]
L.sort()
L
###Output
_____no_output_____
###Markdown
> In addition, there are many more built-in list methods; they are well-covered in Python's [online documentation](https://docs.python.org/3/tutorial/datastructures.html).还有很多内建的列表方法;你可以在Python的[在线文档](https://docs.python.org/3/tutorial/datastructures.html)中找到它们。> While we've been demonstrating lists containing values of a single type, one of the powerful features of Python's compound objects is that they can contain objects of *any* type, or even a mix of types. For example:虽然我们展示的列表都只包含同一种类型的元素,实际上,Python复合类型最强大的特性之一就是它们可以包含*任何*类型的元素,甚至包含复合类型本身。例如:
###Code
L = [1, 'two', 3.14, [0, 3, 5]]
###Output
_____no_output_____
###Markdown
> This flexibility is a consequence of Python's dynamic type system.Creating such a mixed sequence in a statically-typed language like C can be much more of a headache!We see that lists can even contain other lists as elements.Such type flexibility is an essential piece of what makes Python code relatively quick and easy to write.这种灵活性实际上是Python动态类型系统的结果。在静态类型语言中(如C)创建这样的混合类型集合会比在Python中头疼许多!我们在上面的例子中看到列表可以包含另外一个列表作为它的元素。这种灵活性是使得Python代码能够更快和容易编写的关键因素。> So far we've been considering manipulations of lists as a whole; another essential piece is the accessing of individual elements.This is done in Python via *indexing* and *slicing*, which we'll explore next.目前我们只是看到了如何将列表作为一个整体进行操作;还有一个核心的操作是访问和操作列表中的单个或部分元素。在Python中,我们通过*索引*和*切片*操作实现,我们现在来研究一下。 List indexing and slicing 列表索引和切片> Python provides access to elements in compound types through *indexing* for single elements, and *slicing* for multiple elements.As we'll see, both are indicated by a square-bracket syntax.Suppose we return to our list of the first several primes:Python提供了*索引*操作用来访问复合类型中的单个元素,*切片*操作用来访问多个元素。我们将会看到,这两个操作都使用中括号语法。我们回到之前那个质数的列表:
###Code
L = [2, 3, 5, 7, 11]
###Output
_____no_output_____
###Markdown
> Python uses *zero-based* indexing, so we can access the first and second elements using the following syntax:Python使用*0基准*的索引,因此我们可以通过以下语法访问列表的第一个和第二个元素:
###Code
L[0]
L[1]
###Output
_____no_output_____
###Markdown
> Elements at the end of the list can be accessed with negative numbers, starting from -1:列表末尾的元素可以使用负数进行访问,从-1开始:
###Code
L[-1]
L[-2]
###Output
_____no_output_____
###Markdown
> You can visualize this indexing scheme this way:你可以通过下面的图了解索引:  > Here values in the list are represented by large numbers in the squares; list indices are represented by small numbers above and below.In this case, ``L[2]`` returns ``5``, because that is the next value at index ``2``.这里列表中的元素值使用方格中的大数字表示;列表的索引表示成上面和下面的小数字。在这里,``L[2]`` 得到 ``5``,因为它是索引``2``的下一个元素值。 > Where *indexing* is a means of fetching a single value from the list, *slicing* is a means of accessing multiple values in sub-lists.It uses a colon to indicate the start point (inclusive) and end point (non-inclusive) of the sub-array.For example, to get the first three elements of the list, we can write:*索引*是从列表中访问单个元素值的方法,*切片*是取得列表多个元素值或者叫子列表的方法。使用冒号指明开始索引(包含)和结束索引(不包含)来进行切片。例如,要活的列表的头三个元素值,我们可以写成:
###Code
L[0:3]
###Output
_____no_output_____
###Markdown
> Notice where ``0`` and ``3`` lie in the preceding diagram, and how the slice takes just the values between the indices.If we leave out the first index, ``0`` is assumed, so we can equivalently write:注意前面那张图中的``0`` 和 ``3``的位置,和切片操作如何在索引之间取得元素的。如果我们将开始索引留空,将默认为``0``,因此我们也可以等同的写成:
###Code
L[:3]
###Output
_____no_output_____
###Markdown
> Similarly, if we leave out the last index, it defaults to the length of the list.Thus, the last three elements can be accessed as follows:同样的,如果我们将结束索引留空,将默认为列表的长度。因此,列表最后三个元素可以如下方式访问:
###Code
L[-3:]
###Output
_____no_output_____
###Markdown
> Finally, it is possible to specify a third integer that represents the step size; for example, to select every second element of the list, we can write:最后,可以使用第三个整数表示切片的步长;例如,要选择列表中的偶数索引元素的话,我们可以写为:
###Code
L[::2] # equivalent to L[0:len(L):2] 等同于L[0:len(L):2]
###Output
_____no_output_____
###Markdown
> A particularly useful version of this is to specify a negative step, which will reverse the array:可以将步长指定为负数,这样的做法将会反向切片列表,非常有用:
###Code
L[::-1]
###Output
_____no_output_____
###Markdown
> Both indexing and slicing can be used to set elements as well as access them.The syntax is as you would expect:索引和切片操作除了访问元素值外,还可以用来设置元素值。你可以预见到语法:
###Code
L[0] = 100
print(L)
L[1:3] = [55, 56]
print(L)
###Output
[100, 55, 56, 7, 11]
###Markdown
> A very similar slicing syntax is also used in many data science-oriented packages, including NumPy and Pandas (mentioned in the introduction).这些索引和切片的语法会在其他一些数据科学常用的包当中使用,包括NumPy和Pandas,基本没有太大语法变化。> Now that we have seen Python lists and how to access elements in ordered compound types, let's take a look at the other three standard compound data types mentioned earlier.现在我们学习了Python的列表,以及如何在有序的复合类型中访问元素。让我们继续讨论其他的三个标准复合数据类型。 Tuples 元组> Tuples are in many ways similar to lists, but they are defined with parentheses rather than square brackets:元组在许多方面都与列表近似,但是元组使用小括号进行定义而不是中括号:
###Code
t = (1, 2, 3)
###Output
_____no_output_____
###Markdown
> They can also be defined without any brackets at all:元组也可以不使用任何括号进行定义:
###Code
t = 1, 2, 3
print(t)
###Output
(1, 2, 3)
###Markdown
> Like the lists discussed before, tuples have a length, and individual elements can be extracted using square-bracket indexing:就像之前讨论的列表,元组有一个长度,也可以使用方括号语法进行索引或切片:
###Code
len(t)
t[0]
###Output
_____no_output_____
###Markdown
> The main distinguishing feature of tuples is that they are *immutable*: this means that once they are created, their size and contents cannot be changed:元组最重要的特性是它们是*不可变*的。这意味着,一旦元组创建之后,它的长度和它包含的元素值都是不能改变的:
###Code
t[1] = 4
t.append(4)
###Output
_____no_output_____
###Markdown
> Tuples are often used in a Python program; a particularly common case is in functions that have multiple return values.For example, the ``as_integer_ratio()`` method of floating-point objects returns a numerator and a denominator; this dual return value comes in the form of a tuple:元组在Python中使用广泛;特别是在函数返回多个值时。例如,浮点数的``as_integer_ratio()``函数会返回分子和分母;这两个返回值会作为元组返回:
###Code
x = 0.125
x.as_integer_ratio()
###Output
_____no_output_____
###Markdown
> These multiple return values can be individually assigned as follows:这些函数返回的多个值可以单独地赋值给变量:
###Code
numerator, denominator = x.as_integer_ratio()
print(numerator / denominator)
###Output
0.125
###Markdown
> The indexing and slicing logic covered earlier for lists works for tuples as well, along with a host of other methods.Refer to the online [Python documentation](https://docs.python.org/3/tutorial/datastructures.html) for a more complete list of these.元组中索引和切片操作的使用与列表一样,其他的一些方法也是。参考在线[Python文档](https://docs.python.org/3/tutorial/datastructures.html)以获得更完整的信息。 Dictionaries 字典> Dictionaries are extremely flexible mappings of keys to values, and form the basis of much of Python's internal implementation.They can be created via a comma-separated list of ``key:value`` pairs within curly braces:字典是非常灵活的键值对映射关系,它是许多Python内部实现的基础。字典可以通过大括号内的逗号分隔的``键:值``对的方式来创建:
###Code
numbers = {'one':1, 'two':2, 'three':3}
###Output
_____no_output_____
###Markdown
> Items are accessed and set via the indexing syntax used for lists and tuples, except here the index is not a zero-based order but a valid key in the dictionary:访问字典的元素时,通过与列表和元组相似的索引操作来进行,区别是这里的索引不是0开始的整数,而是字典中相应的一个键:
###Code
# Access a value via the key
# 通过键访问字典值
numbers['two']
###Output
_____no_output_____
###Markdown
> New items can be added to the dictionary using indexing as well:新的键值对可以通过索引操作加入字典:
###Code
# Set a new key:value pair
# 加入一个新的键值对
numbers['ninety'] = 90
print(numbers)
###Output
{'three': 3, 'ninety': 90, 'two': 2, 'one': 1}
###Markdown
> Keep in mind that dictionaries do not maintain any sense of order for the input parameters; this is by design.This lack of ordering allows dictionaries to be implemented very efficiently, so that random element access is very fast, regardless of the size of the dictionary (if you're curious how this works, read about the concept of a *hash table*).The [python documentation](https://docs.python.org/3/library/stdtypes.html) has a complete list of the methods available for dictionaries.记住字典不会保留任何的顺序信息。字典不记录顺序这个特性使得它非常有效,随机访问元素非常快,基本不在乎字典的大小(如果你对此有兴趣,请阅读*哈希表*的相关概念)。在线[python文档](https://docs.python.org/3/library/stdtypes.html)提供了字典完整的方法列表。 Sets 集> The fourth basic collection is the set, which contains unordered collections of unique items.They are defined much like lists and tuples, except they use the curly brackets of dictionaries:第四个基础集合类型就是set,包含着无序的且非重复的元素。set定义的语法与列表和元组类似,除了它使用的是和字典一样的大括号:
###Code
primes = {2, 3, 5, 7}
odds = {1, 3, 5, 7, 9}
###Output
_____no_output_____
###Markdown
> If you're familiar with the mathematics of sets, you'll be familiar with operations like the union, intersection, difference, symmetric difference, and others.Python's sets have all of these operations built-in, via methods or operators.For each, we'll show the two equivalent methods:如果你熟悉数学上的集合,你就会熟悉集合的有关操作,如并集、交集、差集、对称差集等。Python内建了所有的这些操作,通过方法或运算符。每一个操作我们都能看到两个效果相同的方法:
###Code
# union: items appearing in either
# 并集
primes | odds # with an operator 使用运算符
primes.union(odds) # equivalently with a method 使用方法
# intersection: items appearing in both
# 交集
primes & odds # with an operator 使用运算符
primes.intersection(odds) # equivalently with a method 使用方法
# difference: items in primes but not in odds
# 差集
primes - odds # with an operator 使用运算符
primes.difference(odds) # equivalently with a method 使用方法
# symmetric difference: items appearing in only one set
# 对称差集
primes ^ odds # with an operator 使用运算符
primes.symmetric_difference(odds) # equivalently with a method 使用方法
###Output
_____no_output_____
###Markdown
*This notebook contains an excerpt from the [Whirlwind Tour of Python](http://www.oreilly.com/programming/free/a-whirlwind-tour-of-python.csp) by Jake VanderPlas; the content is available [on GitHub](https://github.com/jakevdp/WhirlwindTourOfPython).**The text and code are released under the [CC0](https://github.com/jakevdp/WhirlwindTourOfPython/blob/master/LICENSE) license; see also the companion project, the [Python Data Science Handbook](https://github.com/jakevdp/PythonDataScienceHandbook).* Built-In Data Structures We have seen Python's simple types: ``int``, ``float``, ``complex``, ``bool``, ``str``, and so on.Python also has several built-in compound types, which act as containers for other types.These compound types are:| Type Name | Example |Description ||-----------|---------------------------|---------------------------------------|| ``list`` | ``[1, 2, 3]`` | Ordered collection || ``tuple`` | ``(1, 2, 3)`` | Immutable ordered collection || ``dict`` | ``{'a':1, 'b':2, 'c':3}`` | Unordered (key,value) mapping || ``set`` | ``{1, 2, 3}`` | Unordered collection of unique values |As you can see, round, square, and curly brackets have distinct meanings when it comes to the type of collection produced.We'll take a quick tour of these data structures here. ListsLists are the basic *ordered* and *mutable* data collection type in Python.They can be defined with comma-separated values between square brackets; for example, here is a list of the first several prime numbers:
###Code
L = [2, 3, 5, 7]
###Output
_____no_output_____
###Markdown
Lists have a number of useful properties and methods available to them.Here we'll take a quick look at some of the more common and useful ones:
###Code
# Length of a list
len(L)
# Append a value to the end
L.append(11)
L
# Addition concatenates lists
L + [13, 17, 19]
# sort() method sorts in-place
L = [2, 5, 1, 6, 3, 4]
L.sort()
L
###Output
_____no_output_____
###Markdown
In addition, there are many more built-in list methods; they are well-covered in Python's [online documentation](https://docs.python.org/3/tutorial/datastructures.html).While we've been demonstrating lists containing values of a single type, one of the powerful features of Python's compound objects is that they can contain objects of *any* type, or even a mix of types. For example:
###Code
L = [1, "two", 3.14, [0, 3, 5]]
###Output
_____no_output_____
###Markdown
This flexibility is a consequence of Python's dynamic type system.Creating such a mixed sequence in a statically-typed language like C can be much more of a headache!We see that lists can even contain other lists as elements.Such type flexibility is an essential piece of what makes Python code relatively quick and easy to write.So far we've been considering manipulations of lists as a whole; another essential piece is the accessing of individual elements.This is done in Python via *indexing* and *slicing*, which we'll explore next. List indexing and slicingPython provides access to elements in compound types through *indexing* for single elements, and *slicing* for multiple elements.As we'll see, both are indicated by a square-bracket syntax.Suppose we return to our list of the first several primes:
###Code
L = [2, 3, 5, 7, 11]
###Output
_____no_output_____
###Markdown
Python uses *zero-based* indexing, so we can access the first and second elements using the following syntax:
###Code
L[0]
L[1]
###Output
_____no_output_____
###Markdown
Elements at the end of the list can be accessed with negative numbers, starting from -1:
###Code
L[-1]
L[-2]
###Output
_____no_output_____
###Markdown
You can visualize this indexing scheme this way:  Here values in the list are represented by large numbers in the squares; list indices are represented by small numbers above and below.In this case, ``L[2]`` returns ``5``, because that is the next value at index ``2``. Where *indexing* is a means of fetching a single value from the list, *slicing* is a means of accessing multiple values in sub-lists.It uses a colon to indicate the start point (inclusive) and end point (non-inclusive) of the sub-array.For example, to get the first three elements of the list, we can write:
###Code
L[0:3]
###Output
_____no_output_____
###Markdown
Notice where ``0`` and ``3`` lie in the preceding diagram, and how the slice takes just the values between the indices.If we leave out the first index, ``0`` is assumed, so we can equivalently write:
###Code
L[:3]
###Output
_____no_output_____
###Markdown
Similarly, if we leave out the last index, it defaults to the length of the list.Thus, the last three elements can be accessed as follows:
###Code
L[-3:]
###Output
_____no_output_____
###Markdown
Finally, it is possible to specify a third integer that represents the step size; for example, to select every second element of the list, we can write:
###Code
L[::2] # equivalent to L[0:len(L):2]
###Output
_____no_output_____
###Markdown
A particularly useful version of this is to specify a negative step, which will reverse the array:
###Code
L[::-1]
###Output
_____no_output_____
###Markdown
Both indexing and slicing can be used to set elements as well as access them.The syntax is as you would expect:
###Code
L[0] = 100
print(L)
L[1:3] = [55, 56]
print(L)
###Output
_____no_output_____
###Markdown
A very similar slicing syntax is also used in many data science-oriented packages, including NumPy and Pandas (mentioned in the introduction).Now that we have seen Python lists and how to access elements in ordered compound types, let's take a look at the other three standard compound data types mentioned earlier. TuplesTuples are in many ways similar to lists, but they are defined with parentheses rather than square brackets:
###Code
t = (1, 2, 3)
###Output
_____no_output_____
###Markdown
They can also be defined without any brackets at all:
###Code
t = 1, 2, 3
print(t)
###Output
_____no_output_____
###Markdown
Like the lists discussed before, tuples have a length, and individual elements can be extracted using square-bracket indexing:
###Code
len(t)
t[0]
###Output
_____no_output_____
###Markdown
The main distinguishing feature of tuples is that they are *immutable*: this means that once they are created, their size and contents cannot be changed:
###Code
t[1] = 4
t.append(4)
###Output
_____no_output_____
###Markdown
Tuples are often used in a Python program; a particularly common case is in functions that have multiple return values.For example, the ``as_integer_ratio()`` method of floating-point objects returns a numerator and a denominator; this dual return value comes in the form of a tuple:
###Code
x = 0.125
x.as_integer_ratio()
###Output
_____no_output_____
###Markdown
These multiple return values can be individually assigned as follows:
###Code
numerator, denominator = x.as_integer_ratio()
print(numerator / denominator)
###Output
_____no_output_____
###Markdown
The indexing and slicing logic covered earlier for lists works for tuples as well, along with a host of other methods.Refer to the online [Python documentation](https://docs.python.org/3/tutorial/datastructures.html) for a more complete list of these. DictionariesDictionaries are extremely flexible mappings of keys to values, and form the basis of much of Python's internal implementation.They can be created via a comma-separated list of ``key:value`` pairs within curly braces:
###Code
numbers = {"one": 1, "two": 2, "three": 3}
###Output
_____no_output_____
###Markdown
Items are accessed and set via the indexing syntax used for lists and tuples, except here the index is not a zero-based order but valid key in the dictionary:
###Code
# Access a value via the key
numbers["two"]
###Output
_____no_output_____
###Markdown
New items can be added to the dictionary using indexing as well:
###Code
# Set a new key:value pair
numbers["ninety"] = 90
print(numbers)
###Output
_____no_output_____
###Markdown
Keep in mind that dictionaries do not maintain any sense of order for the input parameters; this is by design.This lack of ordering allows dictionaries to be implemented very efficiently, so that random element access is very fast, regardless of the size of the dictionary (if you're curious how this works, read about the concept of a *hash table*).The [python documentation](https://docs.python.org/3/library/stdtypes.html) has a complete list of the methods available for dictionaries. SetsThe fourth basic collection is the set, which contains unordered collections of unique items.They are defined much like lists and tuples, except they use the curly brackets of dictionaries:
###Code
primes = {2, 3, 5, 7}
odds = {1, 3, 5, 7, 9}
###Output
_____no_output_____
###Markdown
If you're familiar with the mathematics of sets, you'll be familiar with operations like the union, intersection, difference, symmetric difference, and others.Python's sets have all of these operations built-in, via methods or operators.For each, we'll show the two equivalent methods:
###Code
# union: items appearing in either
primes | odds # with an operator
primes.union(odds) # equivalently with a method
# intersection: items appearing in both
primes & odds # with an operator
primes.intersection(odds) # equivalently with a method
# difference: items in primes but not in odds
primes - odds # with an operator
primes.difference(odds) # equivalently with a method
# symmetric difference: items appearing in only one set
primes ^ odds # with an operator
primes.symmetric_difference(odds) # equivalently with a method
###Output
_____no_output_____
###Markdown
*This notebook comes from [A Whirlwind Tour of Python](http://www.oreilly.com/programming/free/a-whirlwind-tour-of-python.csp) by Jake VanderPlas (OReilly Media, 2016). This content is licensed [CC0](https://github.com/jakevdp/WhirlwindTourOfPython/blob/master/LICENSE). The full notebook listing is available at https://github.com/jakevdp/WhirlwindTourOfPython.* Built-In Data StructuresWe have seen Python's simple types: ``int``, ``float``, ``complex``, ``bool``, ``str``, and so on.Python also has several built-in compound types, which act as containers for other types.These compound types are:| Type Name | Example |Description ||-----------|---------------------------|---------------------------------------|| ``list`` | ``[1, 2, 3]`` | Ordered collection || ``tuple`` | ``(1, 2, 3)`` | Immutable ordered collection || ``dict`` | ``{'a':1, 'b':2, 'c':3}`` | Unordered (key,value) mapping || ``set`` | ``{1, 2, 3}`` | Unordered collection of unique values |As you can see, round, square, and curly brackets have distinct meanings when it comes to the type of collection produced.We'll take a quick tour of these data structures here. ListsLists are the basic *ordered* and *mutable* data collection type in Python.They can be defined with comma-separated values between square brackets; for example, here is a list of the first several prime numbers:
###Code
L = [2, 3, 5, 7]
###Output
_____no_output_____
###Markdown
Lists have a number of useful properties and methods available to them.Here we'll take a quick look at some of the more common and useful ones:
###Code
# Length of a list
len(L)
# Append a value to the end
L.append(11)
L
# Addition concatenates lists
L + [13, 17, 19]
# sort() method sorts in-place
L = [2, 5, 1, 6, 3, 4]
L.sort()
L
###Output
_____no_output_____
###Markdown
In addition, there are many more built-in list methods; they are well-covered in Python's [online documentation](https://docs.python.org/3/tutorial/datastructures.html).While we've been demonstrating lists containing values of a single type, one of the powerful features of Python's compound objects is that they can contain objects of *any* type, or even a mix of types. For example:
###Code
L = [1, 'two', 3.14, [0, 3, 5]]
###Output
_____no_output_____
###Markdown
This flexibility is a consequence of Python's dynamic type system.Creating such a mixed sequence in a statically-typed language like C can be much more of a headache!We see that lists can even contain other lists as elements.Such type flexibility is an essential piece of what makes Python code relatively quick and easy to write.So far we've been considering manipulations of lists as a whole; another essential piece is the accessing of individual elements.This is done in Python via *indexing* and *slicing*, which we'll explore next. List indexing and slicingPython provides access to elements in compound types through *indexing* for single elements, and *slicing* for multiple elements.As we'll see, both are indicated by a square-bracket syntax.Suppose we return to our list of the first several primes:
###Code
L = [2, 3, 5, 7, 11]
###Output
_____no_output_____
###Markdown
Python uses *zero-based* indexing, so we can access the first and second element in using the following syntax:
###Code
L[0]
L[1]
###Output
_____no_output_____
###Markdown
Elements at the end of the list can be accessed with negative numbers, starting from -1:
###Code
L[-1]
L[-2]
###Output
_____no_output_____
###Markdown
You can visualize this indexing scheme this way:  Here values in the list are represented by large numbers in the squares; list indices are represented by small numbers above and below.In this case, ``L[2]`` returns ``5``, because that is the next value at index ``2``. Where *indexing* is a means of fetching a single value from the list, *slicing* is a means of accessing multiple values in sub-lists.It uses a colon to indicate the start point (inclusive) and end point (non-inclusive) of the sub-array.For example, to get the first three elements of the list, we can write:
###Code
L[0:3]
###Output
_____no_output_____
###Markdown
Notice where ``0`` and ``3`` lie in the preceding diagram, and how the slice takes just the values between the indices.If we leave out the first index, ``0`` is assumed, so we can equivalently write:
###Code
L[:3]
###Output
_____no_output_____
###Markdown
Similarly, if we leave out the last index, it defaults to the length of the list.Thus, the last three elements can be accessed as follows:
###Code
L[-3:]
###Output
_____no_output_____
###Markdown
Finally, it is possible to specify a third integer that represents the step size; for example, to select every second element of the list, we can write:
###Code
L[::2] # equivalent to L[0:len(L):2]
###Output
_____no_output_____
###Markdown
A particularly useful version of this is to specify a negative step, which will reverse the array:
###Code
L[::-1]
###Output
_____no_output_____
###Markdown
Both indexing and slicing can be used to set elements as well as access them.The syntax is as you would expect:
###Code
L[0] = 100
print(L)
L[1:3] = [55, 56]
print(L)
###Output
[100, 55, 56, 7, 11]
###Markdown
A very similar slicing syntax is also used in many data science-oriented packages, including NumPy and Pandas (mentioned in the introduction).Now that we have seen Python lists and how to access elements in ordered compound types, let's take a look at the other three standard compound data types mentioned earlier. TuplesTuples are in many ways similar to lists, but they are defined with parentheses rather than square brackets:
###Code
t = (1, 2, 3)
###Output
_____no_output_____
###Markdown
They can also be defined without any brackets at all:
###Code
t = 1, 2, 3
print(t)
###Output
(1, 2, 3)
###Markdown
Like the lists discussed before, tuples have a length, and individual elements can be extracted using square-bracket indexing:
###Code
len(t)
t[0]
###Output
_____no_output_____
###Markdown
The main distinguishing feature of tuples is that they are *immutable*: this means that once they are created, their size and contents cannot be changed:
###Code
t[1] = 4
t.append(4)
###Output
_____no_output_____
###Markdown
Tuples are often used in a Python program; a particularly common case is in functions that have multiple return values.For example, the ``as_integer_ratio()`` method of floating-point objects returns a numerator and a denominator; this dual return value comes in the form of a tuple:
###Code
x = 0.125
x.as_integer_ratio()
###Output
_____no_output_____
###Markdown
These multiple return values can be individually assigned as follows:
###Code
numerator, denominator = x.as_integer_ratio()
print(numerator / denominator)
###Output
0.125
###Markdown
The indexing and slicing logic covered earlier for lists works for tuples as well, along with a host of other methods.Refer to the online [Python documentation](https://docs.python.org/3/tutorial/datastructures.html) for a more complete list of these. DictionariesDictionaries are extremely flexible mappings of keys to values, and form the basis of much of Python's internal implementation.They can be created via a comma-separated list of ``key:value`` pairs within curly braces:
###Code
numbers = {'one':1, 'two':2, 'three':3}
###Output
_____no_output_____
###Markdown
Items are accessed and set via the indexing syntax used for lists and tuples, except here the index is not a zero-based order but valid key in the dictionary:
###Code
# Access a value via the key
numbers['two']
###Output
_____no_output_____
###Markdown
New items can be added to the dictionary using indexing as well:
###Code
# Set a new key:value pair
numbers['ninety'] = 90
print(numbers)
###Output
{'three': 3, 'ninety': 90, 'two': 2, 'one': 1}
###Markdown
Keep in mind that dictionaries do not maintain any sense of order for the input parameters; this is by design.This lack of ordering allows dictionaries to be implemented very efficiently, so that random element access is very fast, regardless of the size of the dictionary (if you're curious how this works, read about the concept of a *hash table*).The [python documentation](https://docs.python.org/3/library/stdtypes.html) has a complete list of the methods available for dictionaries. SetsThe fourth basic collection is the set, which contains unordered collections of unique items.They are defined much like lists and tuples, except they use the curly brackets of dictionaries:
###Code
primes = {2, 3, 5, 7}
odds = {1, 3, 5, 7, 9}
###Output
_____no_output_____
###Markdown
If you're familiar with the mathematics of sets, you'll be familiar with operations like the union, intersection, difference, symmetric difference, and others.Python's sets have all of these operations built-in, via methods or operators.For each, we'll show the two equivalent methods:
###Code
# union: items appearing in either
primes | odds # with an operator
primes.union(odds) # equivalently with a method
# intersection: items appearing in both
primes & odds # with an operator
primes.intersection(odds) # equivalently with a method
# difference: items in primes but not in odds
primes - odds # with an operator
primes.difference(odds) # equivalently with a method
# symmetric difference: items appearing in only one set
primes ^ odds # with an operator
primes.symmetric_difference(odds) # equivalently with a method
###Output
_____no_output_____
###Markdown
*This notebook contains an excerpt from the [Whirlwind Tour of Python](http://www.oreilly.com/programming/free/a-whirlwind-tour-of-python.csp) by Jake VanderPlas; the content is available [on GitHub](https://github.com/jakevdp/WhirlwindTourOfPython).**The text and code are released under the [CC0](https://github.com/jakevdp/WhirlwindTourOfPython/blob/master/LICENSE) license; see also the companion project, the [Python Data Science Handbook](https://github.com/jakevdp/PythonDataScienceHandbook).* Built-In Data Structures We have seen Python's simple types: ``int``, ``float``, ``complex``, ``bool``, ``str``, and so on.Python also has several built-in compound types, which act as containers for other types.These compound types are:| Type Name | Example |Description ||-----------|---------------------------|---------------------------------------|| ``list`` | ``[1, 2, 3]`` | Ordered collection || ``tuple`` | ``(1, 2, 3)`` | Immutable ordered collection || ``dict`` | ``{'a':1, 'b':2, 'c':3}`` | Unordered (key,value) mapping || ``set`` | ``{1, 2, 3}`` | Unordered collection of unique values |As you can see, round, square, and curly brackets have distinct meanings when it comes to the type of collection produced.We'll take a quick tour of these data structures here. ListsLists are the basic *ordered* and *mutable* data collection type in Python.They can be defined with comma-separated values between square brackets; for example, here is a list of the first several prime numbers:
###Code
L = [2, 3, 5, 7]
###Output
_____no_output_____
###Markdown
Lists have a number of useful properties and methods available to them.Here we'll take a quick look at some of the more common and useful ones:
###Code
# Length of a list
len(L)
# Append a value to the end
L.append(11)
L
# Addition concatenates lists
L + [13, 17, 19]
# sort() method sorts in-place
L = [2, 5, 1, 6, 3, 4]
L.sort()
L
###Output
_____no_output_____
###Markdown
In addition, there are many more built-in list methods; they are well-covered in Python's [online documentation](https://docs.python.org/3/tutorial/datastructures.html).While we've been demonstrating lists containing values of a single type, one of the powerful features of Python's compound objects is that they can contain objects of *any* type, or even a mix of types. For example:
###Code
L = [1, 'two', 3.14, [0, 3, 5]]
###Output
_____no_output_____
###Markdown
This flexibility is a consequence of Python's dynamic type system.Creating such a mixed sequence in a statically-typed language like C can be much more of a headache!We see that lists can even contain other lists as elements.Such type flexibility is an essential piece of what makes Python code relatively quick and easy to write.So far we've been considering manipulations of lists as a whole; another essential piece is the accessing of individual elements.This is done in Python via *indexing* and *slicing*, which we'll explore next. List indexing and slicingPython provides access to elements in compound types through *indexing* for single elements, and *slicing* for multiple elements.As we'll see, both are indicated by a square-bracket syntax.Suppose we return to our list of the first several primes:
###Code
L = [2, 3, 5, 7, 11]
###Output
_____no_output_____
###Markdown
Python uses *zero-based* indexing, so we can access the first and second element in using the following syntax:
###Code
L[0]
L[1]
###Output
_____no_output_____
###Markdown
Elements at the end of the list can be accessed with negative numbers, starting from -1:
###Code
L[-1]
L[-2]
###Output
_____no_output_____
###Markdown
You can visualize this indexing scheme this way:  Here values in the list are represented by large numbers in the squares; list indices are represented by small numbers above and below.In this case, ``L[2]`` returns ``5``, because that is the next value at index ``2``. Where *indexing* is a means of fetching a single value from the list, *slicing* is a means of accessing multiple values in sub-lists.It uses a colon to indicate the start point (inclusive) and end point (non-inclusive) of the sub-array.For example, to get the first three elements of the list, we can write:
###Code
L[0:3]
###Output
_____no_output_____
###Markdown
Notice where ``0`` and ``3`` lie in the preceding diagram, and how the slice takes just the values between the indices.If we leave out the first index, ``0`` is assumed, so we can equivalently write:
###Code
L[:3]
###Output
_____no_output_____
###Markdown
Similarly, if we leave out the last index, it defaults to the length of the list.Thus, the last three elements can be accessed as follows:
###Code
L[-3:]
###Output
_____no_output_____
###Markdown
Finally, it is possible to specify a third integer that represents the step size; for example, to select every second element of the list, we can write:
###Code
L[::2] # equivalent to L[0:len(L):2]
###Output
_____no_output_____
###Markdown
A particularly useful version of this is to specify a negative step, which will reverse the array:
###Code
L[::-1]
###Output
_____no_output_____
###Markdown
Both indexing and slicing can be used to set elements as well as access them.The syntax is as you would expect:
###Code
L[0] = 100
print(L)
L[1:3] = [55, 56]
print(L)
###Output
[100, 55, 56, 7, 11]
###Markdown
A very similar slicing syntax is also used in many data science-oriented packages, including NumPy and Pandas (mentioned in the introduction).Now that we have seen Python lists and how to access elements in ordered compound types, let's take a look at the other three standard compound data types mentioned earlier. TuplesTuples are in many ways similar to lists, but they are defined with parentheses rather than square brackets:
###Code
t = (1, 2, 3)
###Output
_____no_output_____
###Markdown
They can also be defined without any brackets at all:
###Code
t = 1, 2, 3
print(t)
###Output
(1, 2, 3)
###Markdown
Like the lists discussed before, tuples have a length, and individual elements can be extracted using square-bracket indexing:
###Code
len(t)
t[0]
###Output
_____no_output_____
###Markdown
The main distinguishing feature of tuples is that they are *immutable*: this means that once they are created, their size and contents cannot be changed:
###Code
t[1] = 4
t.append(4)
###Output
_____no_output_____
###Markdown
Tuples are often used in a Python program; a particularly common case is in functions that have multiple return values.For example, the ``as_integer_ratio()`` method of floating-point objects returns a numerator and a denominator; this dual return value comes in the form of a tuple:
###Code
x = 0.125
x.as_integer_ratio()
###Output
_____no_output_____
###Markdown
These multiple return values can be individually assigned as follows:
###Code
numerator, denominator = x.as_integer_ratio()
print(numerator / denominator)
###Output
0.125
###Markdown
The indexing and slicing logic covered earlier for lists works for tuples as well, along with a host of other methods.Refer to the online [Python documentation](https://docs.python.org/3/tutorial/datastructures.html) for a more complete list of these. DictionariesDictionaries are extremely flexible mappings of keys to values, and form the basis of much of Python's internal implementation.They can be created via a comma-separated list of ``key:value`` pairs within curly braces:
###Code
numbers = {'one':1, 'two':2, 'three':3}
###Output
_____no_output_____
###Markdown
Items are accessed and set via the indexing syntax used for lists and tuples, except here the index is not a zero-based order but valid key in the dictionary:
###Code
# Access a value via the key
numbers['two']
###Output
_____no_output_____
###Markdown
New items can be added to the dictionary using indexing as well:
###Code
# Set a new key:value pair
numbers['ninety'] = 90
print(numbers)
###Output
{'one': 1, 'two': 2, 'three': 3, 'ninety': 90}
###Markdown
Keep in mind that dictionaries do not maintain any sense of order for the input parameters; this is by design.This lack of ordering allows dictionaries to be implemented very efficiently, so that random element access is very fast, regardless of the size of the dictionary (if you're curious how this works, read about the concept of a *hash table*).The [python documentation](https://docs.python.org/3/library/stdtypes.html) has a complete list of the methods available for dictionaries. SetsThe fourth basic collection is the set, which contains unordered collections of unique items.They are defined much like lists and tuples, except they use the curly brackets of dictionaries:
###Code
primes = {2, 3, 5, 7}
odds = {1, 3, 5, 7, 9}
###Output
_____no_output_____
###Markdown
If you're familiar with the mathematics of sets, you'll be familiar with operations like the union, intersection, difference, symmetric difference, and others.Python's sets have all of these operations built-in, via methods or operators.For each, we'll show the two equivalent methods:
###Code
# union: items appearing in either
primes | odds # with an operator
primes.union(odds) # equivalently with a method
# intersection: items appearing in both
primes & odds # with an operator
primes.intersection(odds) # equivalently with a method
# difference: items in primes but not in odds
primes - odds # with an operator
primes.difference(odds) # equivalently with a method
# symmetric difference: items appearing in only one set
primes ^ odds # with an operator
primes.symmetric_difference(odds) # equivalently with a method
###Output
_____no_output_____
###Markdown
*This notebook contains an excerpt from the [Whirlwind Tour of Python](http://www.oreilly.com/programming/free/a-whirlwind-tour-of-python.csp) by Jake VanderPlas; the content is available [on GitHub](https://github.com/jakevdp/WhirlwindTourOfPython).**The text and code are released under the [CC0](https://github.com/jakevdp/WhirlwindTourOfPython/blob/master/LICENSE) license; see also the companion project, the [Python Data Science Handbook](https://github.com/jakevdp/PythonDataScienceHandbook).* Built-In Data Structures We have seen Python's simple types: ``int``, ``float``, ``complex``, ``bool``, ``str``, and so on.Python also has several built-in compound types, which act as containers for other types.These compound types are:| Type Name | Example |Description ||-----------|---------------------------|---------------------------------------|| ``list`` | ``[1, 2, 3]`` | Ordered collection || ``tuple`` | ``(1, 2, 3)`` | Immutable ordered collection || ``dict`` | ``{'a':1, 'b':2, 'c':3}`` | Unordered (key,value) mapping || ``set`` | ``{1, 2, 3}`` | Unordered collection of unique values |As you can see, round, square, and curly brackets have distinct meanings when it comes to the type of collection produced.We'll take a quick tour of these data structures here. ListsLists are the basic *ordered* and *mutable* data collection type in Python.They can be defined with comma-separated values between square brackets; for example, here is a list of the first several prime numbers:
###Code
L = [2, 3, 5, 7]
###Output
_____no_output_____
###Markdown
Lists have a number of useful properties and methods available to them.Here we'll take a quick look at some of the more common and useful ones:
###Code
# Length of a list
len(L)
# Append a value to the end
L.append(11)
L
# Addition concatenates lists
L + [13, 17, 19]
# sort() method sorts in-place
L = [2, 5, 1, 6, 3, 4]
L.sort()
L
###Output
_____no_output_____
###Markdown
In addition, there are many more built-in list methods; they are well-covered in Python's [online documentation](https://docs.python.org/3/tutorial/datastructures.html).While we've been demonstrating lists containing values of a single type, one of the powerful features of Python's compound objects is that they can contain objects of *any* type, or even a mix of types. For example:
###Code
L = [1, 'two', 3.14, [0, 3, 5]]
###Output
_____no_output_____
###Markdown
This flexibility is a consequence of Python's dynamic type system.Creating such a mixed sequence in a statically-typed language like C can be much more of a headache!We see that lists can even contain other lists as elements.Such type flexibility is an essential piece of what makes Python code relatively quick and easy to write.So far we've been considering manipulations of lists as a whole; another essential piece is the accessing of individual elements.This is done in Python via *indexing* and *slicing*, which we'll explore next. List indexing and slicingPython provides access to elements in compound types through *indexing* for single elements, and *slicing* for multiple elements.As we'll see, both are indicated by a square-bracket syntax.Suppose we return to our list of the first several primes:
###Code
L = [2, 3, 5, 7, 11]
###Output
_____no_output_____
###Markdown
Python uses *zero-based* indexing, so we can access the first and second element in using the following syntax:
###Code
L[0]
L[1]
###Output
_____no_output_____
###Markdown
Elements at the end of the list can be accessed with negative numbers, starting from -1:
###Code
L[-1]
L[-2]
###Output
_____no_output_____
###Markdown
You can visualize this indexing scheme this way:  Here values in the list are represented by large numbers in the squares; list indices are represented by small numbers above and below.In this case, ``L[2]`` returns ``5``, because that is the next value at index ``2``. Where *indexing* is a means of fetching a single value from the list, *slicing* is a means of accessing multiple values in sub-lists.It uses a colon to indicate the start point (inclusive) and end point (non-inclusive) of the sub-array.For example, to get the first three elements of the list, we can write:
###Code
L[0:3]
###Output
_____no_output_____
###Markdown
Notice where ``0`` and ``3`` lie in the preceding diagram, and how the slice takes just the values between the indices.If we leave out the first index, ``0`` is assumed, so we can equivalently write:
###Code
L[:3]
###Output
_____no_output_____
###Markdown
Similarly, if we leave out the last index, it defaults to the length of the list.Thus, the last three elements can be accessed as follows:
###Code
L[-3:]
###Output
_____no_output_____
###Markdown
Finally, it is possible to specify a third integer that represents the step size; for example, to select every second element of the list, we can write:
###Code
L[::2] # equivalent to L[0:len(L):2]
###Output
_____no_output_____
###Markdown
A particularly useful version of this is to specify a negative step, which will reverse the array:
###Code
L[::-1]
###Output
_____no_output_____
###Markdown
Both indexing and slicing can be used to set elements as well as access them.The syntax is as you would expect:
###Code
L[0] = 100
print(L)
L[1:3] = [55, 56]
print(L)
###Output
[100, 55, 56, 7, 11]
###Markdown
A very similar slicing syntax is also used in many data science-oriented packages, including NumPy and Pandas (mentioned in the introduction).Now that we have seen Python lists and how to access elements in ordered compound types, let's take a look at the other three standard compound data types mentioned earlier. TuplesTuples are in many ways similar to lists, but they are defined with parentheses rather than square brackets:
###Code
t = (1, 2, 3)
###Output
_____no_output_____
###Markdown
They can also be defined without any brackets at all:
###Code
t = 1, 2, 3
print(t)
###Output
(1, 2, 3)
###Markdown
Like the lists discussed before, tuples have a length, and individual elements can be extracted using square-bracket indexing:
###Code
len(t)
t[0]
###Output
_____no_output_____
###Markdown
The main distinguishing feature of tuples is that they are *immutable*: this means that once they are created, their size and contents cannot be changed:
###Code
t[1] = 4
t.append(4)
###Output
_____no_output_____
###Markdown
Tuples are often used in a Python program; a particularly common case is in functions that have multiple return values.For example, the ``as_integer_ratio()`` method of floating-point objects returns a numerator and a denominator; this dual return value comes in the form of a tuple:
###Code
x = 0.125
x.as_integer_ratio()
###Output
_____no_output_____
###Markdown
These multiple return values can be individually assigned as follows:
###Code
numerator, denominator = x.as_integer_ratio()
print(numerator / denominator)
###Output
0.125
###Markdown
The indexing and slicing logic covered earlier for lists works for tuples as well, along with a host of other methods.Refer to the online [Python documentation](https://docs.python.org/3/tutorial/datastructures.html) for a more complete list of these. DictionariesDictionaries are extremely flexible mappings of keys to values, and form the basis of much of Python's internal implementation.They can be created via a comma-separated list of ``key:value`` pairs within curly braces:
###Code
numbers = {'one':1, 'two':2, 'three':3}
###Output
_____no_output_____
###Markdown
Items are accessed and set via the indexing syntax used for lists and tuples, except here the index is not a zero-based order but valid key in the dictionary:
###Code
# Access a value via the key
numbers['two']
###Output
_____no_output_____
###Markdown
New items can be added to the dictionary using indexing as well:
###Code
# Set a new key:value pair
numbers['ninety'] = 90
print(numbers)
###Output
{'three': 3, 'ninety': 90, 'two': 2, 'one': 1}
###Markdown
Keep in mind that dictionaries do not maintain any sense of order for the input parameters; this is by design.This lack of ordering allows dictionaries to be implemented very efficiently, so that random element access is very fast, regardless of the size of the dictionary (if you're curious how this works, read about the concept of a *hash table*).The [python documentation](https://docs.python.org/3/library/stdtypes.html) has a complete list of the methods available for dictionaries. SetsThe fourth basic collection is the set, which contains unordered collections of unique items.They are defined much like lists and tuples, except they use the curly brackets of dictionaries:
###Code
primes = {2, 3, 5, 7}
odds = {1, 3, 5, 7, 9}
###Output
_____no_output_____
###Markdown
If you're familiar with the mathematics of sets, you'll be familiar with operations like the union, intersection, difference, symmetric difference, and others.Python's sets have all of these operations built-in, via methods or operators.For each, we'll show the two equivalent methods:
###Code
# union: items appearing in either
primes | odds # with an operator
primes.union(odds) # equivalently with a method
# intersection: items appearing in both
primes & odds # with an operator
primes.intersection(odds) # equivalently with a method
# difference: items in primes but not in odds
primes - odds # with an operator
primes.difference(odds) # equivalently with a method
# symmetric difference: items appearing in only one set
primes ^ odds # with an operator
primes.symmetric_difference(odds) # equivalently with a method
###Output
_____no_output_____
###Markdown
*This notebook contains an excerpt from the [Whirlwind Tour of Python](http://www.oreilly.com/programming/free/a-whirlwind-tour-of-python.csp) by Jake VanderPlas; the content is available [on GitHub](https://github.com/jakevdp/WhirlwindTourOfPython).**The text and code are released under the [CC0](https://github.com/jakevdp/WhirlwindTourOfPython/blob/master/LICENSE) license; see also the companion project, the [Python Data Science Handbook](https://github.com/jakevdp/PythonDataScienceHandbook).* Built-In Data Structures We have seen Python's simple types: ``int``, ``float``, ``complex``, ``bool``, ``str``, and so on.Python also has several built-in compound types, which act as containers for other types.These compound types are:| Type Name | Example |Description ||-----------|---------------------------|---------------------------------------|| ``list`` | ``[1, 2, 3]`` | Ordered collection || ``tuple`` | ``(1, 2, 3)`` | Immutable ordered collection || ``dict`` | ``{'a':1, 'b':2, 'c':3}`` | Unordered (key,value) mapping || ``set`` | ``{1, 2, 3}`` | Unordered collection of unique values |As you can see, round, square, and curly brackets have distinct meanings when it comes to the type of collection produced.We'll take a quick tour of these data structures here. ListsLists are the basic *ordered* and *mutable* data collection type in Python.They can be defined with comma-separated values between square brackets; for example, here is a list of the first several prime numbers:
###Code
L = [2, 3, 5, 7]
###Output
_____no_output_____
###Markdown
Lists have a number of useful properties and methods available to them.Here we'll take a quick look at some of the more common and useful ones:
###Code
# Length of a list
len(L)
# Append a value to the end
L.append(11)
L
# Addition concatenates lists
L + [13, 17, 19]
L.extend(L)  # extend in place; the resulting contents are the same as L + L
# sort() method sorts in-place
L = [2, 5, 1, 6, 3, 4]
L.sort()
L
###Output
_____no_output_____
###Markdown
In addition, there are many more built-in list methods; they are well-covered in Python's [online documentation](https://docs.python.org/3/tutorial/datastructures.html).While we've been demonstrating lists containing values of a single type, one of the powerful features of Python's compound objects is that they can contain objects of *any* type, or even a mix of types. For example:
###Code
L = [1, 'two', 3.14, [0, 3, 5]]
###Output
_____no_output_____
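###Markdown
A brief aside, not part of the original notebook: a quick look at a few of those additional list methods, and at indexing into the nested list just defined.
###Code
# a few more list methods and nested-element access (illustrative aside)
L.count(3.14)     # 1: number of occurrences of a value
L.index('two')    # 1: position of the first occurrence
L[3][2]           # 5: index into the nested list [0, 3, 5]
L.insert(1, 1.5)  # insert a value at an arbitrary position
L
###Output
_____no_output_____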
###Markdown
This flexibility is a consequence of Python's dynamic type system.Creating such a mixed sequence in a statically-typed language like C can be much more of a headache!We see that lists can even contain other lists as elements.Such type flexibility is an essential piece of what makes Python code relatively quick and easy to write.So far we've been considering manipulations of lists as a whole; another essential piece is the accessing of individual elements.This is done in Python via *indexing* and *slicing*, which we'll explore next. List indexing and slicingPython provides access to elements in compound types through *indexing* for single elements, and *slicing* for multiple elements.As we'll see, both are indicated by a square-bracket syntax.Suppose we return to our list of the first several primes:
###Code
L = [2, 3, 5, 7, 11]
###Output
_____no_output_____
###Markdown
Python uses *zero-based* indexing, so we can access the first and second elements using the following syntax:
###Code
L[0]
L[1]
###Output
_____no_output_____
###Markdown
Elements at the end of the list can be accessed with negative numbers, starting from -1:
###Code
L[-1]
L[-2]
###Output
_____no_output_____
###Markdown
You can visualize this indexing scheme this way:  Here values in the list are represented by large numbers in the squares; list indices are represented by small numbers above and below.In this case, ``L[2]`` returns ``5``, because ``5`` is the value at index ``2``. Where *indexing* is a means of fetching a single value from the list, *slicing* is a means of accessing multiple values in sub-lists.It uses a colon to indicate the start point (inclusive) and end point (non-inclusive) of the sub-array.For example, to get the first three elements of the list, we can write:
###Code
L[0:3]
###Output
_____no_output_____
###Markdown
Notice where ``0`` and ``3`` lie in the preceding diagram, and how the slice takes just the values between the indices.If we leave out the first index, ``0`` is assumed, so we can equivalently write:
###Code
L[:3]
###Output
_____no_output_____
###Markdown
Similarly, if we leave out the last index, it defaults to the length of the list.Thus, the last three elements can be accessed as follows:
###Code
L[-3:]
###Output
_____no_output_____
###Markdown
Finally, it is possible to specify a third integer that represents the step size; for example, to select every second element of the list, we can write:
###Code
L[::2] # equivalent to L[0:len(L):2]
###Output
_____no_output_____
###Markdown
A particularly useful version of this is to specify a negative step, which will reverse the array:
###Code
L[::-1]
###Output
_____no_output_____
###Markdown
Both indexing and slicing can be used to set elements as well as access them.The syntax is as you would expect:
###Code
L[0] = 100
print(L)
L[1:3] = [55, 56]
print(L)
###Output
[100, 55, 56, 7, 11]
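###Markdown
The same ``start:stop:step`` slice syntax carries over to array libraries; here is a small illustration using NumPy (assuming it is installed -- not part of the original notebook):
###Code
import numpy as np
a = np.array([2, 3, 5, 7, 11])
a[:3]     # array([2, 3, 5])
a[::-1]   # array([11,  7,  5,  3,  2])
###Output
_____no_output_____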
###Markdown
A very similar slicing syntax is also used in many data science-oriented packages, including NumPy and Pandas (mentioned in the introduction).Now that we have seen Python lists and how to access elements in ordered compound types, let's take a look at the other three standard compound data types mentioned earlier. TuplesTuples are in many ways similar to lists, but they are defined with parentheses rather than square brackets:
###Code
t = (1, 2, 3)
###Output
_____no_output_____
###Markdown
They can also be defined without any brackets at all:
###Code
t = 1, 2, 3
print(t)
###Output
(1, 2, 3)
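###Markdown
One gotcha worth noting as an aside (not in the original notebook): it is the comma, not the parentheses, that makes a tuple, so a one-element tuple needs a trailing comma.
###Code
single = 1,          # a one-element tuple
not_a_tuple = (1)    # just the integer 1 wrapped in parentheses
print(type(single), type(not_a_tuple))
###Output
_____no_output_____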
###Markdown
Like the lists discussed before, tuples have a length, and individual elements can be extracted using square-bracket indexing:
###Code
len(t)
t[0]
###Output
_____no_output_____
###Markdown
The main distinguishing feature of tuples is that they are *immutable*: this means that once they are created, their size and contents cannot be changed:
###Code
t[1] = 4      # raises TypeError: 'tuple' object does not support item assignment
t.append(4)   # raises AttributeError: 'tuple' object has no attribute 'append'
###Output
_____no_output_____
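###Markdown
Because tuples cannot be changed in place, any "modification" really builds a new tuple; a small sketch added here as an aside (not in the original notebook):
###Code
t = (1, 2, 3)
t = t[:1] + (4,) + t[2:]   # a new tuple with the middle element replaced
print(t)                   # (1, 4, 3)
###Output
_____no_output_____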
###Markdown
Tuples are often used in a Python program; a particularly common case is in functions that have multiple return values.For example, the ``as_integer_ratio()`` method of floating-point objects returns a numerator and a denominator; this dual return value comes in the form of a tuple:
###Code
x = 0.125
x.as_integer_ratio()
###Output
_____no_output_____
###Markdown
These multiple return values can be individually assigned as follows:
###Code
numerator, denominator = x.as_integer_ratio()
print(numerator / denominator)
###Output
0.125
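###Markdown
The same tuple packing and unpacking is handy beyond return values; for instance, the common variable-swap idiom shown in this aside (not part of the original notebook):
###Code
a, b = 1, 2    # pack two values into a tuple and unpack them
a, b = b, a    # swap without a temporary variable
print(a, b)    # 2 1
###Output
_____no_output_____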
###Markdown
The indexing and slicing logic covered earlier for lists works for tuples as well, along with a host of other methods.Refer to the online [Python documentation](https://docs.python.org/3/tutorial/datastructures.html) for a more complete list of these. DictionariesDictionaries are extremely flexible mappings of keys to values, and form the basis of much of Python's internal implementation.They can be created via a comma-separated list of ``key:value`` pairs within curly braces:
###Code
numbers = {'one':1, 'two':2, 'three':3}
###Output
_____no_output_____
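###Markdown
The curly-brace literal is not the only way to build a dictionary; as an aside (not in the original notebook), the ``dict()`` constructor and dictionary comprehensions produce the same kind of object:
###Code
dict(one=1, two=2, three=3)                     # keyword-argument form
{name: len(name) for name in ('one', 'two')}    # dictionary comprehension
###Output
_____no_output_____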
###Markdown
Items are accessed and set via the indexing syntax used for lists and tuples, except here the index is not a zero-based order but a valid key in the dictionary:
###Code
# Access a value via the key
numbers['two']
###Output
_____no_output_____
###Markdown
New items can be added to the dictionary using indexing as well:
###Code
# Set a new key:value pair
numbers['ninety'] = 90
print(numbers)
###Output
{'three': 3, 'ninety': 90, 'two': 2, 'one': 1}
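###Markdown
A few commonly used dictionary operations, shown as an aside (not part of the original notebook):
###Code
numbers.get('four', 0)    # 0 -- a default instead of a KeyError for missing keys
'two' in numbers          # True -- membership tests look at the keys
list(numbers.keys())      # the keys as a list
###Output
_____no_output_____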
###Markdown
Keep in mind that dictionaries do not guarantee any meaningful ordering of their keys; this is by design (since Python 3.7 insertion order happens to be preserved, but a dictionary is best treated as a keyed rather than a positional container).This lack of an ordering guarantee allows dictionaries to be implemented very efficiently, so that random element access is very fast, regardless of the size of the dictionary (if you're curious how this works, read about the concept of a *hash table*).The [python documentation](https://docs.python.org/3/library/stdtypes.html) has a complete list of the methods available for dictionaries. SetsThe fourth basic collection is the set, which contains unordered collections of unique items.They are defined much like lists and tuples, except they use the curly brackets of dictionaries:
###Code
primes = {2, 3, 5, 7}
odds = {1, 3, 5, 7, 9}
###Output
_____no_output_____
###Markdown
If you're familiar with the mathematics of sets, you'll be familiar with operations like the union, intersection, difference, symmetric difference, and others.Python's sets have all of these operations built-in, via methods or operators.For each, we'll show the two equivalent methods:
###Code
# union: items appearing in either
primes | odds # with an operator
primes.union(odds) # equivalently with a method
# intersection: items appearing in both
primes & odds # with an operator
primes.intersection(odds) # equivalently with a method
# difference: items in primes but not in odds
primes - odds # with an operator
primes.difference(odds) # equivalently with a method
# symmetric difference: items appearing in only one set
primes ^ odds # with an operator
primes.symmetric_difference(odds) # equivalently with a method
###Output
_____no_output_____
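###Markdown
A few more set operations, sketched here as an aside (not in the original notebook):
###Code
primes.add(13)            # insert a single element
{3, 5}.issubset(primes)   # True: subset test
primes.discard(13)        # remove it again (no error if absent)
###Output
_____no_output_____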
###Markdown
Many more set methods and operations are available.You've probably already guessed what I'll say next: refer to Python's [online documentation](https://docs.python.org/3/library/stdtypes.html) for a complete reference. More Specialized Data StructuresPython contains several other data structures that you might find useful; these can generally be found in the built-in ``collections`` module.The collections module is fully-documented in [Python's online documentation](https://docs.python.org/3/library/collections.html), and you can read more about the various objects available there.In particular, I've found the following very useful on occasion:- ``collections.namedtuple``: Like a tuple, but each value has a name- ``collections.defaultdict``: Like a dictionary, but unspecified keys have a user-specified default value- ``collections.OrderedDict``: Like a dictionary, but the order of keys is maintainedOnce you've seen the standard built-in collection types, the use of these extended functionalities is very intuitive, and I'd suggest [reading about their use](https://docs.python.org/3/library/collections.html).
###Code
import collections
dir(collections)
###Output
_____no_output_____
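###Markdown
As an illustration added here (not part of the original notebook), a brief sketch of two of these specialized containers:
###Code
from collections import namedtuple, defaultdict
# namedtuple: tuple elements addressable by name
Point = namedtuple('Point', ['x', 'y'])
p = Point(1, 2)
print(p.x, p.y)          # 1 2
# defaultdict: missing keys get a default value instead of a KeyError
counts = defaultdict(int)
for word in ['spam', 'egg', 'spam']:
    counts[word] += 1
print(dict(counts))      # {'spam': 2, 'egg': 1}
###Output
_____no_output_____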
###Markdown
*This notebook is an adaptation by J. Rafael Rodríguez Galván of the material "[Whirlwind Tour of Python](http://www.oreilly.com/programming/free/a-whirlwind-tour-of-python.csp)" by Jake VanderPlas; both the [original content](https://github.com/jakevdp/WhirlwindTourOfPython) and the [current adaptation](https://github.com/rrgalvan/PythonIntroMasterMatemat) are available on GitHub.**The text and code are released under the [CC0](https://github.com/jakevdp/WhirlwindTourOfPython/blob/master/LICENSE) license; see also the companion project, the [Python Data Science Handbook](https://github.com/jakevdp/PythonDataScienceHandbook).* Built-In Data Structures We have seen Python's simple types: ``int``, ``float``, ``complex``, ``bool``, ``str``, and so on.Python also has several built-in compound types, which act as containers for other types.These compound types are:| Type Name | Example |Description ||-----------|---------------------------|---------------------------------------|| ``list`` | ``[1, 2, 3]`` | Ordered collection || ``tuple`` | ``(1, 2, 3)`` | Immutable ordered collection || ``dict`` | ``{'a':1, 'b':2, 'c':3}`` | Unordered (key,value) mapping || ``set`` | ``{1, 2, 3}`` | Unordered collection of unique values |As you can see, round, square, and curly brackets have distinct meanings when it comes to the type of collection produced.We'll take a quick tour of these data structures here. ListsLists are the basic *ordered* and *mutable* data collection type in Python.They can be defined with comma-separated values between square brackets; for example, here is a list of the first several prime numbers:
###Code
L = [2, 3, 5, 7]
###Output
_____no_output_____
###Markdown
Lists have a number of useful properties and methods available to them.Here we'll take a quick look at some of the more common and useful ones:
###Code
# Length of a list
len(L)
# Append a value to the end
L.append(11)
L
# Addition concatenates lists
L + [13, 17, 19]
# sort() method sorts in-place
L = [2, 5, 1, 6, 3, 4]
L.sort()
L
###Output
_____no_output_____
###Markdown
In addition, there are many more built-in list methods; they are well-covered in Python's [online documentation](https://docs.python.org/3/tutorial/datastructures.html).While we've been demonstrating lists containing values of a single type, one of the powerful features of Python's compound objects is that they can contain objects of *any* type, or even a mix of types. For example:
###Code
L = [1, 'two', 3.14, [0, 3, 5]]
###Output
_____no_output_____
###Markdown
This flexibility is a consequence of Python's dynamic type system.Creating such a mixed sequence in a statically-typed language like C can be much more of a headache!We see that lists can even contain other lists as elements.Such type flexibility is an essential piece of what makes Python code relatively quick and easy to write.So far we've been considering manipulations of lists as a whole; another essential piece is the accessing of individual elements.This is done in Python via *indexing* and *slicing*, which we'll explore next. List indexing and slicingPython provides access to elements in compound types through *indexing* for single elements, and *slicing* for multiple elements.As we'll see, both are indicated by a square-bracket syntax.Suppose we return to our list of the first several primes:
###Code
L = [2, 3, 5, 7, 11]
###Output
_____no_output_____
###Markdown
Python uses *zero-based* **indexing**, so we can access the first and second elements using the following syntax:
###Code
L[0]
L[1]
###Output
_____no_output_____
###Markdown
Elements at the end of the list can be accessed with negative numbers, starting from -1:
###Code
L[-1]
L[-2]
###Output
_____no_output_____
###Markdown
You can visualize this indexing scheme this way:  Here values in the list are represented by large numbers in the squares; list indices are represented by small numbers above and below.In this case, ``L[2]`` returns ``5``, because that is the next value at index ``2``. Where *indexing* is a means of fetching a single value from the list, ***slicing*** is a means of accessing multiple values in sub-lists.It uses a colon to indicate the start point (inclusive) and end point (non-inclusive) of the sub-array.For example, to get the first three elements of the list, we can write:
###Code
L[0:3]
###Output
_____no_output_____
###Markdown
Notice where ``0`` and ``3`` lie in the preceding diagram, and how the slice takes just the values between the indices.---- **Note**: - It is worth emphasizing the above in mathematical terms: when we write the index range ``0:3`` we are talking about an interval of integers that is closed on the left and open on the right. That is, Python uses the notation ``0:3``$=[0,3) \cap \mathbb{Z}$. In general, given $m,n\in\mathbb{Z}$,$$\texttt{m:n} = [m,n) \cap \mathbb{Z}$$ - This choice of half-open interval has some advantages. For example, the notation ``:n`` and the function ``range(n)``, which we will see later, produce a set of exactly $n$ indices, $0,1,...,n-1$.---If we leave out the first index, ``0`` is assumed, so we can equivalently write:
###Code
L[:3]
###Output
_____no_output_____
###Markdown
Similarly, if we leave out the last index, it defaults to the length of the list.Thus, the last three elements can be accessed as follows:
###Code
L[-3:]
###Output
_____no_output_____
###Markdown
Finally, it is possible to specify a third integer that represents the **step size**; for example, to select every second element of the list, we can write:
###Code
L[::2] # equivalent to L[0:len(L):2]
###Output
_____no_output_____
###Markdown
A particularly useful version of this is to specify a negative step, which will reverse the array:
###Code
L[::-1]
###Output
_____no_output_____
###Markdown
Both indexing and slicing can be used to set elements as well as access them.The syntax is as you would expect:
###Code
L[0] = 100
print(L)
L[1:3] = [55, 56]
print(L)
###Output
[100, 55, 56, 7, 11]
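###Markdown
The same square-bracket indexing and slicing syntax carries over almost unchanged to array libraries; below is a minimal sketch using NumPy (assuming ``numpy`` is installed, which is not required anywhere else in this notebook):
###Code
import numpy as np

a = np.array([2, 3, 5, 7, 11])
print(a[0], a[-1])   # single-element indexing, just like a list
print(a[1:4])        # slicing returns a sub-array: [3 5 7]
print(a[::-1])       # a negative step reverses the array
###Output
_____no_output_____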
###Markdown
A very similar slicing syntax is also used in many data science-oriented packages, including NumPy and Pandas (mentioned in the introduction).Now that we have seen Python lists and how to access elements in ordered compound types, let's take a look at the other three standard compound data types mentioned earlier. TuplesTuples are in many ways similar to lists, but they are defined with parentheses rather than square brackets:
###Code
t = (1, 2, 3)
###Output
_____no_output_____
###Markdown
They can also be defined without any brackets at all:
###Code
t = 1, 2, 3
print(t)
###Output
(1, 2, 3)
###Markdown
Like the lists discussed before, tuples have a length, and individual elements can be extracted using square-bracket indexing:
###Code
len(t)
t[0]
###Output
_____no_output_____
###Markdown
The main distinguishing feature of tuples is that they are ***immutable***: this means that once they are created, their size and contents cannot be changed:
###Code
t[1] = 4
t.append(4)
###Output
_____no_output_____
###Markdown
Tuples are often used in a Python program; a particularly common case is in functions that have **multiple return values**.For example, the ``as_integer_ratio()`` method of floating-point objects returns a numerator and a denominator; this dual return value comes in the form of a tuple:
###Code
x = 0.125
x.as_integer_ratio()
###Output
_____no_output_____
###Markdown
These multiple return values can be individually assigned as follows:
###Code
numerator, denominator = x.as_integer_ratio()
print(numerator / denominator)
###Output
0.125
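###Markdown
As a quick check that the list-style indexing and slicing seen earlier also applies to tuples (a small illustrative cell):
###Code
t = (1, 2, 3)
print(t[:2])      # (1, 2)
print(t[::-1])    # (3, 2, 1)
print(t[-1])      # 3
###Output
_____no_output_____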
###Markdown
The indexing and slicing logic covered earlier for lists works for tuples as well, along with a host of other methods.Refer to the online [Python documentation](https://docs.python.org/3/tutorial/datastructures.html) for a more complete list of these. DictionariesDictionaries are extremely flexible mappings of keys to values, and form the basis of much of Python's internal implementation.They can be created via a comma-separated list of ``key:value`` pairs within curly braces:
###Code
numbers = {'one':1, 'two':2, 'three':3}
###Output
_____no_output_____
###Markdown
Items are accessed and set via the indexing syntax used for lists and tuples, except here the index is not a zero-based order but a valid key in the dictionary:
###Code
# Access a value via the key
numbers['two']
###Output
_____no_output_____
###Markdown
New items can be added to the dictionary using indexing as well:
###Code
# Set a new key:value pair
numbers['ninety'] = 90
print(numbers)
###Output
{'three': 3, 'ninety': 90, 'two': 2, 'one': 1}
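###Markdown
A couple of other everyday dictionary operations, for reference (a minimal illustrative cell): membership testing with ``in``, safe lookup with ``get``, and listing the keys:
###Code
print('one' in numbers)         # membership test is against the keys -> True
print(numbers.get('four', 0))   # get() returns a default when the key is missing
print(list(numbers.keys()))     # all keys currently in the dictionary
###Output
_____no_output_____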
###Markdown
Keep in mind that dictionaries do not maintain any sense of order for the input parameters; this is by design.This lack of ordering allows dictionaries to be implemented very efficiently, so that **random element access is very fast**, regardless of the size of the dictionary (if you're curious how this works, read about the concept of a *hash table*).The [python documentation](https://docs.python.org/3/library/stdtypes.html) has a complete list of the methods available for dictionaries. SetsThe fourth basic collection is the set, which contains unordered collections of unique items.They are defined much like lists and tuples, except they use the curly brackets of dictionaries:
###Code
primes = {2, 3, 5, 7}
odds = {1, 3, 5, 7, 9}
###Output
_____no_output_____
###Markdown
If you're familiar with the mathematics of sets, you'll be familiar with operations like the union, intersection, difference, symmetric difference, and others.Python's sets have all of these operations built-in, via methods or operators.For each, we'll show the two equivalent methods:
###Code
# union: items appearing in either
primes | odds # with an operator
primes.union(odds) # equivalently with a method
# intersection: items appearing in both
primes & odds # with an operator
primes.intersection(odds) # equivalently with a method
# difference: items in primes but not in odds
primes - odds # with an operator
primes.difference(odds) # equivalently with a method
# symmetric difference: items appearing in only one set
primes ^ odds # with an operator
primes.symmetric_difference(odds) # equivalently with a method
###Output
_____no_output_____
###Markdown
*This notebook contains an excerpt from the [Whirlwind Tour of Python](http://www.oreilly.com/programming/free/a-whirlwind-tour-of-python.csp) by Jake VanderPlas; the content is available [on GitHub](https://github.com/jakevdp/WhirlwindTourOfPython).**The text and code are released under the [CC0](https://github.com/jakevdp/WhirlwindTourOfPython/blob/master/LICENSE) license; see also the companion project, the [Python Data Science Handbook](https://github.com/jakevdp/PythonDataScienceHandbook).* Built-In Data Structures We have seen Python's simple types: ``int``, ``float``, ``complex``, ``bool``, ``str``, and so on.Python also has several built-in compound types, which act as containers for other types.These compound types are:| Type Name | Example |Description ||-----------|---------------------------|---------------------------------------|| ``list`` | ``[1, 2, 3]`` | Ordered collection || ``tuple`` | ``(1, 2, 3)`` | Immutable ordered collection || ``dict`` | ``{'a':1, 'b':2, 'c':3}`` | Unordered (key,value) mapping || ``set`` | ``{1, 2, 3}`` | Unordered collection of unique values |As you can see, round, square, and curly brackets have distinct meanings when it comes to the type of collection produced.We'll take a quick tour of these data structures here. by ghwan: 1. simple types: int, float, complex, bool, str; 2. compound types: list, tuple, dict, set. ListsLists are the basic *ordered* and *mutable* data collection type in Python.They can be defined with comma-separated values between square brackets; for example, here is a list of the first several prime numbers:
###Code
L = [2, 3, 5, 7]
###Output
_____no_output_____
###Markdown
Lists have a number of useful properties and methods available to them.Here we'll take a quick look at some of the more common and useful ones:
###Code
# Length of a list
len(L)
# Append a value to the end
L.append(11)
L
id(L)
# Addition concatenates lists
L + [13, 17, 19]
id(L)
# sort() method sorts in-place
L = [2, 5, 1, 6, 3, 4]
L.sort()
L
###Output
_____no_output_____
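###Markdown
The ``id(L)`` calls above hint at a useful distinction: ``append`` mutates the existing list object, while ``+`` builds a brand-new list and leaves the original untouched. A minimal sketch of that difference:
###Code
L = [1, 2, 3]
before = id(L)
L.append(4)               # in-place: L is still the same object
print(id(L) == before)    # True
M = L + [5]               # concatenation: a new list object is created
print(id(M) == before)    # False
###Output
_____no_output_____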
###Markdown
In addition, there are many more built-in list methods; they are well-covered in Python's [online documentation](https://docs.python.org/3/tutorial/datastructures.html).While we've been demonstrating lists containing values of a single type, one of the powerful features of Python's compound objects is that they can contain objects of *any* type, or even a mix of types. For example:
###Code
L = [1, 'two', 3.14, [0, 3, 5]]
###Output
_____no_output_____
###Markdown
This flexibility is a consequence of Python's dynamic type system.Creating such a mixed sequence in a statically-typed language like C can be much more of a headache!We see that lists can even contain other lists as elements.Such type flexibility is an essential piece of what makes Python code relatively quick and easy to write.So far we've been considering manipulations of lists as a whole; another essential piece is the accessing of individual elements.This is done in Python via *indexing* and *slicing*, which we'll explore next. List indexing and slicingPython provides access to elements in compound types through *indexing* for single elements, and *slicing* for multiple elements.As we'll see, both are indicated by a square-bracket syntax.Suppose we return to our list of the first several primes:
###Code
L = [2, 3, 5, 7, 11]
###Output
_____no_output_____
###Markdown
Python uses *zero-based* indexing, so we can access the first and second elements using the following syntax:
###Code
L[0]
L[1]
###Output
_____no_output_____
###Markdown
Elements at the end of the list can be accessed with negative numbers, starting from -1:
###Code
L[-1]
L[-2]
###Output
_____no_output_____
###Markdown
You can visualize this indexing scheme this way:  Here values in the list are represented by large numbers in the squares; list indices are represented by small numbers above and below.In this case, ``L[2]`` returns ``5``, because that is the next value at index ``2``. Where *indexing* is a means of fetching a single value from the list, *slicing* is a means of accessing multiple values in sub-lists.It uses a colon to indicate the start point (inclusive) and end point (non-inclusive) of the sub-array.For example, to get the first three elements of the list, we can write:
###Code
L[0:3]
###Output
_____no_output_____
###Markdown
Notice where ``0`` and ``3`` lie in the preceding diagram, and how the slice takes just the values between the indices.If we leave out the first index, ``0`` is assumed, so we can equivalently write:
###Code
L[:3]
###Output
_____no_output_____
###Markdown
Similarly, if we leave out the last index, it defaults to the length of the list.Thus, the last three elements can be accessed as follows:
###Code
L[-3:]
###Output
_____no_output_____
###Markdown
Finally, it is possible to specify a third integer that represents the step size; for example, to select every second element of the list, we can write:
###Code
L[::2] # equivalent to L[0:len(L):2]
###Output
_____no_output_____
###Markdown
A particularly useful version of this is to specify a negative step, which will reverse the array:
###Code
L[::-1]
###Output
_____no_output_____
###Markdown
Both indexing and slicing can be used to set elements as well as access them.The syntax is as you would expect:
###Code
L[0] = 100
print(L)
L[1:3] = [55, 56]
print(L)
###Output
[100, 55, 56, 7, 11]
###Markdown
A very similar slicing syntax is also used in many data science-oriented packages, including NumPy and Pandas (mentioned in the introduction).Now that we have seen Python lists and how to access elements in ordered compound types, let's take a look at the other three standard compound data types mentioned earlier. TuplesTuples are in many ways similar to lists, but they are defined with parentheses rather than square brackets:
###Code
t = (1, 2, 3)
###Output
_____no_output_____
###Markdown
They can also be defined without any brackets at all:
###Code
t = 1, 2, 3
print(t)
###Output
(1, 2, 3)
###Markdown
Like the lists discussed before, tuples have a length, and individual elements can be extracted using square-bracket indexing:
###Code
len(t)
t[0]
###Output
_____no_output_____
###Markdown
The main distinguishing feature of tuples is that they are *immutable*: this means that once they are created, their size and contents cannot be changed:
###Code
t[1] = 4
t.append(4)
###Output
_____no_output_____
###Markdown
Tuples are often used in a Python program; a particularly common case is in functions that have multiple return values.For example, the ``as_integer_ratio()`` method of floating-point objects returns a numerator and a denominator; this dual return value comes in the form of a tuple:
###Code
x = 0.125
x.as_integer_ratio()
###Output
_____no_output_____
###Markdown
These multiple return values can be individually assigned as follows:
###Code
numerator, denominator = x.as_integer_ratio()
print(numerator / denominator)
###Output
0.125
###Markdown
The indexing and slicing logic covered earlier for lists works for tuples as well, along with a host of other methods.Refer to the online [Python documentation](https://docs.python.org/3/tutorial/datastructures.html) for a more complete list of these. DictionariesDictionaries are extremely flexible mappings of keys to values, and form the basis of much of Python's internal implementation.They can be created via a comma-separated list of ``key:value`` pairs within curly braces:
###Code
numbers = {'one':1, 'two':2, 'three':3}
###Output
_____no_output_____
###Markdown
Items are accessed and set via the indexing syntax used for lists and tuples, except here the index is not a zero-based order but a valid key in the dictionary:
###Code
# Access a value via the key
numbers['two']
###Output
_____no_output_____
###Markdown
New items can be added to the dictionary using indexing as well:
###Code
# Set a new key:value pair
numbers['ninety'] = 90
print(numbers)
###Output
{'three': 3, 'ninety': 90, 'two': 2, 'one': 1}
###Markdown
Keep in mind that dictionaries do not maintain any sense of order for the input parameters; this is by design.This lack of ordering allows dictionaries to be implemented very efficiently, so that random element access is very fast, regardless of the size of the dictionary (if you're curious how this works, read about the concept of a *hash table*).The [python documentation](https://docs.python.org/3/library/stdtypes.html) has a complete list of the methods available for dictionaries. SetsThe fourth basic collection is the set, which contains unordered collections of unique items.They are defined much like lists and tuples, except they use the curly brackets of dictionaries:
###Code
primes = {2, 3, 5, 7}
odds = {1, 3, 5, 7, 9}
###Output
_____no_output_____
###Markdown
If you're familiar with the mathematics of sets, you'll be familiar with operations like the union, intersection, difference, symmetric difference, and others.Python's sets have all of these operations built-in, via methods or operators.For each, we'll show the two equivalent methods:
###Code
# union: items appearing in either
primes | odds # with an operator
primes.union(odds) # equivalently with a method
# intersection: items appearing in both
primes & odds # with an operator
primes.intersection(odds) # equivalently with a method
# difference: items in primes but not in odds
primes - odds # with an operator
primes.difference(odds) # equivalently with a method
# symmetric difference: items appearing in only one set
primes ^ odds # with an operator
primes.symmetric_difference(odds) # equivalently with a method
###Output
_____no_output_____
|
nbs/out - Outlier.ipynb
|
###Markdown
Outlier Detection - Anomaly Scores as Features* `out1` -- Isolation Forest
###Code
%load_ext autoreload
%autoreload 2
# add path
import sys; import os; sys.path.append(os.path.realpath("../"))
# demo datasets
from datasets.demo1 import X_train, Y_train, fold_ids, X_valid, Y_valid, meta as meta_data
#meta_data
# transformer implementations
typ = 'out1'
if typ == 'out1':  # compare strings by value, not identity
from verto.out1 import trans, meta
trans.set_params(**{'model__contamination': 0.15})
else:
tmp = __import__("verto."+typ, fromlist=['trans', 'meta'])
trans = tmp.trans
meta = tmp.meta
meta
###Output
_____no_output_____
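###Markdown
For readers without the ``verto`` package: the idea behind ``out1`` can be sketched directly with scikit-learn's ``IsolationForest``. This is an illustrative stand-in under that assumption, not the actual ``verto.out1`` implementation; scikit-learn and numpy are assumed available:
###Code
import numpy as np
from sklearn.ensemble import IsolationForest
rng = np.random.RandomState(0)
X_demo = rng.normal(size=(200, 5))             # random stand-in for X_train
iso = IsolationForest(contamination=0.15, random_state=0)
iso.fit(X_demo)
anomaly_score = iso.decision_function(X_demo)  # lower scores indicate more anomalous rows
print(anomaly_score[:5])                       # one score per row, usable as an additional feature
###Output
_____no_output_____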
###Markdown
Transform
###Code
%%time
trans.fit(X_train)
%%time
X_new = trans.transform(X_train)
from seasalt import create_feature_names
feature_names = create_feature_names(meta['feature_names_prefix'], X_new.shape[1])
print(feature_names)
import pandas as pd
df_new = pd.DataFrame(data=X_new, columns=feature_names)
###Output
_____no_output_____
###Markdown
Evaluate- check if the anomaly score is a "good" predictor- eyeball the p-values of the logistic regression coefficients
###Code
df_new.head()
import statsmodels.api as sm
#lr = sm.Logit(Y_train, sm.add_constant(X_new)).fit()
lr = sm.Logit(Y_train, sm.add_constant(X_new)).fit_regularized(method='l1', alpha=.5)
print(lr.summary())
###Output
Optimization terminated successfully. (Exit mode 0)
Current function value: 0.5974161143654582
Iterations: 22
Function evaluations: 22
Gradient evaluations: 22
Logit Regression Results
==============================================================================
Dep. Variable: y No. Observations: 398
Model: Logit Df Residuals: 396
Method: MLE Df Model: 1
Date: Thu, 11 Apr 2019 Pseudo R-squ.: 0.1430
Time: 17:15:21 Log-Likelihood: -225.55
converged: True LL-Null: -263.17
LLR p-value: 4.131e-18
==============================================================================
coef std err z P>|z| [0.025 0.975]
------------------------------------------------------------------------------
const 7.5651 1.127 6.711 0.000 5.356 9.775
x1 16.8875 2.692 6.273 0.000 11.611 22.164
==============================================================================
|
colabs/ctv_audience_affinity.ipynb
|
###Markdown
1. Install DependenciesFirst install the libraries needed to execute recipes, this only needs to be done once, then click play.
###Code
!pip install git+https://github.com/google/starthinker
###Output
_____no_output_____
###Markdown
2. Get Cloud Project IDTo run this recipe [requires a Google Cloud Project](https://github.com/google/starthinker/blob/master/tutorials/cloud_project.md), this only needs to be done once, then click play.
###Code
CLOUD_PROJECT = 'PASTE PROJECT ID HERE'
print("Cloud Project Set To: %s" % CLOUD_PROJECT)
###Output
_____no_output_____
###Markdown
3. Get Client CredentialsTo read and write to various endpoints requires [downloading client credentials](https://github.com/google/starthinker/blob/master/tutorials/cloud_client_installed.md), this only needs to be done once, then click play.
###Code
CLIENT_CREDENTIALS = 'PASTE CREDENTIALS HERE'
print("Client Credentials Set To: %s" % CLIENT_CREDENTIALS)
###Output
_____no_output_____
###Markdown
4. Enter cTV Inventory Availability Dashboard ParametersThe cTV Audience Affinity dashboard is designed to give clients insights into which cTV apps their audiences have a high affinity for using. The goal of this dashboard is to provide some assistance with the lack of audience targeting for cTV within DV360. 1. Find instructions and recommendations for this dashboard hereModify the values below for your use case, can be done multiple times, then click play.
###Code
FIELDS = {
'dataset': '', # BigQuery Dataset where all data will live.
'recipe_project': '', # Project where BigQuery dataset will be created.
'recipe_name': '', # Name of document to deploy to.
'partner_id': '', # DV360 Partner id.
'audience_ids': '', # Comma separated list of Audience Ids
}
print("Parameters Set To: %s" % FIELDS)
###Output
_____no_output_____
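###Markdown
A hypothetical example of filled-in parameters (every value below is a placeholder for illustration, not a real project, partner, or audience id):
###Code
# Placeholder values only -- substitute your own before running.
FIELDS = {
  'dataset': 'ctv_affinity',           # BigQuery Dataset where all data will live.
  'recipe_project': 'my-gcp-project',  # Project where BigQuery dataset will be created.
  'recipe_name': 'ctv_affinity_demo',  # Name of document to deploy to.
  'partner_id': '1234567',             # DV360 Partner id.
  'audience_ids': '1111,2222',         # Comma separated list of Audience Ids
}
print("Parameters Set To: %s" % FIELDS)
###Output
_____no_output_____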
###Markdown
5. Execute cTV Inventory Availability DashboardThis does NOT need to be modified unless you are changing the recipe, click play.
###Code
from starthinker.util.project import project
from starthinker.script.parse import json_set_fields
USER_CREDENTIALS = '/content/user.json'
TASKS = [
{
'drive': {
'auth': 'user',
'copy': {
'source': 'https://docs.google.com/spreadsheets/d/1PPPk2b4gGJHNgQ4hXLiTKzH8pRIdlF5fNy9VCw1v7tM/',
'destination': {'field': {'name': 'recipe_name','prefix': 'cTV App Match Table ','kind': 'string','order': 1,'description': 'Name of document to deploy to.','default': ''}}
}
}
},
{
'dataset': {
'auth': 'user',
'dataset': {'field': {'name': 'dataset','kind': 'string','order': 3,'default': '','description': 'BigQuery Dataset where all data will live.'}}
}
},
{
'dbm': {
'auth': 'user',
'report': {
'body': {
'timezoneCode': 'America/Los_Angeles',
'kind': 'doubleclickbidmanager#query',
'metadata': {
'title': {'field': {'name': 'recipe_name','kind': 'string','prefix': 'us_country_app_'}},
'dataRange': 'LAST_30_DAYS',
'format': 'CSV',
'sendNotification': False
},
'params': {
'type': 'TYPE_INVENTORY_AVAILABILITY',
'groupBys': [
'FILTER_APP_URL'
],
'filters': [
{
'type': 'FILTER_PARTNER',
'value': {'field': {'name': 'partner_id','kind': 'integer','order': 1,'description': 'DV360 Partner id.'}}
},
{
'type': 'FILTER_INVENTORY_FORMAT',
'value': 'VIDEO'
},
{
'type': 'FILTER_COUNTRY',
'value': 'US'
}
],
'metrics': [
'METRIC_BID_REQUESTS',
'METRIC_UNIQUE_VISITORS_COOKIES'
],
'includeInviteData': True
},
'schedule': {
'frequency': 'DAILY',
'nextRunMinuteOfDay': 0,
'nextRunTimezoneCode': 'America/Los_Angeles',
'endTimeMs': 7983727200000
}
}
},
'out': {
'bigquery': {
'dataset': {'field': {'name': 'dataset','kind': 'string','order': 3,'default': '','description': 'BigQuery Dataset where all data will live.'}},
'schema': [
{
'type': 'STRING',
'name': 'app_url',
'mode': 'NULLABLE'
},
{
'type': 'STRING',
'name': 'impressions',
'mode': 'NULLABLE'
},
{
'type': 'STRING',
'name': 'uniques',
'mode': 'NULLABLE'
}
],
'table': 'us_country_app'
}
}
}
},
{
'dbm': {
'auth': 'user',
'report': {
'body': {
'timezoneCode': 'America/Los_Angeles',
'kind': 'doubleclickbidmanager#query',
'metadata': {
'title': {'field': {'name': 'recipe_name','kind': 'string','prefix': 'us_country_baseline_'}},
'dataRange': 'LAST_30_DAYS',
'format': 'CSV',
'sendNotification': False
},
'params': {
'type': 'TYPE_INVENTORY_AVAILABILITY',
'filters': [
{
'type': 'FILTER_PARTNER',
'value': {'field': {'name': 'partner_id','kind': 'integer','order': 1,'description': 'DV360 Partner id.'}}
},
{
'type': 'FILTER_COUNTRY',
'value': 'US'
}
],
'metrics': [
'METRIC_BID_REQUESTS',
'METRIC_UNIQUE_VISITORS_COOKIES'
],
'includeInviteData': True
},
'schedule': {
'frequency': 'DAILY',
'nextRunMinuteOfDay': 0,
'nextRunTimezoneCode': 'America/Los_Angeles',
'endTimeMs': 7983727200000
}
}
},
'out': {
'bigquery': {
'dataset': {'field': {'name': 'dataset','kind': 'string','order': 3,'default': '','description': 'BigQuery Dataset where all data will live.'}},
'schema': [
{
'type': 'STRING',
'name': 'impressions',
'mode': 'NULLABLE'
},
{
'type': 'STRING',
'name': 'uniques',
'mode': 'NULLABLE'
}
],
'table': 'us_country_baseline'
}
}
}
},
{
'dbm': {
'auth': 'user',
'report': {
'filters': {
'FILTER_USER_LIST': {
'single_cell': True,
'values': {'field': {'name': 'audience_ids','kind': 'integer_list','order': 2,'description': 'Comma separated list of Audience Ids'}}
}
},
'body': {
'timezoneCode': 'America/Los_Angeles',
'kind': 'doubleclickbidmanager#query',
'metadata': {
'title': {'field': {'name': 'recipe_name','kind': 'string','prefix': 'us_audience_baseline_'}},
'dataRange': 'LAST_30_DAYS',
'format': 'CSV',
'sendNotification': False
},
'params': {
'type': 'TYPE_INVENTORY_AVAILABILITY',
'groupBys': [
'FILTER_AUDIENCE_LIST'
],
'filters': [
{
'type': 'FILTER_PARTNER',
'value': {'field': {'name': 'partner_id','kind': 'integer','order': 1,'description': 'DV360 Partner id.'}}
},
{
'type': 'FILTER_COUNTRY',
'value': 'US'
}
],
'metrics': [
'METRIC_BID_REQUESTS',
'METRIC_UNIQUE_VISITORS_COOKIES'
],
'includeInviteData': True
},
'schedule': {
'frequency': 'DAILY',
'nextRunMinuteOfDay': 0,
'nextRunTimezoneCode': 'America/Los_Angeles',
'endTimeMs': 7983727200000
}
}
},
'out': {
'bigquery': {
'dataset': {'field': {'name': 'dataset','kind': 'string','order': 3,'default': '','description': 'BigQuery Dataset where all data will live.'}},
'schema': [
{
'type': 'STRING',
'name': 'user_list',
'mode': 'NULLABLE'
},
{
'type': 'STRING',
'name': 'impressions',
'mode': 'NULLABLE'
},
{
'type': 'STRING',
'name': 'uniques',
'mode': 'NULLABLE'
}
],
'table': 'us_audience_baseline'
}
}
}
},
{
'dbm': {
'auth': 'user',
'report': {
'filters': {
'FILTER_USER_LIST': {
'single_cell': True,
'values': {'field': {'name': 'audience_ids','kind': 'integer_list','order': 2,'description': 'Comma separated list of Audience Ids'}}
}
},
'body': {
'timezoneCode': 'America/Los_Angeles',
'kind': 'doubleclickbidmanager#query',
'metadata': {
'title': {'field': {'name': 'recipe_name','kind': 'string','prefix': 'us_audience_app_'}},
'dataRange': 'LAST_30_DAYS',
'format': 'CSV',
'sendNotification': False
},
'params': {
'type': 'TYPE_INVENTORY_AVAILABILITY',
'groupBys': [
'FILTER_APP_URL',
'FILTER_AUDIENCE_LIST'
],
'filters': [
{
'type': 'FILTER_PARTNER',
'value': {'field': {'name': 'partner_id','kind': 'integer','order': 1,'description': 'DV360 Partner id.'}}
},
{
'type': 'FILTER_INVENTORY_FORMAT',
'value': 'VIDEO'
},
{
'type': 'FILTER_COUNTRY',
'value': 'US'
}
],
'metrics': [
'METRIC_BID_REQUESTS',
'METRIC_UNIQUE_VISITORS_COOKIES'
],
'includeInviteData': True
},
'schedule': {
'frequency': 'DAILY',
'nextRunMinuteOfDay': 0,
'nextRunTimezoneCode': 'America/Los_Angeles',
'endTimeMs': 7983727200000
}
}
},
'out': {
'bigquery': {
'dataset': {'field': {'name': 'dataset','kind': 'string','order': 3,'default': '','description': 'BigQuery Dataset where all data will live.'}},
'schema': [
{
'type': 'STRING',
'name': 'app_url',
'mode': 'NULLABLE'
},
{
'type': 'STRING',
'name': 'user_list',
'mode': 'NULLABLE'
},
{
'type': 'STRING',
'name': 'impressions',
'mode': 'NULLABLE'
},
{
'type': 'STRING',
'name': 'uniques',
'mode': 'NULLABLE'
}
],
'table': 'us_audience_app'
}
}
}
},
{
'sheets': {
'auth': 'user',
'sheet': {'field': {'name': 'recipe_name','prefix': 'cTV App Match Table ','kind': 'string','order': 1,'description': 'Name of document to deploy to.','default': ''}},
'tab': 'data',
'range': 'A:Z',
'header': True,
'out': {
'auth': 'user',
'bigquery': {
'dataset': {'field': {'name': 'dataset','kind': 'string','description': 'BigQuery Dataset where all data will live.'}},
'table': 'CTV_App_Lookup',
'schema': [
{
'type': 'STRING',
'name': 'Publisher_Name',
'mode': 'NULLABLE'
},
{
'type': 'STRING',
'name': 'CTV_App_name',
'mode': 'NULLABLE'
}
]
}
}
}
},
{
'bigquery': {
'description': 'The query to join all the IAR reports into an Affinity Index.',
'auth': 'user',
'from': {
'query': "SELECT audience_app.app_url, audience_app.ctv_app_name, IF (audience_app.app_url LIKE '%Android%' OR audience_app.app_url LIKE '%iOS', 'App', 'Domain') AS app_or_domain, audience_app.user_list AS audience_list, audience_app.Potential_Impressions AS audience_app_impressions, audience_app.Unique_Cookies_With_Impressions AS audience_app_uniques, audience_baseline.Potential_Impressions AS audience_baseline_impressions, audience_baseline.Unique_Cookies_With_Impressions AS audience_baseline_uniques, country_app.Potential_Impressions AS country_app_impressions, country_app.Unique_Cookies_With_Impressions AS country_app_uniques, country_baseline.Potential_Impressions AS country_baseline_impressions, country_baseline.Unique_Cookies_With_Impressions AS country_baseline_uniques, ((audience_app.Unique_Cookies_With_Impressions/NULLIF(audience_baseline.Unique_Cookies_With_Impressions, 0))/NULLIF((country_app.Unique_Cookies_With_Impressions/NULLIF(CAST(country_baseline.Unique_Cookies_With_Impressions AS int64), 0)), 0))*100 AS affinity_index FROM ( SELECT user_list, CAST( IF (impressions LIKE '%< 1000%', 0, CAST(impressions AS int64)) AS int64) AS potential_impressions, CAST( IF (uniques LIKE '%< 100%', 0, CAST(uniques AS int64)) AS int64) AS unique_cookies_with_impressions FROM `[PARAMETER].[PARAMETER].us_audience_baseline` ) AS audience_baseline JOIN ( SELECT ctv_app.CTV_App_name AS ctv_app_name, user_list, app_url, CAST( IF (impressions LIKE '%< 1000%', 0, CAST(impressions AS int64)) AS int64) AS potential_impressions, CAST( IF (uniques LIKE '%< 1000%', 0, CAST(uniques AS int64)) AS int64) AS unique_cookies_with_impressions FROM `[PARAMETER].[PARAMETER].us_audience_app` AS a LEFT JOIN `[PARAMETER].[PARAMETER].CTV_App_Lookup` AS ctv_app ON a.app_url = ctv_app.Publisher_Name ) AS audience_app ON audience_baseline.user_list = audience_app.user_list LEFT JOIN ( SELECT app_url, CAST( IF (CAST(impressions AS STRING) LIKE '%< 1000%', 0, CAST(impressions AS int64)) AS int64) AS Potential_Impressions, CAST( IF (CAST(uniques AS STRING) LIKE '%< 1000%', 0, CAST(uniques AS int64)) AS int64) AS Unique_Cookies_With_Impressions FROM `[PARAMETER].[PARAMETER].us_country_app` ) AS country_app ON country_app.app_url = audience_app.app_url CROSS JOIN ( SELECT CAST( IF (CAST(impressions AS STRING) LIKE '%< 1000%', 0, CAST(impressions AS int64)) AS int64) AS Potential_Impressions, CAST( IF (CAST(uniques AS STRING) LIKE '%< 1000%', 0, CAST(uniques AS int64)) AS int64) AS Unique_Cookies_With_Impressions FROM `[PARAMETER].[PARAMETER].us_country_baseline` ) AS country_baseline",
'legacy': False,
'parameters': [
{'field': {'name': 'recipe_project','kind': 'string','description': 'Project where BigQuery dataset will be created.'}},
{'field': {'name': 'dataset','kind': 'string','description': 'Place where tables will be written in BigQuery.'}},
{'field': {'name': 'recipe_project','kind': 'string','description': 'Project where BigQuery dataset will be created.'}},
{'field': {'name': 'dataset','kind': 'string','description': 'Place where tables will be written in BigQuery.'}},
{'field': {'name': 'recipe_project','kind': 'string','description': 'Project where BigQuery dataset will be created.'}},
{'field': {'name': 'dataset','kind': 'string','description': 'Place where tables will be written in BigQuery.'}},
{'field': {'name': 'recipe_project','kind': 'string','description': 'Project where BigQuery dataset will be created.'}},
{'field': {'name': 'dataset','kind': 'string','description': 'Place where tables will be written in BigQuery.'}},
{'field': {'name': 'recipe_project','kind': 'string','description': 'Project where BigQuery dataset will be created.'}},
{'field': {'name': 'dataset','kind': 'string','description': 'Place where tables will be written in BigQuery.'}},
{'field': {'name': 'recipe_project','kind': 'string','description': 'Project where BigQuery dataset will be created.'}},
{'field': {'name': 'dataset','kind': 'string','description': 'Place where tables will be written in BigQuery.'}}
]
},
'to': {
'dataset': {'field': {'name': 'dataset','kind': 'string','description': 'BigQuery Dataset where all data will live.'}},
'table': 'final_table'
}
}
}
]
json_set_fields(TASKS, FIELDS)
project.initialize(_recipe={ 'tasks':TASKS }, _project=CLOUD_PROJECT, _user=USER_CREDENTIALS, _client=CLIENT_CREDENTIALS, _verbose=True)
project.execute()
###Output
_____no_output_____
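###Markdown
To make the final BigQuery step concrete: the affinity index it writes to ``final_table`` is (audience uniques on an app / audience baseline uniques) divided by (country uniques on the app / country baseline uniques), times 100. A tiny worked example with illustrative numbers only (not real report data):
###Code
# Illustrative numbers only -- not real report data.
audience_app_uniques = 5_000
audience_baseline_uniques = 100_000
country_app_uniques = 2_000_000
country_baseline_uniques = 200_000_000
affinity_index = ((audience_app_uniques / audience_baseline_uniques)
                  / (country_app_uniques / country_baseline_uniques)) * 100
print(affinity_index)  # 500.0 -> this audience over-indexes on the app 5x versus the country baseline
###Output
_____no_output_____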
###Markdown
1. Install DependenciesFirst install the libraries needed to execute recipes, this only needs to be done once, then click play.
###Code
!pip install git+https://github.com/google/starthinker
###Output
_____no_output_____
###Markdown
2. Get Cloud Project IDTo run this recipe [requires a Google Cloud Project](https://github.com/google/starthinker/blob/master/tutorials/cloud_project.md), this only needs to be done once, then click play.
###Code
CLOUD_PROJECT = 'PASTE PROJECT ID HERE'
print("Cloud Project Set To: %s" % CLOUD_PROJECT)
###Output
_____no_output_____
###Markdown
3. Get Client CredentialsTo read and write to various endpoints requires [downloading client credentials](https://github.com/google/starthinker/blob/master/tutorials/cloud_client_installed.md), this only needs to be done once, then click play.
###Code
CLIENT_CREDENTIALS = 'PASTE CLIENT CREDENTIALS HERE'
print("Client Credentials Set To: %s" % CLIENT_CREDENTIALS)
###Output
_____no_output_____
###Markdown
4. Enter cTV Inventory Availability Dashboard ParametersThe cTV Audience Affinity dashboard is designed to give clients insights into which cTV apps their audiences have a high affinity for using. The goal of this dashboard is to provide some assistance with the lack of audience targeting for cTV within DV360. 1. Find instructions and recommendations for this dashboard hereModify the values below for your use case, can be done multiple times, then click play.
###Code
FIELDS = {
'dataset': '', # BigQuery Dataset where all data will live.
'recipe_project': '', # Project where BigQuery dataset will be created.
'recipe_name': '', # Name of document to deploy to.
'auth_write': 'service', # Credentials used for writing data.
'partner_id': '', # DV360 Partner id.
'auth_read': 'user', # Credentials used for reading data.
'audience_ids': '', # Comma separated list of Audience Ids
}
print("Parameters Set To: %s" % FIELDS)
###Output
_____no_output_____
###Markdown
5. Execute cTV Inventory Availability DashboardThis does NOT need to be modified unless you are changing the recipe, click play.
###Code
from starthinker.util.configuration import Configuration
from starthinker.util.configuration import execute
from starthinker.util.recipe import json_set_fields
USER_CREDENTIALS = '/content/user.json'
TASKS = [
{
'drive': {
'auth': 'user',
'copy': {
'source': 'https://docs.google.com/spreadsheets/d/1PPPk2b4gGJHNgQ4hXLiTKzH8pRIdlF5fNy9VCw1v7tM/',
'destination': {'field': {'name': 'recipe_name','prefix': 'cTV App Match Table ','kind': 'string','order': 1,'description': 'Name of document to deploy to.','default': ''}}
}
}
},
{
'dataset': {
'auth': 'user',
'dataset': {'field': {'name': 'dataset','kind': 'string','order': 3,'default': '','description': 'BigQuery Dataset where all data will live.'}}
}
},
{
'dbm': {
'auth': 'user',
'report': {
'body': {
'timezoneCode': 'America/Los_Angeles',
'kind': 'doubleclickbidmanager#query',
'metadata': {
'title': {'field': {'name': 'recipe_name','kind': 'string','prefix': 'us_country_app_'}},
'dataRange': 'LAST_30_DAYS',
'format': 'CSV',
'sendNotification': False
},
'params': {
'type': 'TYPE_INVENTORY_AVAILABILITY',
'groupBys': [
'FILTER_APP_URL'
],
'filters': [
{
'type': 'FILTER_PARTNER',
'value': {'field': {'name': 'partner_id','kind': 'integer','order': 1,'description': 'DV360 Partner id.'}}
},
{
'type': 'FILTER_INVENTORY_FORMAT',
'value': 'VIDEO'
},
{
'type': 'FILTER_COUNTRY',
'value': 'US'
}
],
'metrics': [
'METRIC_BID_REQUESTS',
'METRIC_UNIQUE_VISITORS_COOKIES'
],
'includeInviteData': True
},
'schedule': {
'frequency': 'DAILY',
'nextRunMinuteOfDay': 0,
'nextRunTimezoneCode': 'America/Los_Angeles',
'endTimeMs': 7983727200000
}
}
},
'out': {
'bigquery': {
'dataset': {'field': {'name': 'dataset','kind': 'string','order': 3,'default': '','description': 'BigQuery Dataset where all data will live.'}},
'schema': [
{
'type': 'STRING',
'name': 'app_url',
'mode': 'NULLABLE'
},
{
'type': 'STRING',
'name': 'impressions',
'mode': 'NULLABLE'
},
{
'type': 'STRING',
'name': 'uniques',
'mode': 'NULLABLE'
}
],
'table': 'us_country_app',
'header': True
}
}
}
},
{
'dbm': {
'auth': 'user',
'report': {
'body': {
'timezoneCode': 'America/Los_Angeles',
'kind': 'doubleclickbidmanager#query',
'metadata': {
'title': {'field': {'name': 'recipe_name','kind': 'string','prefix': 'us_country_baseline_'}},
'dataRange': 'LAST_30_DAYS',
'format': 'CSV',
'sendNotification': False
},
'params': {
'type': 'TYPE_INVENTORY_AVAILABILITY',
'filters': [
{
'type': 'FILTER_PARTNER',
'value': {'field': {'name': 'partner_id','kind': 'integer','order': 1,'description': 'DV360 Partner id.'}}
},
{
'type': 'FILTER_COUNTRY',
'value': 'US'
}
],
'metrics': [
'METRIC_BID_REQUESTS',
'METRIC_UNIQUE_VISITORS_COOKIES'
],
'includeInviteData': True
},
'schedule': {
'frequency': 'DAILY',
'nextRunMinuteOfDay': 0,
'nextRunTimezoneCode': 'America/Los_Angeles',
'endTimeMs': 7983727200000
}
}
},
'out': {
'bigquery': {
'dataset': {'field': {'name': 'dataset','kind': 'string','order': 3,'default': '','description': 'BigQuery Dataset where all data will live.'}},
'schema': [
{
'type': 'STRING',
'name': 'impressions',
'mode': 'NULLABLE'
},
{
'type': 'STRING',
'name': 'uniques',
'mode': 'NULLABLE'
}
],
'table': 'us_country_baseline',
'header': True
}
}
}
},
{
'dbm': {
'auth': 'user',
'report': {
'filters': {
'FILTER_USER_LIST': {
'single_cell': True,
'values': {'field': {'name': 'audience_ids','kind': 'integer_list','order': 2,'description': 'Comma separated list of Audience Ids'}}
}
},
'body': {
'timezoneCode': 'America/Los_Angeles',
'kind': 'doubleclickbidmanager#query',
'metadata': {
'title': {'field': {'name': 'recipe_name','kind': 'string','prefix': 'us_audience_baseline_'}},
'dataRange': 'LAST_30_DAYS',
'format': 'CSV',
'sendNotification': False
},
'params': {
'type': 'TYPE_INVENTORY_AVAILABILITY',
'groupBys': [
'FILTER_AUDIENCE_LIST'
],
'filters': [
{
'type': 'FILTER_PARTNER',
'value': {'field': {'name': 'partner_id','kind': 'integer','order': 1,'description': 'DV360 Partner id.'}}
},
{
'type': 'FILTER_COUNTRY',
'value': 'US'
}
],
'metrics': [
'METRIC_BID_REQUESTS',
'METRIC_UNIQUE_VISITORS_COOKIES'
],
'includeInviteData': True
},
'schedule': {
'frequency': 'DAILY',
'nextRunMinuteOfDay': 0,
'nextRunTimezoneCode': 'America/Los_Angeles',
'endTimeMs': 7983727200000
}
}
},
'out': {
'bigquery': {
'dataset': {'field': {'name': 'dataset','kind': 'string','order': 3,'default': '','description': 'BigQuery Dataset where all data will live.'}},
'schema': [
{
'type': 'STRING',
'name': 'user_list',
'mode': 'NULLABLE'
},
{
'type': 'STRING',
'name': 'impressions',
'mode': 'NULLABLE'
},
{
'type': 'STRING',
'name': 'uniques',
'mode': 'NULLABLE'
}
],
'table': 'us_audience_baseline',
'header': True
}
}
}
},
{
'dbm': {
'auth': 'user',
'report': {
'filters': {
'FILTER_USER_LIST': {
'single_cell': True,
'values': {'field': {'name': 'audience_ids','kind': 'integer_list','order': 2,'description': 'Comma separated list of Audience Ids'}}
}
},
'body': {
'timezoneCode': 'America/Los_Angeles',
'kind': 'doubleclickbidmanager#query',
'metadata': {
'title': {'field': {'name': 'recipe_name','kind': 'string','prefix': 'us_audience_app_'}},
'dataRange': 'LAST_30_DAYS',
'format': 'CSV',
'sendNotification': False
},
'params': {
'type': 'TYPE_INVENTORY_AVAILABILITY',
'groupBys': [
'FILTER_APP_URL',
'FILTER_AUDIENCE_LIST'
],
'filters': [
{
'type': 'FILTER_PARTNER',
'value': {'field': {'name': 'partner_id','kind': 'integer','order': 1,'description': 'DV360 Partner id.'}}
},
{
'type': 'FILTER_INVENTORY_FORMAT',
'value': 'VIDEO'
},
{
'type': 'FILTER_COUNTRY',
'value': 'US'
}
],
'metrics': [
'METRIC_BID_REQUESTS',
'METRIC_UNIQUE_VISITORS_COOKIES'
],
'includeInviteData': True
},
'schedule': {
'frequency': 'DAILY',
'nextRunMinuteOfDay': 0,
'nextRunTimezoneCode': 'America/Los_Angeles',
'endTimeMs': 7983727200000
}
}
},
'out': {
'bigquery': {
'dataset': {'field': {'name': 'dataset','kind': 'string','order': 3,'default': '','description': 'BigQuery Dataset where all data will live.'}},
'schema': [
{
'type': 'STRING',
'name': 'app_url',
'mode': 'NULLABLE'
},
{
'type': 'STRING',
'name': 'user_list',
'mode': 'NULLABLE'
},
{
'type': 'STRING',
'name': 'impressions',
'mode': 'NULLABLE'
},
{
'type': 'STRING',
'name': 'uniques',
'mode': 'NULLABLE'
}
],
'table': 'us_audience_app',
'header': True
}
}
}
},
{
'sheets': {
'auth': 'user',
'sheet': {'field': {'name': 'recipe_name','prefix': 'cTV App Match Table ','kind': 'string','order': 1,'description': 'Name of document to deploy to.','default': ''}},
'tab': 'data',
'range': 'A:Z',
'header': True,
'out': {
'auth': 'user',
'bigquery': {
'dataset': {'field': {'name': 'dataset','kind': 'string','description': 'BigQuery Dataset where all data will live.'}},
'table': 'CTV_App_Lookup',
'schema': [
{
'type': 'STRING',
'name': 'Publisher_Name',
'mode': 'NULLABLE'
},
{
'type': 'STRING',
'name': 'CTV_App_name',
'mode': 'NULLABLE'
}
]
}
}
}
},
{
'bigquery': {
'description': 'The query to join all the IAR reports into an Affinity Index.',
'auth': 'user',
'from': {
'query': "WITH audience_app_clean AS ( SELECT ctv_app.CTV_App_name AS ctv_app_name, user_list, app_url, IF (app_url LIKE '%Android%' OR app_url LIKE '%iOS', 'App', 'Domain') AS app_or_domain, CAST( IF (cast(impressions as string) LIKE '%< 1000%', cast(0 as int64), CAST(impressions AS int64)) AS int64) AS potential_impressions, CAST( IF (uniques LIKE '%< 1000%', cast(0 as int64), CAST(uniques AS int64)) AS int64) AS unique_cookies_with_impressions FROM `[PARAMETER].[PARAMETER].us_audience_app` AS a LEFT JOIN `[PARAMETER].[PARAMETER].CTV_App_Lookup` AS ctv_app ON a.app_url = ctv_app.Publisher_Name ), us_country_app_clean AS ( SELECT a.app_url, ctv_app.CTV_App_name AS ctv_app_name, CAST( IF (CAST(a.impressions AS STRING) LIKE '%< 1000%', cast(0 as int64), CAST(a.impressions AS int64)) AS int64) AS POtential_ImpressionS, CAST( IF (CAST(a.uniques AS STRING) LIKE '%< 1000%', cast(0 as int64), CAST(a.uniques AS int64)) AS int64) AS Unique_Cookies_With_Impressions FROM `[PARAMETER].[PARAMETER].us_country_app` AS a LEFT JOIN `[PARAMETER].[PARAMETER].CTV_App_Lookup` AS ctv_app ON a.app_url = ctv_app.Publisher_Name ) SELECT audience_app.ctv_app_name, audience_app.app_or_domain, audience_app.user_list AS audience_list, audience_app.Potential_Impressions AS audience_app_impressions, audience_app.Unique_Cookies_With_Impressions AS audience_app_uniques, audience_baseline.Potential_Impressions AS audience_baseline_impressions, audience_baseline.Unique_Cookies_With_Impressions AS audience_baseline_uniques, country_app.Potential_Impressions AS country_app_impressions, country_app.Unique_Cookies_With_Impressions AS country_app_uniques, country_baseline.Potential_Impressions AS country_baseline_impressions, country_baseline.Unique_Cookies_With_Impressions AS country_baseline_uniques, ((audience_app.Unique_Cookies_With_Impressions/NULLIF(audience_baseline.Unique_Cookies_With_Impressions, 0))/NULLIF((country_app.Unique_Cookies_With_Impressions/NULLIF(CAST(country_baseline.Unique_Cookies_With_Impressions AS int64), 0)), 0))*100 AS affinity_index FROM ( SELECT user_list, CAST( IF (cast(impressions as string) LIKE '%< 1000%', cast(0 as int64), CAST(impressions AS int64)) AS int64) AS POTential_impressions, CAST( IF (uniques LIKE '%< 100%', cast(0 as int64), CAST(uniques AS int64)) AS int64) AS unique_cookies_with_impressions FROM `[PARAMETER].[PARAMETER].us_audience_baseline` ) AS audience_baseline JOIN ( SELECT ctv_app_name, user_list, app_or_domain, SUM(potential_impressions) as poTEntial_impressions, SUM(unique_cookies_with_impressions) as unique_cookies_with_impressions, FROM audience_app_clean GROUP BY ctv_app_name, user_list, app_or_domain) AS audience_app ON audience_baseline.user_list = audience_app.user_list LEFT JOIN ( SELECT ctv_app_name, SUM(potential_impressions) as potENtial_impressions, SUM(unique_cookies_with_impressions) as unique_cookies_with_impressions, FROM us_country_app_clean GROUP BY ctv_app_name) AS country_app ON country_app.ctv_app_name = audience_app.ctv_app_name CROSS JOIN ( SELECT CAST( IF (CAST(impressions AS STRING) LIKE '%< 1000%', cast(0 as int64), CAST(impressions AS int64)) AS int64) AS PotenTial_Impressions, CAST( IF (CAST(uniques AS STRING) LIKE '%< 1000%', cast(0 as int64), CAST(uniques AS int64)) AS int64) AS Unique_Cookies_With_Impressions FROM `[PARAMETER].[PARAMETER].us_country_baseline` ) AS country_baseline",
'legacy': False,
'parameters': [
{'field': {'name': 'recipe_project','kind': 'string','description': 'Project where BigQuery dataset will be created.'}},
{'field': {'name': 'dataset','kind': 'string','description': 'Place where tables will be written in BigQuery.'}},
{'field': {'name': 'recipe_project','kind': 'string','description': 'Project where BigQuery dataset will be created.'}},
{'field': {'name': 'dataset','kind': 'string','description': 'Place where tables will be written in BigQuery.'}},
{'field': {'name': 'recipe_project','kind': 'string','description': 'Project where BigQuery dataset will be created.'}},
{'field': {'name': 'dataset','kind': 'string','description': 'Place where tables will be written in BigQuery.'}},
{'field': {'name': 'recipe_project','kind': 'string','description': 'Project where BigQuery dataset will be created.'}},
{'field': {'name': 'dataset','kind': 'string','description': 'Place where tables will be written in BigQuery.'}},
{'field': {'name': 'recipe_project','kind': 'string','description': 'Project where BigQuery dataset will be created.'}},
{'field': {'name': 'dataset','kind': 'string','description': 'Place where tables will be written in BigQuery.'}},
{'field': {'name': 'recipe_project','kind': 'string','description': 'Project where BigQuery dataset will be created.'}},
{'field': {'name': 'dataset','kind': 'string','description': 'Place where tables will be written in BigQuery.'}}
]
},
'to': {
'dataset': {'field': {'name': 'dataset','kind': 'string','description': 'BigQuery Dataset where all data will live.'}},
'table': 'final_table'
}
}
}
]
json_set_fields(TASKS, FIELDS)
execute(Configuration(project=CLOUD_PROJECT, client=CLIENT_CREDENTIALS, user=USER_CREDENTIALS, verbose=True), TASKS, force=True)
###Output
_____no_output_____
###Markdown
1. Install DependenciesFirst install the libraries needed to execute recipes, this only needs to be done once, then click play.
###Code
!pip install git+https://github.com/google/starthinker
###Output
_____no_output_____
###Markdown
2. Get Cloud Project IDTo run this recipe [requires a Google Cloud Project](https://github.com/google/starthinker/blob/master/tutorials/cloud_project.md), this only needs to be done once, then click play.
###Code
CLOUD_PROJECT = 'PASTE PROJECT ID HERE'
print("Cloud Project Set To: %s" % CLOUD_PROJECT)
###Output
_____no_output_____
###Markdown
3. Get Client CredentialsTo read and write to various endpoints requires [downloading client credentials](https://github.com/google/starthinker/blob/master/tutorials/cloud_client_installed.md), this only needs to be done once, then click play.
###Code
CLIENT_CREDENTIALS = 'PASTE CREDENTIALS HERE'
print("Client Credentials Set To: %s" % CLIENT_CREDENTIALS)
###Output
_____no_output_____
###Markdown
4. Enter cTV Inventory Availability Dashboard ParametersThe cTV Audience Affinity dashboard is designed to give clients insights into which cTV apps their audiences have a high affinity for using. The goal of this dashboard is to provide some assistance with the lack of audience targeting for cTV within DV360. 1. Find instructions and recommendations for this dashboard hereModify the values below for your use case, can be done multiple times, then click play.
###Code
FIELDS = {
'dataset': '', # BigQuery Dataset where all data will live.
'recipe_project': '', # Project where BigQuery dataset will be created.
'recipe_name': '', # Name of document to deploy to.
'auth_write': 'service', # Credentials used for writing data.
'partner_id': '', # DV360 Partner id.
'auth_read': 'user', # Credentials used for reading data.
'audience_ids': '', # Comma separated list of Audience Ids
}
print("Parameters Set To: %s" % FIELDS)
###Output
_____no_output_____
###Markdown
5. Execute cTV Inventory Availability DashboardThis does NOT need to be modified unless you are changing the recipe, click play.
###Code
from starthinker.util.project import project
from starthinker.script.parse import json_set_fields
USER_CREDENTIALS = '/content/user.json'
TASKS = [
{
'drive': {
'auth': 'user',
'copy': {
'source': 'https://docs.google.com/spreadsheets/d/1PPPk2b4gGJHNgQ4hXLiTKzH8pRIdlF5fNy9VCw1v7tM/',
'destination': {'field': {'name': 'recipe_name','prefix': 'cTV App Match Table ','kind': 'string','order': 1,'description': 'Name of document to deploy to.','default': ''}}
}
}
},
{
'dataset': {
'auth': 'user',
'dataset': {'field': {'name': 'dataset','kind': 'string','order': 3,'default': '','description': 'BigQuery Dataset where all data will live.'}}
}
},
{
'dbm': {
'auth': 'user',
'report': {
'body': {
'timezoneCode': 'America/Los_Angeles',
'kind': 'doubleclickbidmanager#query',
'metadata': {
'title': {'field': {'name': 'recipe_name','kind': 'string','prefix': 'us_country_app_'}},
'dataRange': 'LAST_30_DAYS',
'format': 'CSV',
'sendNotification': False
},
'params': {
'type': 'TYPE_INVENTORY_AVAILABILITY',
'groupBys': [
'FILTER_APP_URL'
],
'filters': [
{
'type': 'FILTER_PARTNER',
'value': {'field': {'name': 'partner_id','kind': 'integer','order': 1,'description': 'DV360 Partner id.'}}
},
{
'type': 'FILTER_INVENTORY_FORMAT',
'value': 'VIDEO'
},
{
'type': 'FILTER_COUNTRY',
'value': 'US'
}
],
'metrics': [
'METRIC_BID_REQUESTS',
'METRIC_UNIQUE_VISITORS_COOKIES'
],
'includeInviteData': True
},
'schedule': {
'frequency': 'DAILY',
'nextRunMinuteOfDay': 0,
'nextRunTimezoneCode': 'America/Los_Angeles',
'endTimeMs': 7983727200000
}
}
},
'out': {
'bigquery': {
'dataset': {'field': {'name': 'dataset','kind': 'string','order': 3,'default': '','description': 'BigQuery Dataset where all data will live.'}},
'schema': [
{
'type': 'STRING',
'name': 'app_url',
'mode': 'NULLABLE'
},
{
'type': 'STRING',
'name': 'impressions',
'mode': 'NULLABLE'
},
{
'type': 'STRING',
'name': 'uniques',
'mode': 'NULLABLE'
}
],
'table': 'us_country_app',
'header': True
}
}
}
},
{
'dbm': {
'auth': 'user',
'report': {
'body': {
'timezoneCode': 'America/Los_Angeles',
'kind': 'doubleclickbidmanager#query',
'metadata': {
'title': {'field': {'name': 'recipe_name','kind': 'string','prefix': 'us_country_baseline_'}},
'dataRange': 'LAST_30_DAYS',
'format': 'CSV',
'sendNotification': False
},
'params': {
'type': 'TYPE_INVENTORY_AVAILABILITY',
'filters': [
{
'type': 'FILTER_PARTNER',
'value': {'field': {'name': 'partner_id','kind': 'integer','order': 1,'description': 'DV360 Partner id.'}}
},
{
'type': 'FILTER_COUNTRY',
'value': 'US'
}
],
'metrics': [
'METRIC_BID_REQUESTS',
'METRIC_UNIQUE_VISITORS_COOKIES'
],
'includeInviteData': True
},
'schedule': {
'frequency': 'DAILY',
'nextRunMinuteOfDay': 0,
'nextRunTimezoneCode': 'America/Los_Angeles',
'endTimeMs': 7983727200000
}
}
},
'out': {
'bigquery': {
'dataset': {'field': {'name': 'dataset','kind': 'string','order': 3,'default': '','description': 'BigQuery Dataset where all data will live.'}},
'schema': [
{
'type': 'STRING',
'name': 'impressions',
'mode': 'NULLABLE'
},
{
'type': 'STRING',
'name': 'uniques',
'mode': 'NULLABLE'
}
],
'table': 'us_country_baseline',
'header': True
}
}
}
},
{
'dbm': {
'auth': 'user',
'report': {
'filters': {
'FILTER_USER_LIST': {
'single_cell': True,
'values': {'field': {'name': 'audience_ids','kind': 'integer_list','order': 2,'description': 'Comma separated list of Audience Ids'}}
}
},
'body': {
'timezoneCode': 'America/Los_Angeles',
'kind': 'doubleclickbidmanager#query',
'metadata': {
'title': {'field': {'name': 'recipe_name','kind': 'string','prefix': 'us_audience_baseline_'}},
'dataRange': 'LAST_30_DAYS',
'format': 'CSV',
'sendNotification': False
},
'params': {
'type': 'TYPE_INVENTORY_AVAILABILITY',
'groupBys': [
'FILTER_AUDIENCE_LIST'
],
'filters': [
{
'type': 'FILTER_PARTNER',
'value': {'field': {'name': 'partner_id','kind': 'integer','order': 1,'description': 'DV360 Partner id.'}}
},
{
'type': 'FILTER_COUNTRY',
'value': 'US'
}
],
'metrics': [
'METRIC_BID_REQUESTS',
'METRIC_UNIQUE_VISITORS_COOKIES'
],
'includeInviteData': True
},
'schedule': {
'frequency': 'DAILY',
'nextRunMinuteOfDay': 0,
'nextRunTimezoneCode': 'America/Los_Angeles',
'endTimeMs': 7983727200000
}
}
},
'out': {
'bigquery': {
'dataset': {'field': {'name': 'dataset','kind': 'string','order': 3,'default': '','description': 'BigQuery Dataset where all data will live.'}},
'schema': [
{
'type': 'STRING',
'name': 'user_list',
'mode': 'NULLABLE'
},
{
'type': 'STRING',
'name': 'impressions',
'mode': 'NULLABLE'
},
{
'type': 'STRING',
'name': 'uniques',
'mode': 'NULLABLE'
}
],
'table': 'us_audience_baseline',
'header': True
}
}
}
},
{
'dbm': {
'auth': 'user',
'report': {
'filters': {
'FILTER_USER_LIST': {
'single_cell': True,
'values': {'field': {'name': 'audience_ids','kind': 'integer_list','order': 2,'description': 'Comma separated list of Audience Ids'}}
}
},
'body': {
'timezoneCode': 'America/Los_Angeles',
'kind': 'doubleclickbidmanager#query',
'metadata': {
'title': {'field': {'name': 'recipe_name','kind': 'string','prefix': 'us_audience_app_'}},
'dataRange': 'LAST_30_DAYS',
'format': 'CSV',
'sendNotification': False
},
'params': {
'type': 'TYPE_INVENTORY_AVAILABILITY',
'groupBys': [
'FILTER_APP_URL',
'FILTER_AUDIENCE_LIST'
],
'filters': [
{
'type': 'FILTER_PARTNER',
'value': {'field': {'name': 'partner_id','kind': 'integer','order': 1,'description': 'DV360 Partner id.'}}
},
{
'type': 'FILTER_INVENTORY_FORMAT',
'value': 'VIDEO'
},
{
'type': 'FILTER_COUNTRY',
'value': 'US'
}
],
'metrics': [
'METRIC_BID_REQUESTS',
'METRIC_UNIQUE_VISITORS_COOKIES'
],
'includeInviteData': True
},
'schedule': {
'frequency': 'DAILY',
'nextRunMinuteOfDay': 0,
'nextRunTimezoneCode': 'America/Los_Angeles',
'endTimeMs': 7983727200000
}
}
},
'out': {
'bigquery': {
'dataset': {'field': {'name': 'dataset','kind': 'string','order': 3,'default': '','description': 'BigQuery Dataset where all data will live.'}},
'schema': [
{
'type': 'STRING',
'name': 'app_url',
'mode': 'NULLABLE'
},
{
'type': 'STRING',
'name': 'user_list',
'mode': 'NULLABLE'
},
{
'type': 'STRING',
'name': 'impressions',
'mode': 'NULLABLE'
},
{
'type': 'STRING',
'name': 'uniques',
'mode': 'NULLABLE'
}
],
'table': 'us_audience_app',
'header': True
}
}
}
},
{
'sheets': {
'auth': 'user',
'sheet': {'field': {'name': 'recipe_name','prefix': 'cTV App Match Table ','kind': 'string','order': 1,'description': 'Name of document to deploy to.','default': ''}},
'tab': 'data',
'range': 'A:Z',
'header': True,
'out': {
'auth': 'user',
'bigquery': {
'dataset': {'field': {'name': 'dataset','kind': 'string','description': 'BigQuery Dataset where all data will live.'}},
'table': 'CTV_App_Lookup',
'schema': [
{
'type': 'STRING',
'name': 'Publisher_Name',
'mode': 'NULLABLE'
},
{
'type': 'STRING',
'name': 'CTV_App_name',
'mode': 'NULLABLE'
}
]
}
}
}
},
{
'bigquery': {
'description': 'The query to join all the IAR reports into an Affinity Index.',
'auth': 'user',
'from': {
'query': "WITH audience_app_clean AS ( SELECT ctv_app.CTV_App_name AS ctv_app_name, user_list, app_url, IF (app_url LIKE '%Android%' OR app_url LIKE '%iOS', 'App', 'Domain') AS app_or_domain, CAST( IF (cast(impressions as string) LIKE '%< 1000%', cast(0 as int64), CAST(impressions AS int64)) AS int64) AS potential_impressions, CAST( IF (uniques LIKE '%< 1000%', cast(0 as int64), CAST(uniques AS int64)) AS int64) AS unique_cookies_with_impressions FROM `[PARAMETER].[PARAMETER].us_audience_app` AS a LEFT JOIN `[PARAMETER].[PARAMETER].CTV_App_Lookup` AS ctv_app ON a.app_url = ctv_app.Publisher_Name ), us_country_app_clean AS ( SELECT a.app_url, ctv_app.CTV_App_name AS ctv_app_name, CAST( IF (CAST(a.impressions AS STRING) LIKE '%< 1000%', cast(0 as int64), CAST(a.impressions AS int64)) AS int64) AS POtential_ImpressionS, CAST( IF (CAST(a.uniques AS STRING) LIKE '%< 1000%', cast(0 as int64), CAST(a.uniques AS int64)) AS int64) AS Unique_Cookies_With_Impressions FROM `[PARAMETER].[PARAMETER].us_country_app` AS a LEFT JOIN `[PARAMETER].[PARAMETER].CTV_App_Lookup` AS ctv_app ON a.app_url = ctv_app.Publisher_Name ) SELECT audience_app.ctv_app_name, audience_app.app_or_domain, audience_app.user_list AS audience_list, audience_app.Potential_Impressions AS audience_app_impressions, audience_app.Unique_Cookies_With_Impressions AS audience_app_uniques, audience_baseline.Potential_Impressions AS audience_baseline_impressions, audience_baseline.Unique_Cookies_With_Impressions AS audience_baseline_uniques, country_app.Potential_Impressions AS country_app_impressions, country_app.Unique_Cookies_With_Impressions AS country_app_uniques, country_baseline.Potential_Impressions AS country_baseline_impressions, country_baseline.Unique_Cookies_With_Impressions AS country_baseline_uniques, ((audience_app.Unique_Cookies_With_Impressions/NULLIF(audience_baseline.Unique_Cookies_With_Impressions, 0))/NULLIF((country_app.Unique_Cookies_With_Impressions/NULLIF(CAST(country_baseline.Unique_Cookies_With_Impressions AS int64), 0)), 0))*100 AS affinity_index FROM ( SELECT user_list, CAST( IF (cast(impressions as string) LIKE '%< 1000%', cast(0 as int64), CAST(impressions AS int64)) AS int64) AS POTential_impressions, CAST( IF (uniques LIKE '%< 100%', cast(0 as int64), CAST(uniques AS int64)) AS int64) AS unique_cookies_with_impressions FROM `[PARAMETER].[PARAMETER].us_audience_baseline` ) AS audience_baseline JOIN ( SELECT ctv_app_name, user_list, app_or_domain, SUM(potential_impressions) as poTEntial_impressions, SUM(unique_cookies_with_impressions) as unique_cookies_with_impressions, FROM audience_app_clean GROUP BY ctv_app_name, user_list, app_or_domain) AS audience_app ON audience_baseline.user_list = audience_app.user_list LEFT JOIN ( SELECT ctv_app_name, SUM(potential_impressions) as potENtial_impressions, SUM(unique_cookies_with_impressions) as unique_cookies_with_impressions, FROM us_country_app_clean GROUP BY ctv_app_name) AS country_app ON country_app.ctv_app_name = audience_app.ctv_app_name CROSS JOIN ( SELECT CAST( IF (CAST(impressions AS STRING) LIKE '%< 1000%', cast(0 as int64), CAST(impressions AS int64)) AS int64) AS PotenTial_Impressions, CAST( IF (CAST(uniques AS STRING) LIKE '%< 1000%', cast(0 as int64), CAST(uniques AS int64)) AS int64) AS Unique_Cookies_With_Impressions FROM `[PARAMETER].[PARAMETER].us_country_baseline` ) AS country_baseline",
'legacy': False,
'parameters': [
{'field': {'name': 'recipe_project','kind': 'string','description': 'Project where BigQuery dataset will be created.'}},
{'field': {'name': 'dataset','kind': 'string','description': 'Place where tables will be written in BigQuery.'}},
{'field': {'name': 'recipe_project','kind': 'string','description': 'Project where BigQuery dataset will be created.'}},
{'field': {'name': 'dataset','kind': 'string','description': 'Place where tables will be written in BigQuery.'}},
{'field': {'name': 'recipe_project','kind': 'string','description': 'Project where BigQuery dataset will be created.'}},
{'field': {'name': 'dataset','kind': 'string','description': 'Place where tables will be written in BigQuery.'}},
{'field': {'name': 'recipe_project','kind': 'string','description': 'Project where BigQuery dataset will be created.'}},
{'field': {'name': 'dataset','kind': 'string','description': 'Place where tables will be written in BigQuery.'}},
{'field': {'name': 'recipe_project','kind': 'string','description': 'Project where BigQuery dataset will be created.'}},
{'field': {'name': 'dataset','kind': 'string','description': 'Place where tables will be written in BigQuery.'}},
{'field': {'name': 'recipe_project','kind': 'string','description': 'Project where BigQuery dataset will be created.'}},
{'field': {'name': 'dataset','kind': 'string','description': 'Place where tables will be written in BigQuery.'}}
]
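      # Note: the 'parameters' list appears to be substituted positionally into
      # the '[PARAMETER].[PARAMETER].<table>' placeholders in the query (one
      # recipe_project/dataset pair per table reference), which is why the same
      # two fields are repeated six times.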
},
'to': {
'dataset': {'field': {'name': 'dataset','kind': 'string','description': 'BigQuery Dataset where all data will live.'}},
'table': 'final_table'
}
}
}
]
json_set_fields(TASKS, FIELDS)
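# json_set_fields walks the TASKS structure and replaces each {'field': {...}}
# placeholder with the matching entry from FIELDS (using the field's 'default'
# when no value is supplied), so the recipe below executes with the parameters
# entered in step 4.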
project.initialize(_recipe={ 'tasks':TASKS }, _project=CLOUD_PROJECT, _user=USER_CREDENTIALS, _client=CLIENT_CREDENTIALS, _verbose=True, _force=True)
project.execute(_force=True)
###Output
_____no_output_____
###Markdown
1. Install Dependencies: First install the libraries needed to execute recipes, this only needs to be done once, then click play.
###Code
!pip install git+https://github.com/google/starthinker
###Output
_____no_output_____
###Markdown
2. Get Cloud Project ID: To run this recipe [requires a Google Cloud Project](https://github.com/google/starthinker/blob/master/tutorials/cloud_project.md), this only needs to be done once, then click play.
###Code
CLOUD_PROJECT = 'PASTE PROJECT ID HERE'
print("Cloud Project Set To: %s" % CLOUD_PROJECT)
###Output
_____no_output_____
###Markdown
3. Get Client Credentials: To read and write to various endpoints requires [downloading client credentials](https://github.com/google/starthinker/blob/master/tutorials/cloud_client_installed.md), this only needs to be done once, then click play.
###Code
CLIENT_CREDENTIALS = 'PASTE CREDENTIALS HERE'
print("Client Credentials Set To: %s" % CLIENT_CREDENTIALS)
###Output
_____no_output_____
###Markdown
4. Enter cTV Inventory Availability Dashboard Parameters: The cTV Audience Affinity dashboard is designed to give clients insights into which cTV apps their audiences have a high affinity for using. The goal of this dashboard is to provide some assistance with the lack of audience targeting for cTV within DV360. 1. Find instructions and recommendations for this dashboard here. Modify the values below for your use case; this can be done multiple times, then click play.
###Code
FIELDS = {
'dataset': '', # BigQuery Dataset where all data will live.
'recipe_project': '', # Project where BigQuery dataset will be created.
'recipe_name': '', # Name of document to deploy to.
'auth_write': 'service', # Credentials used for writing data.
'partner_id': '', # DV360 Partner id.
'auth_read': 'user', # Credentials used for reading data.
'audience_ids': '', # Comma separated list of Audience Ids
}
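# Example (hypothetical values -- replace with your own dataset, project,
# partner and audience ids):
# FIELDS = {
#   'dataset': 'ctv_affinity',
#   'recipe_project': 'my-gcp-project',
#   'recipe_name': 'Q1 Campaign',
#   'auth_write': 'service',
#   'partner_id': '1234567',
#   'auth_read': 'user',
#   'audience_ids': '111111,222222',
# }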
print("Parameters Set To: %s" % FIELDS)
###Output
_____no_output_____
###Markdown
5. Execute cTV Inventory Availability Dashboard: This does NOT need to be modified unless you are changing the recipe, click play.
###Code
from starthinker.util.project import project
from starthinker.script.parse import json_set_fields
USER_CREDENTIALS = '/content/user.json'
TASKS = [
{
'drive': {
'auth': 'user',
'copy': {
'source': 'https://docs.google.com/spreadsheets/d/1PPPk2b4gGJHNgQ4hXLiTKzH8pRIdlF5fNy9VCw1v7tM/',
'destination': {'field': {'name': 'recipe_name','prefix': 'cTV App Match Table ','kind': 'string','order': 1,'description': 'Name of document to deploy to.','default': ''}}
}
}
},
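  # The 'drive' task above copies the cTV App Match Table template sheet
  # referenced in 'source' into your Drive (named with the 'cTV App Match
  # Table ' prefix plus recipe_name); the 'sheets' task further down reads
  # its 'data' tab into the CTV_App_Lookup BigQuery table used by the final
  # query.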
{
'dataset': {
'auth': 'user',
'dataset': {'field': {'name': 'dataset','kind': 'string','order': 3,'default': '','description': 'BigQuery Dataset where all data will live.'}}
}
},
{
'dbm': {
'auth': 'user',
'report': {
'body': {
'timezoneCode': 'America/Los_Angeles',
'kind': 'doubleclickbidmanager#query',
'metadata': {
'title': {'field': {'name': 'recipe_name','kind': 'string','prefix': 'us_country_app_'}},
'dataRange': 'LAST_30_DAYS',
'format': 'CSV',
'sendNotification': False
},
'params': {
'type': 'TYPE_INVENTORY_AVAILABILITY',
'groupBys': [
'FILTER_APP_URL'
],
'filters': [
{
'type': 'FILTER_PARTNER',
'value': {'field': {'name': 'partner_id','kind': 'integer','order': 1,'description': 'DV360 Partner id.'}}
},
{
'type': 'FILTER_INVENTORY_FORMAT',
'value': 'VIDEO'
},
{
'type': 'FILTER_COUNTRY',
'value': 'US'
}
],
'metrics': [
'METRIC_BID_REQUESTS',
'METRIC_UNIQUE_VISITORS_COOKIES'
],
'includeInviteData': True
},
'schedule': {
'frequency': 'DAILY',
'nextRunMinuteOfDay': 0,
'nextRunTimezoneCode': 'America/Los_Angeles',
'endTimeMs': 7983727200000
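            # endTimeMs is a far-future epoch timestamp (roughly the year 2223),
            # so the daily report schedule effectively never expires.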
}
}
},
'out': {
'bigquery': {
'dataset': {'field': {'name': 'dataset','kind': 'string','order': 3,'default': '','description': 'BigQuery Dataset where all data will live.'}},
'schema': [
{
'type': 'STRING',
'name': 'app_url',
'mode': 'NULLABLE'
},
{
'type': 'STRING',
'name': 'impressions',
'mode': 'NULLABLE'
},
{
'type': 'STRING',
'name': 'uniques',
'mode': 'NULLABLE'
}
],
'table': 'us_country_app'
}
}
}
},
{
'dbm': {
'auth': 'user',
'report': {
'body': {
'timezoneCode': 'America/Los_Angeles',
'kind': 'doubleclickbidmanager#query',
'metadata': {
'title': {'field': {'name': 'recipe_name','kind': 'string','prefix': 'us_country_baseline_'}},
'dataRange': 'LAST_30_DAYS',
'format': 'CSV',
'sendNotification': False
},
'params': {
'type': 'TYPE_INVENTORY_AVAILABILITY',
'filters': [
{
'type': 'FILTER_PARTNER',
'value': {'field': {'name': 'partner_id','kind': 'integer','order': 1,'description': 'DV360 Partner id.'}}
},
{
'type': 'FILTER_COUNTRY',
'value': 'US'
}
],
'metrics': [
'METRIC_BID_REQUESTS',
'METRIC_UNIQUE_VISITORS_COOKIES'
],
'includeInviteData': True
},
'schedule': {
'frequency': 'DAILY',
'nextRunMinuteOfDay': 0,
'nextRunTimezoneCode': 'America/Los_Angeles',
'endTimeMs': 7983727200000
}
}
},
'out': {
'bigquery': {
'dataset': {'field': {'name': 'dataset','kind': 'string','order': 3,'default': '','description': 'BigQuery Dataset where all data will live.'}},
'schema': [
{
'type': 'STRING',
'name': 'impressions',
'mode': 'NULLABLE'
},
{
'type': 'STRING',
'name': 'uniques',
'mode': 'NULLABLE'
}
],
'table': 'us_country_baseline'
}
}
}
},
{
'dbm': {
'auth': 'user',
'report': {
'filters': {
'FILTER_USER_LIST': {
'single_cell': True,
'values': {'field': {'name': 'audience_ids','kind': 'integer_list','order': 2,'description': 'Comma separated list of Audience Ids'}}
}
},
'body': {
'timezoneCode': 'America/Los_Angeles',
'kind': 'doubleclickbidmanager#query',
'metadata': {
'title': {'field': {'name': 'recipe_name','kind': 'string','prefix': 'us_audience_baseline_'}},
'dataRange': 'LAST_30_DAYS',
'format': 'CSV',
'sendNotification': False
},
'params': {
'type': 'TYPE_INVENTORY_AVAILABILITY',
'groupBys': [
'FILTER_AUDIENCE_LIST'
],
'filters': [
{
'type': 'FILTER_PARTNER',
'value': {'field': {'name': 'partner_id','kind': 'integer','order': 1,'description': 'DV360 Partner id.'}}
},
{
'type': 'FILTER_COUNTRY',
'value': 'US'
}
],
'metrics': [
'METRIC_BID_REQUESTS',
'METRIC_UNIQUE_VISITORS_COOKIES'
],
'includeInviteData': True
},
'schedule': {
'frequency': 'DAILY',
'nextRunMinuteOfDay': 0,
'nextRunTimezoneCode': 'America/Los_Angeles',
'endTimeMs': 7983727200000
}
}
},
'out': {
'bigquery': {
'dataset': {'field': {'name': 'dataset','kind': 'string','order': 3,'default': '','description': 'BigQuery Dataset where all data will live.'}},
'schema': [
{
'type': 'STRING',
'name': 'user_list',
'mode': 'NULLABLE'
},
{
'type': 'STRING',
'name': 'impressions',
'mode': 'NULLABLE'
},
{
'type': 'STRING',
'name': 'uniques',
'mode': 'NULLABLE'
}
],
'table': 'us_audience_baseline'
}
}
}
},
{
'dbm': {
'auth': 'user',
'report': {
'filters': {
'FILTER_USER_LIST': {
'single_cell': True,
'values': {'field': {'name': 'audience_ids','kind': 'integer_list','order': 2,'description': 'Comma separated list of Audience Ids'}}
}
},
'body': {
'timezoneCode': 'America/Los_Angeles',
'kind': 'doubleclickbidmanager#query',
'metadata': {
'title': {'field': {'name': 'recipe_name','kind': 'string','prefix': 'us_audience_app_'}},
'dataRange': 'LAST_30_DAYS',
'format': 'CSV',
'sendNotification': False
},
'params': {
'type': 'TYPE_INVENTORY_AVAILABILITY',
'groupBys': [
'FILTER_APP_URL',
'FILTER_AUDIENCE_LIST'
],
'filters': [
{
'type': 'FILTER_PARTNER',
'value': {'field': {'name': 'partner_id','kind': 'integer','order': 1,'description': 'DV360 Partner id.'}}
},
{
'type': 'FILTER_INVENTORY_FORMAT',
'value': 'VIDEO'
},
{
'type': 'FILTER_COUNTRY',
'value': 'US'
}
],
'metrics': [
'METRIC_BID_REQUESTS',
'METRIC_UNIQUE_VISITORS_COOKIES'
],
'includeInviteData': True
},
'schedule': {
'frequency': 'DAILY',
'nextRunMinuteOfDay': 0,
'nextRunTimezoneCode': 'America/Los_Angeles',
'endTimeMs': 7983727200000
}
}
},
'out': {
'bigquery': {
'dataset': {'field': {'name': 'dataset','kind': 'string','order': 3,'default': '','description': 'BigQuery Dataset where all data will live.'}},
'schema': [
{
'type': 'STRING',
'name': 'app_url',
'mode': 'NULLABLE'
},
{
'type': 'STRING',
'name': 'user_list',
'mode': 'NULLABLE'
},
{
'type': 'STRING',
'name': 'impressions',
'mode': 'NULLABLE'
},
{
'type': 'STRING',
'name': 'uniques',
'mode': 'NULLABLE'
}
],
'table': 'us_audience_app'
}
}
}
},
{
'sheets': {
'auth': 'user',
'sheet': {'field': {'name': 'recipe_name','prefix': 'cTV App Match Table ','kind': 'string','order': 1,'description': 'Name of document to deploy to.','default': ''}},
'tab': 'data',
'range': 'A:Z',
'header': True,
'out': {
'auth': 'user',
'bigquery': {
'dataset': {'field': {'name': 'dataset','kind': 'string','description': 'BigQuery Dataset where all data will live.'}},
'table': 'CTV_App_Lookup',
'schema': [
{
'type': 'STRING',
'name': 'Publisher_Name',
'mode': 'NULLABLE'
},
{
'type': 'STRING',
'name': 'CTV_App_name',
'mode': 'NULLABLE'
}
]
}
}
}
},
{
'bigquery': {
'description': 'The query to join all the IAR reports into an Affinity Index.',
'auth': 'user',
'from': {
'query': "SELECT audience_app.app_url, audience_app.ctv_app_name, IF (audience_app.app_url LIKE '%Android%' OR audience_app.app_url LIKE '%iOS', 'App', 'Domain') AS app_or_domain, audience_app.user_list AS audience_list, audience_app.Potential_Impressions AS audience_app_impressions, audience_app.Unique_Cookies_With_Impressions AS audience_app_uniques, audience_baseline.Potential_Impressions AS audience_baseline_impressions, audience_baseline.Unique_Cookies_With_Impressions AS audience_baseline_uniques, country_app.Potential_Impressions AS country_app_impressions, country_app.Unique_Cookies_With_Impressions AS country_app_uniques, country_baseline.Potential_Impressions AS country_baseline_impressions, country_baseline.Unique_Cookies_With_Impressions AS country_baseline_uniques, ((audience_app.Unique_Cookies_With_Impressions/NULLIF(audience_baseline.Unique_Cookies_With_Impressions, 0))/NULLIF((country_app.Unique_Cookies_With_Impressions/NULLIF(CAST(country_baseline.Unique_Cookies_With_Impressions AS int64), 0)), 0))*100 AS affinity_index FROM ( SELECT user_list, CAST( IF (impressions LIKE '%< 1000%', 0, CAST(impressions AS int64)) AS int64) AS potential_impressions, CAST( IF (uniques LIKE '%< 100%', 0, CAST(uniques AS int64)) AS int64) AS unique_cookies_with_impressions FROM `[PARAMETER].[PARAMETER].us_audience_baseline` ) AS audience_baseline JOIN ( SELECT ctv_app.CTV_App_name AS ctv_app_name, user_list, app_url, CAST( IF (impressions LIKE '%< 1000%', 0, CAST(impressions AS int64)) AS int64) AS potential_impressions, CAST( IF (uniques LIKE '%< 1000%', 0, CAST(uniques AS int64)) AS int64) AS unique_cookies_with_impressions FROM `[PARAMETER].[PARAMETER].us_audience_app` AS a LEFT JOIN `[PARAMETER].[PARAMETER].CTV_App_Lookup` AS ctv_app ON a.app_url = ctv_app.Publisher_Name ) AS audience_app ON audience_baseline.user_list = audience_app.user_list LEFT JOIN ( SELECT app_url, CAST( IF (CAST(impressions AS STRING) LIKE '%< 1000%', 0, CAST(impressions AS int64)) AS int64) AS Potential_Impressions, CAST( IF (CAST(uniques AS STRING) LIKE '%< 1000%', 0, CAST(uniques AS int64)) AS int64) AS Unique_Cookies_With_Impressions FROM `[PARAMETER].[PARAMETER].us_country_app` ) AS country_app ON country_app.app_url = audience_app.app_url CROSS JOIN ( SELECT CAST( IF (CAST(impressions AS STRING) LIKE '%< 1000%', 0, CAST(impressions AS int64)) AS int64) AS Potential_Impressions, CAST( IF (CAST(uniques AS STRING) LIKE '%< 1000%', 0, CAST(uniques AS int64)) AS int64) AS Unique_Cookies_With_Impressions FROM `[PARAMETER].[PARAMETER].us_country_baseline` ) AS country_baseline",
'legacy': False,
'parameters': [
{'field': {'name': 'recipe_project','kind': 'string','description': 'Project where BigQuery dataset will be created.'}},
{'field': {'name': 'dataset','kind': 'string','description': 'Place where tables will be written in BigQuery.'}},
{'field': {'name': 'recipe_project','kind': 'string','description': 'Project where BigQuery dataset will be created.'}},
{'field': {'name': 'dataset','kind': 'string','description': 'Place where tables will be written in BigQuery.'}},
{'field': {'name': 'recipe_project','kind': 'string','description': 'Project where BigQuery dataset will be created.'}},
{'field': {'name': 'dataset','kind': 'string','description': 'Place where tables will be written in BigQuery.'}},
{'field': {'name': 'recipe_project','kind': 'string','description': 'Project where BigQuery dataset will be created.'}},
{'field': {'name': 'dataset','kind': 'string','description': 'Place where tables will be written in BigQuery.'}},
{'field': {'name': 'recipe_project','kind': 'string','description': 'Project where BigQuery dataset will be created.'}},
{'field': {'name': 'dataset','kind': 'string','description': 'Place where tables will be written in BigQuery.'}},
{'field': {'name': 'recipe_project','kind': 'string','description': 'Project where BigQuery dataset will be created.'}},
{'field': {'name': 'dataset','kind': 'string','description': 'Place where tables will be written in BigQuery.'}}
]
},
'to': {
'dataset': {'field': {'name': 'dataset','kind': 'string','description': 'BigQuery Dataset where all data will live.'}},
'table': 'final_table'
}
}
}
]
json_set_fields(TASKS, FIELDS)
project.initialize(_recipe={ 'tasks':TASKS }, _project=CLOUD_PROJECT, _user=USER_CREDENTIALS, _client=CLIENT_CREDENTIALS, _verbose=True, _force=True)
project.execute(_force=True)
###Output
_____no_output_____
###Markdown
1. Install Dependencies: First install the libraries needed to execute recipes, this only needs to be done once, then click play.
###Code
!pip install git+https://github.com/google/starthinker
###Output
_____no_output_____
###Markdown
2. Get Cloud Project ID: To run this recipe [requires a Google Cloud Project](https://github.com/google/starthinker/blob/master/tutorials/cloud_project.md), this only needs to be done once, then click play.
###Code
CLOUD_PROJECT = 'PASTE PROJECT ID HERE'
print("Cloud Project Set To: %s" % CLOUD_PROJECT)
###Output
_____no_output_____
###Markdown
3. Get Client Credentials: To read and write to various endpoints requires [downloading client credentials](https://github.com/google/starthinker/blob/master/tutorials/cloud_client_installed.md), this only needs to be done once, then click play.
###Code
CLIENT_CREDENTIALS = 'PASTE CREDENTIALS HERE'
print("Client Credentials Set To: %s" % CLIENT_CREDENTIALS)
###Output
_____no_output_____
###Markdown
4. Enter cTV Inventory Availability Dashboard Parameters: The cTV Audience Affinity dashboard is designed to give clients insights into which cTV apps their audiences have a high affinity for using. The goal of this dashboard is to provide some assistance with the lack of audience targeting for cTV within DV360. 1. Find instructions and recommendations for this dashboard here. Modify the values below for your use case; this can be done multiple times, then click play.
###Code
FIELDS = {
'dataset': '', # BigQuery Dataset where all data will live.
'recipe_project': '', # Project where BigQuery dataset will be created.
'recipe_name': '', # Name of document to deploy to.
'auth_write': 'service', # Credentials used for writing data.
'partner_id': '', # DV360 Partner id.
'auth_read': 'user', # Credentials used for reading data.
'audience_ids': '', # Comma separated list of Audience Ids
}
print("Parameters Set To: %s" % FIELDS)
###Output
_____no_output_____
###Markdown
5. Execute cTV Inventory Availability Dashboard: This does NOT need to be modified unless you are changing the recipe, click play.
###Code
from starthinker.util.project import project
from starthinker.script.parse import json_set_fields
USER_CREDENTIALS = '/content/user.json'
TASKS = [
{
'drive': {
'auth': 'user',
'copy': {
'source': 'https://docs.google.com/spreadsheets/d/1PPPk2b4gGJHNgQ4hXLiTKzH8pRIdlF5fNy9VCw1v7tM/',
'destination': {'field': {'name': 'recipe_name','prefix': 'cTV App Match Table ','kind': 'string','order': 1,'description': 'Name of document to deploy to.','default': ''}}
}
}
},
{
'dataset': {
'auth': 'user',
'dataset': {'field': {'name': 'dataset','kind': 'string','order': 3,'default': '','description': 'BigQuery Dataset where all data will live.'}}
}
},
{
'dbm': {
'auth': 'user',
'report': {
'body': {
'timezoneCode': 'America/Los_Angeles',
'kind': 'doubleclickbidmanager#query',
'metadata': {
'title': {'field': {'name': 'recipe_name','kind': 'string','prefix': 'us_country_app_'}},
'dataRange': 'LAST_30_DAYS',
'format': 'CSV',
'sendNotification': False
},
'params': {
'type': 'TYPE_INVENTORY_AVAILABILITY',
'groupBys': [
'FILTER_APP_URL'
],
'filters': [
{
'type': 'FILTER_PARTNER',
'value': {'field': {'name': 'partner_id','kind': 'integer','order': 1,'description': 'DV360 Partner id.'}}
},
{
'type': 'FILTER_INVENTORY_FORMAT',
'value': 'VIDEO'
},
{
'type': 'FILTER_COUNTRY',
'value': 'US'
}
],
'metrics': [
'METRIC_BID_REQUESTS',
'METRIC_UNIQUE_VISITORS_COOKIES'
],
'includeInviteData': True
},
'schedule': {
'frequency': 'DAILY',
'nextRunMinuteOfDay': 0,
'nextRunTimezoneCode': 'America/Los_Angeles',
'endTimeMs': 7983727200000
}
}
},
'out': {
'bigquery': {
'dataset': {'field': {'name': 'dataset','kind': 'string','order': 3,'default': '','description': 'BigQuery Dataset where all data will live.'}},
'schema': [
{
'type': 'STRING',
'name': 'app_url',
'mode': 'NULLABLE'
},
{
'type': 'STRING',
'name': 'impressions',
'mode': 'NULLABLE'
},
{
'type': 'STRING',
'name': 'uniques',
'mode': 'NULLABLE'
}
],
'table': 'us_country_app'
}
}
}
},
{
'dbm': {
'auth': 'user',
'report': {
'body': {
'timezoneCode': 'America/Los_Angeles',
'kind': 'doubleclickbidmanager#query',
'metadata': {
'title': {'field': {'name': 'recipe_name','kind': 'string','prefix': 'us_country_baseline_'}},
'dataRange': 'LAST_30_DAYS',
'format': 'CSV',
'sendNotification': False
},
'params': {
'type': 'TYPE_INVENTORY_AVAILABILITY',
'filters': [
{
'type': 'FILTER_PARTNER',
'value': {'field': {'name': 'partner_id','kind': 'integer','order': 1,'description': 'DV360 Partner id.'}}
},
{
'type': 'FILTER_COUNTRY',
'value': 'US'
}
],
'metrics': [
'METRIC_BID_REQUESTS',
'METRIC_UNIQUE_VISITORS_COOKIES'
],
'includeInviteData': True
},
'schedule': {
'frequency': 'DAILY',
'nextRunMinuteOfDay': 0,
'nextRunTimezoneCode': 'America/Los_Angeles',
'endTimeMs': 7983727200000
}
}
},
'out': {
'bigquery': {
'dataset': {'field': {'name': 'dataset','kind': 'string','order': 3,'default': '','description': 'BigQuery Dataset where all data will live.'}},
'schema': [
{
'type': 'STRING',
'name': 'impressions',
'mode': 'NULLABLE'
},
{
'type': 'STRING',
'name': 'uniques',
'mode': 'NULLABLE'
}
],
'table': 'us_country_baseline'
}
}
}
},
{
'dbm': {
'auth': 'user',
'report': {
'filters': {
'FILTER_USER_LIST': {
'single_cell': True,
'values': {'field': {'name': 'audience_ids','kind': 'integer_list','order': 2,'description': 'Comma separated list of Audience Ids'}}
}
},
'body': {
'timezoneCode': 'America/Los_Angeles',
'kind': 'doubleclickbidmanager#query',
'metadata': {
'title': {'field': {'name': 'recipe_name','kind': 'string','prefix': 'us_audience_baseline_'}},
'dataRange': 'LAST_30_DAYS',
'format': 'CSV',
'sendNotification': False
},
'params': {
'type': 'TYPE_INVENTORY_AVAILABILITY',
'groupBys': [
'FILTER_AUDIENCE_LIST'
],
'filters': [
{
'type': 'FILTER_PARTNER',
'value': {'field': {'name': 'partner_id','kind': 'integer','order': 1,'description': 'DV360 Partner id.'}}
},
{
'type': 'FILTER_COUNTRY',
'value': 'US'
}
],
'metrics': [
'METRIC_BID_REQUESTS',
'METRIC_UNIQUE_VISITORS_COOKIES'
],
'includeInviteData': True
},
'schedule': {
'frequency': 'DAILY',
'nextRunMinuteOfDay': 0,
'nextRunTimezoneCode': 'America/Los_Angeles',
'endTimeMs': 7983727200000
}
}
},
'out': {
'bigquery': {
'dataset': {'field': {'name': 'dataset','kind': 'string','order': 3,'default': '','description': 'BigQuery Dataset where all data will live.'}},
'schema': [
{
'type': 'STRING',
'name': 'user_list',
'mode': 'NULLABLE'
},
{
'type': 'STRING',
'name': 'impressions',
'mode': 'NULLABLE'
},
{
'type': 'STRING',
'name': 'uniques',
'mode': 'NULLABLE'
}
],
'table': 'us_audience_baseline'
}
}
}
},
{
'dbm': {
'auth': 'user',
'report': {
'filters': {
'FILTER_USER_LIST': {
'single_cell': True,
'values': {'field': {'name': 'audience_ids','kind': 'integer_list','order': 2,'description': 'Comma separated list of Audience Ids'}}
}
},
'body': {
'timezoneCode': 'America/Los_Angeles',
'kind': 'doubleclickbidmanager#query',
'metadata': {
'title': {'field': {'name': 'recipe_name','kind': 'string','prefix': 'us_audience_app_'}},
'dataRange': 'LAST_30_DAYS',
'format': 'CSV',
'sendNotification': False
},
'params': {
'type': 'TYPE_INVENTORY_AVAILABILITY',
'groupBys': [
'FILTER_APP_URL',
'FILTER_AUDIENCE_LIST'
],
'filters': [
{
'type': 'FILTER_PARTNER',
'value': {'field': {'name': 'partner_id','kind': 'integer','order': 1,'description': 'DV360 Partner id.'}}
},
{
'type': 'FILTER_INVENTORY_FORMAT',
'value': 'VIDEO'
},
{
'type': 'FILTER_COUNTRY',
'value': 'US'
}
],
'metrics': [
'METRIC_BID_REQUESTS',
'METRIC_UNIQUE_VISITORS_COOKIES'
],
'includeInviteData': True
},
'schedule': {
'frequency': 'DAILY',
'nextRunMinuteOfDay': 0,
'nextRunTimezoneCode': 'America/Los_Angeles',
'endTimeMs': 7983727200000
}
}
},
'out': {
'bigquery': {
'dataset': {'field': {'name': 'dataset','kind': 'string','order': 3,'default': '','description': 'BigQuery Dataset where all data will live.'}},
'schema': [
{
'type': 'STRING',
'name': 'app_url',
'mode': 'NULLABLE'
},
{
'type': 'STRING',
'name': 'user_list',
'mode': 'NULLABLE'
},
{
'type': 'STRING',
'name': 'impressions',
'mode': 'NULLABLE'
},
{
'type': 'STRING',
'name': 'uniques',
'mode': 'NULLABLE'
}
],
'table': 'us_audience_app'
}
}
}
},
{
'sheets': {
'auth': 'user',
'sheet': {'field': {'name': 'recipe_name','prefix': 'cTV App Match Table ','kind': 'string','order': 1,'description': 'Name of document to deploy to.','default': ''}},
'tab': 'data',
'range': 'A:Z',
'header': True,
'out': {
'auth': 'user',
'bigquery': {
'dataset': {'field': {'name': 'dataset','kind': 'string','description': 'BigQuery Dataset where all data will live.'}},
'table': 'CTV_App_Lookup',
'schema': [
{
'type': 'STRING',
'name': 'Publisher_Name',
'mode': 'NULLABLE'
},
{
'type': 'STRING',
'name': 'CTV_App_name',
'mode': 'NULLABLE'
}
]
}
}
}
},
{
'bigquery': {
'description': 'The query to join all the IAR reports into an Affinity Index.',
'auth': 'user',
'from': {
'query': "SELECT audience_app.app_url, audience_app.ctv_app_name, IF (audience_app.app_url LIKE '%Android%' OR audience_app.app_url LIKE '%iOS', 'App', 'Domain') AS app_or_domain, audience_app.user_list AS audience_list, audience_app.Potential_Impressions AS audience_app_impressions, audience_app.Unique_Cookies_With_Impressions AS audience_app_uniques, audience_baseline.Potential_Impressions AS audience_baseline_impressions, audience_baseline.Unique_Cookies_With_Impressions AS audience_baseline_uniques, country_app.Potential_Impressions AS country_app_impressions, country_app.Unique_Cookies_With_Impressions AS country_app_uniques, country_baseline.Potential_Impressions AS country_baseline_impressions, country_baseline.Unique_Cookies_With_Impressions AS country_baseline_uniques, ((audience_app.Unique_Cookies_With_Impressions/NULLIF(audience_baseline.Unique_Cookies_With_Impressions, 0))/NULLIF((country_app.Unique_Cookies_With_Impressions/NULLIF(CAST(country_baseline.Unique_Cookies_With_Impressions AS int64), 0)), 0))*100 AS affinity_index FROM ( SELECT user_list, CAST( IF (impressions LIKE '%< 1000%', 0, CAST(impressions AS int64)) AS int64) AS potential_impressions, CAST( IF (uniques LIKE '%< 100%', 0, CAST(uniques AS int64)) AS int64) AS unique_cookies_with_impressions FROM `[PARAMETER].[PARAMETER].us_audience_baseline` ) AS audience_baseline JOIN ( SELECT ctv_app.CTV_App_name AS ctv_app_name, user_list, app_url, CAST( IF (impressions LIKE '%< 1000%', 0, CAST(impressions AS int64)) AS int64) AS potential_impressions, CAST( IF (uniques LIKE '%< 1000%', 0, CAST(uniques AS int64)) AS int64) AS unique_cookies_with_impressions FROM `[PARAMETER].[PARAMETER].us_audience_app` AS a LEFT JOIN `[PARAMETER].[PARAMETER].CTV_App_Lookup` AS ctv_app ON a.app_url = ctv_app.Publisher_Name ) AS audience_app ON audience_baseline.user_list = audience_app.user_list LEFT JOIN ( SELECT app_url, CAST( IF (CAST(impressions AS STRING) LIKE '%< 1000%', 0, CAST(impressions AS int64)) AS int64) AS Potential_Impressions, CAST( IF (CAST(uniques AS STRING) LIKE '%< 1000%', 0, CAST(uniques AS int64)) AS int64) AS Unique_Cookies_With_Impressions FROM `[PARAMETER].[PARAMETER].us_country_app` ) AS country_app ON country_app.app_url = audience_app.app_url CROSS JOIN ( SELECT CAST( IF (CAST(impressions AS STRING) LIKE '%< 1000%', 0, CAST(impressions AS int64)) AS int64) AS Potential_Impressions, CAST( IF (CAST(uniques AS STRING) LIKE '%< 1000%', 0, CAST(uniques AS int64)) AS int64) AS Unique_Cookies_With_Impressions FROM `[PARAMETER].[PARAMETER].us_country_baseline` ) AS country_baseline",
'legacy': False,
'parameters': [
{'field': {'name': 'recipe_project','kind': 'string','description': 'Project where BigQuery dataset will be created.'}},
{'field': {'name': 'dataset','kind': 'string','description': 'Place where tables will be written in BigQuery.'}},
{'field': {'name': 'recipe_project','kind': 'string','description': 'Project where BigQuery dataset will be created.'}},
{'field': {'name': 'dataset','kind': 'string','description': 'Place where tables will be written in BigQuery.'}},
{'field': {'name': 'recipe_project','kind': 'string','description': 'Project where BigQuery dataset will be created.'}},
{'field': {'name': 'dataset','kind': 'string','description': 'Place where tables will be written in BigQuery.'}},
{'field': {'name': 'recipe_project','kind': 'string','description': 'Project where BigQuery dataset will be created.'}},
{'field': {'name': 'dataset','kind': 'string','description': 'Place where tables will be written in BigQuery.'}},
{'field': {'name': 'recipe_project','kind': 'string','description': 'Project where BigQuery dataset will be created.'}},
{'field': {'name': 'dataset','kind': 'string','description': 'Place where tables will be written in BigQuery.'}},
{'field': {'name': 'recipe_project','kind': 'string','description': 'Project where BigQuery dataset will be created.'}},
{'field': {'name': 'dataset','kind': 'string','description': 'Place where tables will be written in BigQuery.'}}
]
},
'to': {
'dataset': {'field': {'name': 'dataset','kind': 'string','description': 'BigQuery Dataset where all data will live.'}},
'table': 'final_table'
}
}
}
]
json_set_fields(TASKS, FIELDS)
project.initialize(_recipe={ 'tasks':TASKS }, _project=CLOUD_PROJECT, _user=USER_CREDENTIALS, _client=CLIENT_CREDENTIALS, _verbose=True, _force=True)
project.execute(_force=True)
###Output
_____no_output_____
###Markdown
cTV Inventory Availability Dashboard: The cTV Audience Affinity dashboard is designed to give clients insights into which cTV apps their audiences have a high affinity for using. The goal of this dashboard is to provide some assistance with the lack of audience targeting for cTV within DV360. License: Copyright 2020 Google LLC, Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at https://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. Disclaimer: This is not an officially supported Google product. It is a reference implementation. There is absolutely NO WARRANTY provided for using this code. The code is Apache Licensed and CAN BE fully modified, white labeled, and disassembled by your team. This code was generated (see starthinker/scripts for possible source): - **Command**: "python starthinker_ui/manage.py colab" - **Command**: "python starthinker/tools/colab.py [JSON RECIPE]" 1. Install Dependencies: First install the libraries needed to execute recipes, this only needs to be done once, then click play.
###Code
!pip install git+https://github.com/google/starthinker
###Output
_____no_output_____
###Markdown
2. Set Configuration: This code is required to initialize the project. Fill in required fields and press play. 1. If the recipe uses a Google Cloud Project: - Set the configuration **project** value to the project identifier from [these instructions](https://github.com/google/starthinker/blob/master/tutorials/cloud_project.md). 1. If the recipe has **auth** set to **user**: - If you have user credentials: - Set the configuration **user** value to your user credentials JSON. - If you DO NOT have user credentials: - Set the configuration **client** value to [downloaded client credentials](https://github.com/google/starthinker/blob/master/tutorials/cloud_client_installed.md). 1. If the recipe has **auth** set to **service**: - Set the configuration **service** value to [downloaded service credentials](https://github.com/google/starthinker/blob/master/tutorials/cloud_service.md).
###Code
from starthinker.util.configuration import Configuration
CONFIG = Configuration(
project="",
client={},
service={},
user="/content/user.json",
verbose=True
)
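# All tasks in this recipe declare 'auth': 'user', so the user credentials
# path (or, if you have none, the client credentials) is the value that
# matters here; see the notes in the cell above.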
###Output
_____no_output_____
###Markdown
3. Enter cTV Inventory Availability Dashboard Recipe Parameters: 1. Find instructions and recommendations for this dashboard here. Modify the values below for your use case; this can be done multiple times, then click play.
###Code
FIELDS = {
'dataset': '', # BigQuery Dataset where all data will live.
'recipe_project': '', # Project where BigQuery dataset will be created.
'recipe_name': '', # Name of document to deploy to.
'auth_write': 'service', # Credentials used for writing data.
'partner_id': '', # DV360 Partner id.
'auth_read': 'user', # Credentials used for reading data.
'audience_ids': '', # Comma separated list of Audience Ids
}
print("Parameters Set To: %s" % FIELDS)
###Output
_____no_output_____
###Markdown
4. Execute cTV Inventory Availability Dashboard: This does NOT need to be modified unless you are changing the recipe, click play.
###Code
from starthinker.util.configuration import execute
from starthinker.util.recipe import json_set_fields
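# This cell uses the newer StarThinker entry points (Configuration plus
# execute()) instead of the project.initialize()/project.execute() pattern
# used in the earlier cells.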
TASKS = [
{
'drive': {
'auth': 'user',
'copy': {
'source': 'https://docs.google.com/spreadsheets/d/1PPPk2b4gGJHNgQ4hXLiTKzH8pRIdlF5fNy9VCw1v7tM/',
'destination': {'field': {'name': 'recipe_name', 'prefix': 'cTV App Match Table ', 'kind': 'string', 'order': 1, 'description': 'Name of document to deploy to.', 'default': ''}}
}
}
},
{
'dataset': {
'auth': 'user',
'dataset': {'field': {'name': 'dataset', 'kind': 'string', 'order': 3, 'default': '', 'description': 'BigQuery Dataset where all data will live.'}}
}
},
{
'dbm': {
'auth': 'user',
'report': {
'body': {
'timezoneCode': 'America/Los_Angeles',
'kind': 'doubleclickbidmanager#query',
'metadata': {
'title': {'field': {'name': 'recipe_name', 'kind': 'string', 'prefix': 'us_country_app_'}},
'dataRange': 'LAST_30_DAYS',
'format': 'CSV',
'sendNotification': False
},
'params': {
'type': 'TYPE_INVENTORY_AVAILABILITY',
'groupBys': [
'FILTER_APP_URL'
],
'filters': [
{
'type': 'FILTER_PARTNER',
'value': {'field': {'name': 'partner_id', 'kind': 'integer', 'order': 1, 'description': 'DV360 Partner id.'}}
},
{
'type': 'FILTER_INVENTORY_FORMAT',
'value': 'VIDEO'
},
{
'type': 'FILTER_COUNTRY',
'value': 'US'
}
],
'metrics': [
'METRIC_BID_REQUESTS',
'METRIC_UNIQUE_VISITORS_COOKIES'
],
'includeInviteData': True
},
'schedule': {
'frequency': 'DAILY',
'nextRunMinuteOfDay': 0,
'nextRunTimezoneCode': 'America/Los_Angeles',
'endTimeMs': 7983727200000
}
}
},
'out': {
'bigquery': {
'dataset': {'field': {'name': 'dataset', 'kind': 'string', 'order': 3, 'default': '', 'description': 'BigQuery Dataset where all data will live.'}},
'schema': [
{
'type': 'STRING',
'name': 'app_url',
'mode': 'NULLABLE'
},
{
'type': 'STRING',
'name': 'impressions',
'mode': 'NULLABLE'
},
{
'type': 'STRING',
'name': 'uniques',
'mode': 'NULLABLE'
}
],
'table': 'us_country_app',
'header': True
}
}
}
},
{
'dbm': {
'auth': 'user',
'report': {
'body': {
'timezoneCode': 'America/Los_Angeles',
'kind': 'doubleclickbidmanager#query',
'metadata': {
'title': {'field': {'name': 'recipe_name', 'kind': 'string', 'prefix': 'us_country_baseline_'}},
'dataRange': 'LAST_30_DAYS',
'format': 'CSV',
'sendNotification': False
},
'params': {
'type': 'TYPE_INVENTORY_AVAILABILITY',
'filters': [
{
'type': 'FILTER_PARTNER',
'value': {'field': {'name': 'partner_id', 'kind': 'integer', 'order': 1, 'description': 'DV360 Partner id.'}}
},
{
'type': 'FILTER_COUNTRY',
'value': 'US'
}
],
'metrics': [
'METRIC_BID_REQUESTS',
'METRIC_UNIQUE_VISITORS_COOKIES'
],
'includeInviteData': True
},
'schedule': {
'frequency': 'DAILY',
'nextRunMinuteOfDay': 0,
'nextRunTimezoneCode': 'America/Los_Angeles',
'endTimeMs': 7983727200000
}
}
},
'out': {
'bigquery': {
'dataset': {'field': {'name': 'dataset', 'kind': 'string', 'order': 3, 'default': '', 'description': 'BigQuery Dataset where all data will live.'}},
'schema': [
{
'type': 'STRING',
'name': 'impressions',
'mode': 'NULLABLE'
},
{
'type': 'STRING',
'name': 'uniques',
'mode': 'NULLABLE'
}
],
'table': 'us_country_baseline',
'header': True
}
}
}
},
{
'dbm': {
'auth': 'user',
'report': {
'filters': {
'FILTER_USER_LIST': {
'single_cell': True,
'values': {'field': {'name': 'audience_ids', 'kind': 'integer_list', 'order': 2, 'description': 'Comma separated list of Audience Ids'}}
}
},
'body': {
'timezoneCode': 'America/Los_Angeles',
'kind': 'doubleclickbidmanager#query',
'metadata': {
'title': {'field': {'name': 'recipe_name', 'kind': 'string', 'prefix': 'us_audience_baseline_'}},
'dataRange': 'LAST_30_DAYS',
'format': 'CSV',
'sendNotification': False
},
'params': {
'type': 'TYPE_INVENTORY_AVAILABILITY',
'groupBys': [
'FILTER_AUDIENCE_LIST'
],
'filters': [
{
'type': 'FILTER_PARTNER',
'value': {'field': {'name': 'partner_id', 'kind': 'integer', 'order': 1, 'description': 'DV360 Partner id.'}}
},
{
'type': 'FILTER_COUNTRY',
'value': 'US'
}
],
'metrics': [
'METRIC_BID_REQUESTS',
'METRIC_UNIQUE_VISITORS_COOKIES'
],
'includeInviteData': True
},
'schedule': {
'frequency': 'DAILY',
'nextRunMinuteOfDay': 0,
'nextRunTimezoneCode': 'America/Los_Angeles',
'endTimeMs': 7983727200000
}
}
},
'out': {
'bigquery': {
'dataset': {'field': {'name': 'dataset', 'kind': 'string', 'order': 3, 'default': '', 'description': 'BigQuery Dataset where all data will live.'}},
'schema': [
{
'type': 'STRING',
'name': 'user_list',
'mode': 'NULLABLE'
},
{
'type': 'STRING',
'name': 'impressions',
'mode': 'NULLABLE'
},
{
'type': 'STRING',
'name': 'uniques',
'mode': 'NULLABLE'
}
],
'table': 'us_audience_baseline',
'header': True
}
}
}
},
{
'dbm': {
'auth': 'user',
'report': {
'filters': {
'FILTER_USER_LIST': {
'single_cell': True,
'values': {'field': {'name': 'audience_ids', 'kind': 'integer_list', 'order': 2, 'description': 'Comma separated list of Audience Ids'}}
}
},
'body': {
'timezoneCode': 'America/Los_Angeles',
'kind': 'doubleclickbidmanager#query',
'metadata': {
'title': {'field': {'name': 'recipe_name', 'kind': 'string', 'prefix': 'us_audience_app_'}},
'dataRange': 'LAST_30_DAYS',
'format': 'CSV',
'sendNotification': False
},
'params': {
'type': 'TYPE_INVENTORY_AVAILABILITY',
'groupBys': [
'FILTER_APP_URL',
'FILTER_AUDIENCE_LIST'
],
'filters': [
{
'type': 'FILTER_PARTNER',
'value': {'field': {'name': 'partner_id', 'kind': 'integer', 'order': 1, 'description': 'DV360 Partner id.'}}
},
{
'type': 'FILTER_INVENTORY_FORMAT',
'value': 'VIDEO'
},
{
'type': 'FILTER_COUNTRY',
'value': 'US'
}
],
'metrics': [
'METRIC_BID_REQUESTS',
'METRIC_UNIQUE_VISITORS_COOKIES'
],
'includeInviteData': True
},
'schedule': {
'frequency': 'DAILY',
'nextRunMinuteOfDay': 0,
'nextRunTimezoneCode': 'America/Los_Angeles',
'endTimeMs': 7983727200000
}
}
},
'out': {
'bigquery': {
'dataset': {'field': {'name': 'dataset', 'kind': 'string', 'order': 3, 'default': '', 'description': 'BigQuery Dataset where all data will live.'}},
'schema': [
{
'type': 'STRING',
'name': 'app_url',
'mode': 'NULLABLE'
},
{
'type': 'STRING',
'name': 'user_list',
'mode': 'NULLABLE'
},
{
'type': 'STRING',
'name': 'impressions',
'mode': 'NULLABLE'
},
{
'type': 'STRING',
'name': 'uniques',
'mode': 'NULLABLE'
}
],
'table': 'us_audience_app',
'header': True
}
}
}
},
{
'sheets': {
'auth': 'user',
'sheet': {'field': {'name': 'recipe_name', 'prefix': 'cTV App Match Table ', 'kind': 'string', 'order': 1, 'description': 'Name of document to deploy to.', 'default': ''}},
'tab': 'data',
'range': 'A:Z',
'header': True,
'out': {
'auth': 'user',
'bigquery': {
'dataset': {'field': {'name': 'dataset', 'kind': 'string', 'description': 'BigQuery Dataset where all data will live.'}},
'table': 'CTV_App_Lookup',
'schema': [
{
'type': 'STRING',
'name': 'Publisher_Name',
'mode': 'NULLABLE'
},
{
'type': 'STRING',
'name': 'CTV_App_name',
'mode': 'NULLABLE'
}
]
}
}
}
},
{
'bigquery': {
'description': 'The query to join all the IAR reports into an Affinity Index.',
'auth': 'user',
'from': {
'query': "WITH audience_app_clean AS ( SELECT ctv_app.CTV_App_name AS ctv_app_name, user_list, app_url, IF (app_url LIKE '%Android%' OR app_url LIKE '%iOS', 'App', 'Domain') AS app_or_domain, CAST( IF (cast(impressions as string) LIKE '%< 1000%', cast(0 as int64), CAST(impressions AS int64)) AS int64) AS potential_impressions, CAST( IF (uniques LIKE '%< 1000%', cast(0 as int64), CAST(uniques AS int64)) AS int64) AS unique_cookies_with_impressions FROM `[PARAMETER].[PARAMETER].us_audience_app` AS a LEFT JOIN `[PARAMETER].[PARAMETER].CTV_App_Lookup` AS ctv_app ON a.app_url = ctv_app.Publisher_Name ), us_country_app_clean AS ( SELECT a.app_url, ctv_app.CTV_App_name AS ctv_app_name, CAST( IF (CAST(a.impressions AS STRING) LIKE '%< 1000%', cast(0 as int64), CAST(a.impressions AS int64)) AS int64) AS POtential_ImpressionS, CAST( IF (CAST(a.uniques AS STRING) LIKE '%< 1000%', cast(0 as int64), CAST(a.uniques AS int64)) AS int64) AS Unique_Cookies_With_Impressions FROM `[PARAMETER].[PARAMETER].us_country_app` AS a LEFT JOIN `[PARAMETER].[PARAMETER].CTV_App_Lookup` AS ctv_app ON a.app_url = ctv_app.Publisher_Name ) SELECT audience_app.ctv_app_name, audience_app.app_or_domain, audience_app.user_list AS audience_list, audience_app.Potential_Impressions AS audience_app_impressions, audience_app.Unique_Cookies_With_Impressions AS audience_app_uniques, audience_baseline.Potential_Impressions AS audience_baseline_impressions, audience_baseline.Unique_Cookies_With_Impressions AS audience_baseline_uniques, country_app.Potential_Impressions AS country_app_impressions, country_app.Unique_Cookies_With_Impressions AS country_app_uniques, country_baseline.Potential_Impressions AS country_baseline_impressions, country_baseline.Unique_Cookies_With_Impressions AS country_baseline_uniques, ((audience_app.Unique_Cookies_With_Impressions/NULLIF(audience_baseline.Unique_Cookies_With_Impressions, 0))/NULLIF((country_app.Unique_Cookies_With_Impressions/NULLIF(CAST(country_baseline.Unique_Cookies_With_Impressions AS int64), 0)), 0))*100 AS affinity_index FROM ( SELECT user_list, CAST( IF (cast(impressions as string) LIKE '%< 1000%', cast(0 as int64), CAST(impressions AS int64)) AS int64) AS POTential_impressions, CAST( IF (uniques LIKE '%< 100%', cast(0 as int64), CAST(uniques AS int64)) AS int64) AS unique_cookies_with_impressions FROM `[PARAMETER].[PARAMETER].us_audience_baseline` ) AS audience_baseline JOIN ( SELECT ctv_app_name, user_list, app_or_domain, SUM(potential_impressions) as poTEntial_impressions, SUM(unique_cookies_with_impressions) as unique_cookies_with_impressions, FROM audience_app_clean GROUP BY ctv_app_name, user_list, app_or_domain) AS audience_app ON audience_baseline.user_list = audience_app.user_list LEFT JOIN ( SELECT ctv_app_name, SUM(potential_impressions) as potENtial_impressions, SUM(unique_cookies_with_impressions) as unique_cookies_with_impressions, FROM us_country_app_clean GROUP BY ctv_app_name) AS country_app ON country_app.ctv_app_name = audience_app.ctv_app_name CROSS JOIN ( SELECT CAST( IF (CAST(impressions AS STRING) LIKE '%< 1000%', cast(0 as int64), CAST(impressions AS int64)) AS int64) AS PotenTial_Impressions, CAST( IF (CAST(uniques AS STRING) LIKE '%< 1000%', cast(0 as int64), CAST(uniques AS int64)) AS int64) AS Unique_Cookies_With_Impressions FROM `[PARAMETER].[PARAMETER].us_country_baseline` ) AS country_baseline",
'legacy': False,
'parameters': [
{'field': {'name': 'recipe_project', 'kind': 'string', 'description': 'Project where BigQuery dataset will be created.'}},
{'field': {'name': 'dataset', 'kind': 'string', 'description': 'Place where tables will be written in BigQuery.'}},
{'field': {'name': 'recipe_project', 'kind': 'string', 'description': 'Project where BigQuery dataset will be created.'}},
{'field': {'name': 'dataset', 'kind': 'string', 'description': 'Place where tables will be written in BigQuery.'}},
{'field': {'name': 'recipe_project', 'kind': 'string', 'description': 'Project where BigQuery dataset will be created.'}},
{'field': {'name': 'dataset', 'kind': 'string', 'description': 'Place where tables will be written in BigQuery.'}},
{'field': {'name': 'recipe_project', 'kind': 'string', 'description': 'Project where BigQuery dataset will be created.'}},
{'field': {'name': 'dataset', 'kind': 'string', 'description': 'Place where tables will be written in BigQuery.'}},
{'field': {'name': 'recipe_project', 'kind': 'string', 'description': 'Project where BigQuery dataset will be created.'}},
{'field': {'name': 'dataset', 'kind': 'string', 'description': 'Place where tables will be written in BigQuery.'}},
{'field': {'name': 'recipe_project', 'kind': 'string', 'description': 'Project where BigQuery dataset will be created.'}},
{'field': {'name': 'dataset', 'kind': 'string', 'description': 'Place where tables will be written in BigQuery.'}}
]
},
'to': {
'dataset': {'field': {'name': 'dataset', 'kind': 'string', 'description': 'BigQuery Dataset where all data will live.'}},
'table': 'final_table'
}
}
}
]
json_set_fields(TASKS, FIELDS)
execute(CONFIG, TASKS, force=True)
###Output
_____no_output_____
###Markdown
1. Install Dependencies: First install the libraries needed to execute recipes, this only needs to be done once, then click play.
###Code
!pip install git+https://github.com/google/starthinker
###Output
_____no_output_____
###Markdown
2. Get Cloud Project ID: To run this recipe [requires a Google Cloud Project](https://github.com/google/starthinker/blob/master/tutorials/cloud_project.md), this only needs to be done once, then click play.
###Code
CLOUD_PROJECT = 'PASTE PROJECT ID HERE'
print("Cloud Project Set To: %s" % CLOUD_PROJECT)
###Output
_____no_output_____
###Markdown
3. Get Client Credentials: To read and write to various endpoints requires [downloading client credentials](https://github.com/google/starthinker/blob/master/tutorials/cloud_client_installed.md), this only needs to be done once, then click play.
###Code
CLIENT_CREDENTIALS = 'PASTE CREDENTIALS HERE'
print("Client Credentials Set To: %s" % CLIENT_CREDENTIALS)
###Output
_____no_output_____
###Markdown
4. Enter cTV Inventory Availability Dashboard Parameters: The cTV Audience Affinity dashboard is designed to give clients insights into which cTV apps their audiences have a high affinity for using. The goal of this dashboard is to provide some assistance with the lack of audience targeting for cTV within DV360. 1. Find instructions and recommendations for this dashboard here. Modify the values below for your use case; this can be done multiple times, then click play.
###Code
FIELDS = {
'dataset': '', # Place where tables will be written in BigQuery.
'recipe_project': '', # Project where BigQuery dataset will be created.
'partner_id': '', # DV360 Partner id.
'auth_write': 'service', # Credentials used for writing data.
'recipe_name': '', # Name of document to deploy to.
'auth_read': 'user', # Credentials used for reading data.
'audience_ids': '', # Comma separated list of Audience Ids
}
print("Parameters Set To: %s" % FIELDS)
###Output
_____no_output_____
###Markdown
5. Execute cTV Inventory Availability Dashboard: This does NOT need to be modified unless you are changing the recipe, click play.
###Code
from starthinker.util.project import project
from starthinker.script.parse import json_set_fields
USER_CREDENTIALS = '/content/user.json'
TASKS = [
{
'drive': {
'auth': 'user',
'copy': {
'source': 'https://docs.google.com/spreadsheets/d/1PPPk2b4gGJHNgQ4hXLiTKzH8pRIdlF5fNy9VCw1v7tM/',
'destination': {'field': {'prefix': 'cTV App Match Table ','order': 1,'kind': 'string','description': 'Name of document to deploy to.','name': 'recipe_name','default': ''}}
}
}
},
{
'dataset': {
'auth': 'user',
'dataset': {'field': {'description': 'BigQuery Dataset where all data will live.','name': 'dataset','order': 3,'default': '','kind': 'string'}}
}
},
{
'dbm': {
'auth': 'user',
'report': {
'body': {
'metadata': {
'format': 'CSV',
'dataRange': 'LAST_30_DAYS',
'sendNotification': False,
'title': {'field': {'kind': 'string','name': 'recipe_name','prefix': 'us_country_app_'}}
},
'kind': 'doubleclickbidmanager#query',
'timezoneCode': 'America/Los_Angeles',
'params': {
'type': 'TYPE_INVENTORY_AVAILABILITY',
'groupBys': [
'FILTER_APP_URL'
],
'filters': [
{
'type': 'FILTER_PARTNER',
'value': {'field': {'kind': 'integer','name': 'partner_id','order': 1,'description': 'DV360 Partner id.'}}
},
{
'type': 'FILTER_INVENTORY_FORMAT',
'value': 'VIDEO'
},
{
'type': 'FILTER_COUNTRY',
'value': 'US'
}
],
'includeInviteData': True,
'metrics': [
'METRIC_BID_REQUESTS',
'METRIC_UNIQUE_VISITORS_COOKIES'
]
},
'schedule': {
'frequency': 'DAILY',
'endTimeMs': 7983727200000,
'nextRunTimezoneCode': 'America/Los_Angeles',
'nextRunMinuteOfDay': 0
}
}
},
'out': {
'bigquery': {
'table': 'us_country_app',
'dataset': {'field': {'description': 'BigQuery Dataset where all data will live.','name': 'dataset','order': 3,'default': '','kind': 'string'}},
'schema': [
{
'type': 'STRING',
'name': 'app_url',
'mode': 'NULLABLE'
},
{
'type': 'STRING',
'name': 'impressions',
'mode': 'NULLABLE'
},
{
'type': 'STRING',
'name': 'uniques',
'mode': 'NULLABLE'
}
]
}
}
}
},
{
'dbm': {
'auth': 'user',
'report': {
'body': {
'metadata': {
'format': 'CSV',
'dataRange': 'LAST_30_DAYS',
'sendNotification': False,
'title': {'field': {'kind': 'string','name': 'recipe_name','prefix': 'us_country_baseline_'}}
},
'kind': 'doubleclickbidmanager#query',
'timezoneCode': 'America/Los_Angeles',
'params': {
'type': 'TYPE_INVENTORY_AVAILABILITY',
'filters': [
{
'type': 'FILTER_PARTNER',
'value': {'field': {'kind': 'integer','name': 'partner_id','order': 1,'description': 'DV360 Partner id.'}}
},
{
'type': 'FILTER_COUNTRY',
'value': 'US'
}
],
'includeInviteData': True,
'metrics': [
'METRIC_BID_REQUESTS',
'METRIC_UNIQUE_VISITORS_COOKIES'
]
},
'schedule': {
'frequency': 'DAILY',
'endTimeMs': 7983727200000,
'nextRunTimezoneCode': 'America/Los_Angeles',
'nextRunMinuteOfDay': 0
}
}
},
'out': {
'bigquery': {
'table': 'us_country_baseline',
'dataset': {'field': {'description': 'BigQuery Dataset where all data will live.','name': 'dataset','order': 3,'default': '','kind': 'string'}},
'schema': [
{
'type': 'STRING',
'name': 'impressions',
'mode': 'NULLABLE'
},
{
'type': 'STRING',
'name': 'uniques',
'mode': 'NULLABLE'
}
]
}
}
}
},
{
'dbm': {
'auth': 'user',
'report': {
'body': {
'metadata': {
'format': 'CSV',
'dataRange': 'LAST_30_DAYS',
'sendNotification': False,
'title': {'field': {'kind': 'string','name': 'recipe_name','prefix': 'us_audience_baseline_'}}
},
'kind': 'doubleclickbidmanager#query',
'timezoneCode': 'America/Los_Angeles',
'params': {
'type': 'TYPE_INVENTORY_AVAILABILITY',
'groupBys': [
'FILTER_AUDIENCE_LIST'
],
'filters': [
{
'type': 'FILTER_PARTNER',
'value': {'field': {'kind': 'integer','name': 'partner_id','order': 1,'description': 'DV360 Partner id.'}}
},
{
'type': 'FILTER_COUNTRY',
'value': 'US'
}
],
'includeInviteData': True,
'metrics': [
'METRIC_BID_REQUESTS',
'METRIC_UNIQUE_VISITORS_COOKIES'
]
},
'schedule': {
'frequency': 'DAILY',
'endTimeMs': 7983727200000,
'nextRunTimezoneCode': 'America/Los_Angeles',
'nextRunMinuteOfDay': 0
}
},
'filters': {
'FILTER_USER_LIST': {
'single_cell': True,
'values': {'field': {'kind': 'integer_list','name': 'audience_ids','order': 2,'description': 'Comma separated list of Audience Ids'}}
}
}
},
'out': {
'bigquery': {
'table': 'us_audience_baseline',
'dataset': {'field': {'description': 'BigQuery Dataset where all data will live.','name': 'dataset','order': 3,'default': '','kind': 'string'}},
'schema': [
{
'type': 'STRING',
'name': 'user_list',
'mode': 'NULLABLE'
},
{
'type': 'STRING',
'name': 'impressions',
'mode': 'NULLABLE'
},
{
'type': 'STRING',
'name': 'uniques',
'mode': 'NULLABLE'
}
]
}
}
}
},
{
'dbm': {
'auth': 'user',
'report': {
'body': {
'metadata': {
'format': 'CSV',
'dataRange': 'LAST_30_DAYS',
'sendNotification': False,
'title': {'field': {'kind': 'string','name': 'recipe_name','prefix': 'us_audience_app_'}}
},
'kind': 'doubleclickbidmanager#query',
'timezoneCode': 'America/Los_Angeles',
'params': {
'type': 'TYPE_INVENTORY_AVAILABILITY',
'groupBys': [
'FILTER_APP_URL',
'FILTER_AUDIENCE_LIST'
],
'filters': [
{
'type': 'FILTER_PARTNER',
'value': {'field': {'kind': 'integer','name': 'partner_id','order': 1,'description': 'DV360 Partner id.'}}
},
{
'type': 'FILTER_INVENTORY_FORMAT',
'value': 'VIDEO'
},
{
'type': 'FILTER_COUNTRY',
'value': 'US'
}
],
'includeInviteData': True,
'metrics': [
'METRIC_BID_REQUESTS',
'METRIC_UNIQUE_VISITORS_COOKIES'
]
},
'schedule': {
'frequency': 'DAILY',
'endTimeMs': 7983727200000,
'nextRunTimezoneCode': 'America/Los_Angeles',
'nextRunMinuteOfDay': 0
}
},
'filters': {
'FILTER_USER_LIST': {
'single_cell': True,
'values': {'field': {'kind': 'integer_list','name': 'audience_ids','order': 2,'description': 'Comma separated list of Audience Ids'}}
}
}
},
'out': {
'bigquery': {
'table': 'us_audience_app',
'dataset': {'field': {'description': 'BigQuery Dataset where all data will live.','name': 'dataset','order': 3,'default': '','kind': 'string'}},
'schema': [
{
'type': 'STRING',
'name': 'app_url',
'mode': 'NULLABLE'
},
{
'type': 'STRING',
'name': 'user_list',
'mode': 'NULLABLE'
},
{
'type': 'STRING',
'name': 'impressions',
'mode': 'NULLABLE'
},
{
'type': 'STRING',
'name': 'uniques',
'mode': 'NULLABLE'
}
]
}
}
}
},
{
'sheets': {
'sheet': {'field': {'prefix': 'cTV App Match Table ','order': 1,'kind': 'string','description': 'Name of document to deploy to.','name': 'recipe_name','default': ''}},
'out': {
'auth': 'user',
'bigquery': {
'table': 'CTV_App_Lookup',
'dataset': {'field': {'kind': 'string','name': 'dataset','description': 'BigQuery Dataset where all data will live.'}},
'schema': [
{
'type': 'STRING',
'name': 'Publisher_Name',
'mode': 'NULLABLE'
},
{
'type': 'STRING',
'name': 'CTV_App_name',
'mode': 'NULLABLE'
}
]
}
},
'auth': 'user',
'range': 'A:Z',
'header': True,
'tab': 'data'
}
},
{
'bigquery': {
'auth': 'user',
'description': 'The query to join all the IAR reports into an Affinity Index.',
'to': {
'table': 'final_table',
'dataset': {'field': {'kind': 'string','name': 'dataset','description': 'BigQuery Dataset where all data will live.'}}
},
'from': {
'legacy': False,
'query': "SELECT audience_app.app_url, audience_app.ctv_app_name, IF (audience_app.app_url LIKE '%Android%' OR audience_app.app_url LIKE '%iOS', 'App', 'Domain') AS app_or_domain, audience_app.user_list AS audience_list, audience_app.Potential_Impressions AS audience_app_impressions, audience_app.Unique_Cookies_With_Impressions AS audience_app_uniques, audience_baseline.Potential_Impressions AS audience_baseline_impressions, audience_baseline.Unique_Cookies_With_Impressions AS audience_baseline_uniques, country_app.Potential_Impressions AS country_app_impressions, country_app.Unique_Cookies_With_Impressions AS country_app_uniques, country_baseline.Potential_Impressions AS country_baseline_impressions, country_baseline.Unique_Cookies_With_Impressions AS country_baseline_uniques, ((audience_app.Unique_Cookies_With_Impressions/NULLIF(audience_baseline.Unique_Cookies_With_Impressions, 0))/NULLIF((country_app.Unique_Cookies_With_Impressions/NULLIF(CAST(country_baseline.Unique_Cookies_With_Impressions AS int64), 0)), 0))*100 AS affinity_index FROM ( SELECT user_list, CAST( IF (impressions LIKE '%< 1000%', 0, CAST(impressions AS int64)) AS int64) AS potential_impressions, CAST( IF (uniques LIKE '%< 100%', 0, CAST(uniques AS int64)) AS int64) AS unique_cookies_with_impressions FROM `[PARAMETER].[PARAMETER].us_audience_baseline` ) AS audience_baseline JOIN ( SELECT ctv_app.CTV_App_name AS ctv_app_name, user_list, app_url, CAST( IF (impressions LIKE '%< 1000%', 0, CAST(impressions AS int64)) AS int64) AS potential_impressions, CAST( IF (uniques LIKE '%< 1000%', 0, CAST(uniques AS int64)) AS int64) AS unique_cookies_with_impressions FROM `[PARAMETER].[PARAMETER].us_audience_app` AS a LEFT JOIN `[PARAMETER].[PARAMETER].CTV_App_Lookup` AS ctv_app ON a.app_url = ctv_app.Publisher_Name ) AS audience_app ON audience_baseline.user_list = audience_app.user_list LEFT JOIN ( SELECT app_url, CAST( IF (CAST(impressions AS STRING) LIKE '%< 1000%', 0, CAST(impressions AS int64)) AS int64) AS Potential_Impressions, CAST( IF (CAST(uniques AS STRING) LIKE '%< 1000%', 0, CAST(uniques AS int64)) AS int64) AS Unique_Cookies_With_Impressions FROM `[PARAMETER].[PARAMETER].us_country_app` ) AS country_app ON country_app.app_url = audience_app.app_url CROSS JOIN ( SELECT CAST( IF (CAST(impressions AS STRING) LIKE '%< 1000%', 0, CAST(impressions AS int64)) AS int64) AS Potential_Impressions, CAST( IF (CAST(uniques AS STRING) LIKE '%< 1000%', 0, CAST(uniques AS int64)) AS int64) AS Unique_Cookies_With_Impressions FROM `[PARAMETER].[PARAMETER].us_country_baseline` ) AS country_baseline",
'parameters': [
{'field': {'kind': 'string','name': 'recipe_project','description': 'Project where BigQuery dataset will be created.'}},
{'field': {'kind': 'string','name': 'dataset','description': 'Place where tables will be written in BigQuery.'}},
{'field': {'kind': 'string','name': 'recipe_project','description': 'Project where BigQuery dataset will be created.'}},
{'field': {'kind': 'string','name': 'dataset','description': 'Place where tables will be written in BigQuery.'}},
{'field': {'kind': 'string','name': 'recipe_project','description': 'Project where BigQuery dataset will be created.'}},
{'field': {'kind': 'string','name': 'dataset','description': 'Place where tables will be written in BigQuery.'}},
{'field': {'kind': 'string','name': 'recipe_project','description': 'Project where BigQuery dataset will be created.'}},
{'field': {'kind': 'string','name': 'dataset','description': 'Place where tables will be written in BigQuery.'}},
{'field': {'kind': 'string','name': 'recipe_project','description': 'Project where BigQuery dataset will be created.'}},
{'field': {'kind': 'string','name': 'dataset','description': 'Place where tables will be written in BigQuery.'}},
{'field': {'kind': 'string','name': 'recipe_project','description': 'Project where BigQuery dataset will be created.'}},
{'field': {'kind': 'string','name': 'dataset','description': 'Place where tables will be written in BigQuery.'}}
]
}
}
}
]
json_set_fields(TASKS, FIELDS)
project.initialize(_recipe={ 'tasks':TASKS }, _project=CLOUD_PROJECT, _user=USER_CREDENTIALS, _client=CLIENT_CREDENTIALS, _verbose=True, _force=True)
project.execute(_force=True)
###Output
_____no_output_____
###Markdown
1. Install DependenciesFirst install the libraries needed to execute recipes, this only needs to be done once, then click play.
###Code
!pip install git+https://github.com/google/starthinker
###Output
_____no_output_____
###Markdown
2. Get Cloud Project IDTo run this recipe [requires a Google Cloud Project](https://github.com/google/starthinker/blob/master/tutorials/cloud_project.md), this only needs to be done once, then click play.
###Code
CLOUD_PROJECT = 'PASTE PROJECT ID HERE'
print("Cloud Project Set To: %s" % CLOUD_PROJECT)
###Output
_____no_output_____
###Markdown
3. Get Client CredentialsTo read and write to various endpoints requires [downloading client credentials](https://github.com/google/starthinker/blob/master/tutorials/cloud_client_installed.md), this only needs to be done once, then click play.
###Code
CLIENT_CREDENTIALS = 'PASTE CREDENTIALS HERE'
print("Client Credentials Set To: %s" % CLIENT_CREDENTIALS)
###Output
_____no_output_____
###Markdown
4. Enter cTV Inventory Availability Dashboard ParametersThe cTV Audience Affinity dashboard is designed to give clients insights into which cTV apps their audiences have a high affinity for using. The goal of this dashboard is to provide some assistance with the lack of audience targeting for cTV within DV360. 1. Find instructions and recommendations for this dashboard hereModify the values below for your use case, can be done multiple times, then click play.
###Code
FIELDS = {
'dataset': '', # BigQuery Dataset where all data will live.
'recipe_project': '', # Project where BigQuery dataset will be created.
'recipe_name': '', # Name of document to deploy to.
'auth_write': 'service', # Credentials used for writing data.
'partner_id': '', # DV360 Partner id.
'auth_read': 'user', # Credentials used for reading data.
'audience_ids': '', # Comma separated list of Audience Ids
}
print("Parameters Set To: %s" % FIELDS)
###Output
_____no_output_____
###Markdown
5. Execute cTV Inventory Availability DashboardThis does NOT need to be modified unless you are changing the recipe, click play.
###Code
from starthinker.util.project import project
from starthinker.script.parse import json_set_fields, json_expand_includes
USER_CREDENTIALS = '/content/user.json'
TASKS = [
{
'drive': {
'auth': 'user',
'copy': {
'source': 'https://docs.google.com/spreadsheets/d/1PPPk2b4gGJHNgQ4hXLiTKzH8pRIdlF5fNy9VCw1v7tM/',
'destination': {'field': {'name': 'recipe_name','prefix': 'cTV App Match Table ','kind': 'string','order': 1,'description': 'Name of document to deploy to.','default': ''}}
}
}
},
{
'dataset': {
'auth': 'user',
'dataset': {'field': {'name': 'dataset','kind': 'string','order': 3,'default': '','description': 'BigQuery Dataset where all data will live.'}}
}
},
{
'dbm': {
'auth': 'user',
'report': {
'body': {
'timezoneCode': 'America/Los_Angeles',
'kind': 'doubleclickbidmanager#query',
'metadata': {
'title': {'field': {'name': 'recipe_name','kind': 'string','prefix': 'us_country_app_'}},
'dataRange': 'LAST_30_DAYS',
'format': 'CSV',
'sendNotification': False
},
'params': {
'type': 'TYPE_INVENTORY_AVAILABILITY',
'groupBys': [
'FILTER_APP_URL'
],
'filters': [
{
'type': 'FILTER_PARTNER',
'value': {'field': {'name': 'partner_id','kind': 'integer','order': 1,'description': 'DV360 Partner id.'}}
},
{
'type': 'FILTER_INVENTORY_FORMAT',
'value': 'VIDEO'
},
{
'type': 'FILTER_COUNTRY',
'value': 'US'
}
],
'metrics': [
'METRIC_BID_REQUESTS',
'METRIC_UNIQUE_VISITORS_COOKIES'
],
'includeInviteData': True
},
'schedule': {
'frequency': 'DAILY',
'nextRunMinuteOfDay': 0,
'nextRunTimezoneCode': 'America/Los_Angeles',
'endTimeMs': 7983727200000
}
}
},
'out': {
'bigquery': {
'dataset': {'field': {'name': 'dataset','kind': 'string','order': 3,'default': '','description': 'BigQuery Dataset where all data will live.'}},
'schema': [
{
'type': 'STRING',
'name': 'app_url',
'mode': 'NULLABLE'
},
{
'type': 'STRING',
'name': 'impressions',
'mode': 'NULLABLE'
},
{
'type': 'STRING',
'name': 'uniques',
'mode': 'NULLABLE'
}
],
'table': 'us_country_app'
}
}
}
},
{
'dbm': {
'auth': 'user',
'report': {
'body': {
'timezoneCode': 'America/Los_Angeles',
'kind': 'doubleclickbidmanager#query',
'metadata': {
'title': {'field': {'name': 'recipe_name','kind': 'string','prefix': 'us_country_baseline_'}},
'dataRange': 'LAST_30_DAYS',
'format': 'CSV',
'sendNotification': False
},
'params': {
'type': 'TYPE_INVENTORY_AVAILABILITY',
'filters': [
{
'type': 'FILTER_PARTNER',
'value': {'field': {'name': 'partner_id','kind': 'integer','order': 1,'description': 'DV360 Partner id.'}}
},
{
'type': 'FILTER_COUNTRY',
'value': 'US'
}
],
'metrics': [
'METRIC_BID_REQUESTS',
'METRIC_UNIQUE_VISITORS_COOKIES'
],
'includeInviteData': True
},
'schedule': {
'frequency': 'DAILY',
'nextRunMinuteOfDay': 0,
'nextRunTimezoneCode': 'America/Los_Angeles',
'endTimeMs': 7983727200000
}
}
},
'out': {
'bigquery': {
'dataset': {'field': {'name': 'dataset','kind': 'string','order': 3,'default': '','description': 'BigQuery Dataset where all data will live.'}},
'schema': [
{
'type': 'STRING',
'name': 'impressions',
'mode': 'NULLABLE'
},
{
'type': 'STRING',
'name': 'uniques',
'mode': 'NULLABLE'
}
],
'table': 'us_country_baseline'
}
}
}
},
{
'dbm': {
'auth': 'user',
'report': {
'filters': {
'FILTER_USER_LIST': {
'single_cell': True,
'values': {'field': {'name': 'audience_ids','kind': 'integer_list','order': 2,'description': 'Comma separated list of Audience Ids'}}
}
},
'body': {
'timezoneCode': 'America/Los_Angeles',
'kind': 'doubleclickbidmanager#query',
'metadata': {
'title': {'field': {'name': 'recipe_name','kind': 'string','prefix': 'us_audience_baseline_'}},
'dataRange': 'LAST_30_DAYS',
'format': 'CSV',
'sendNotification': False
},
'params': {
'type': 'TYPE_INVENTORY_AVAILABILITY',
'groupBys': [
'FILTER_AUDIENCE_LIST'
],
'filters': [
{
'type': 'FILTER_PARTNER',
'value': {'field': {'name': 'partner_id','kind': 'integer','order': 1,'description': 'DV360 Partner id.'}}
},
{
'type': 'FILTER_COUNTRY',
'value': 'US'
}
],
'metrics': [
'METRIC_BID_REQUESTS',
'METRIC_UNIQUE_VISITORS_COOKIES'
],
'includeInviteData': True
},
'schedule': {
'frequency': 'DAILY',
'nextRunMinuteOfDay': 0,
'nextRunTimezoneCode': 'America/Los_Angeles',
'endTimeMs': 7983727200000
}
}
},
'out': {
'bigquery': {
'dataset': {'field': {'name': 'dataset','kind': 'string','order': 3,'default': '','description': 'BigQuery Dataset where all data will live.'}},
'schema': [
{
'type': 'STRING',
'name': 'user_list',
'mode': 'NULLABLE'
},
{
'type': 'STRING',
'name': 'impressions',
'mode': 'NULLABLE'
},
{
'type': 'STRING',
'name': 'uniques',
'mode': 'NULLABLE'
}
],
'table': 'us_audience_baseline'
}
}
}
},
{
'dbm': {
'auth': 'user',
'report': {
'filters': {
'FILTER_USER_LIST': {
'single_cell': True,
'values': {'field': {'name': 'audience_ids','kind': 'integer_list','order': 2,'description': 'Comma separated list of Audience Ids'}}
}
},
'body': {
'timezoneCode': 'America/Los_Angeles',
'kind': 'doubleclickbidmanager#query',
'metadata': {
'title': {'field': {'name': 'recipe_name','kind': 'string','prefix': 'us_audience_app_'}},
'dataRange': 'LAST_30_DAYS',
'format': 'CSV',
'sendNotification': False
},
'params': {
'type': 'TYPE_INVENTORY_AVAILABILITY',
'groupBys': [
'FILTER_APP_URL',
'FILTER_AUDIENCE_LIST'
],
'filters': [
{
'type': 'FILTER_PARTNER',
'value': {'field': {'name': 'partner_id','kind': 'integer','order': 1,'description': 'DV360 Partner id.'}}
},
{
'type': 'FILTER_INVENTORY_FORMAT',
'value': 'VIDEO'
},
{
'type': 'FILTER_COUNTRY',
'value': 'US'
}
],
'metrics': [
'METRIC_BID_REQUESTS',
'METRIC_UNIQUE_VISITORS_COOKIES'
],
'includeInviteData': True
},
'schedule': {
'frequency': 'DAILY',
'nextRunMinuteOfDay': 0,
'nextRunTimezoneCode': 'America/Los_Angeles',
'endTimeMs': 7983727200000
}
}
},
'out': {
'bigquery': {
'dataset': {'field': {'name': 'dataset','kind': 'string','order': 3,'default': '','description': 'BigQuery Dataset where all data will live.'}},
'schema': [
{
'type': 'STRING',
'name': 'app_url',
'mode': 'NULLABLE'
},
{
'type': 'STRING',
'name': 'user_list',
'mode': 'NULLABLE'
},
{
'type': 'STRING',
'name': 'impressions',
'mode': 'NULLABLE'
},
{
'type': 'STRING',
'name': 'uniques',
'mode': 'NULLABLE'
}
],
'table': 'us_audience_app'
}
}
}
},
{
'sheets': {
'auth': 'user',
'sheet': {'field': {'name': 'recipe_name','prefix': 'cTV App Match Table ','kind': 'string','order': 1,'description': 'Name of document to deploy to.','default': ''}},
'tab': 'data',
'range': 'A:Z',
'header': True,
'out': {
'auth': 'user',
'bigquery': {
'dataset': {'field': {'name': 'dataset','kind': 'string','description': 'BigQuery Dataset where all data will live.'}},
'table': 'CTV_App_Lookup',
'schema': [
{
'type': 'STRING',
'name': 'Publisher_Name',
'mode': 'NULLABLE'
},
{
'type': 'STRING',
'name': 'CTV_App_name',
'mode': 'NULLABLE'
}
]
}
}
}
},
{
'bigquery': {
'description': 'The query to join all the IAR reports into an Affinity Index.',
'auth': 'user',
'from': {
'query': "SELECT audience_app.app_url, audience_app.ctv_app_name, IF (audience_app.app_url LIKE '%Android%' OR audience_app.app_url LIKE '%iOS', 'App', 'Domain') AS app_or_domain, audience_app.user_list AS audience_list, audience_app.Potential_Impressions AS audience_app_impressions, audience_app.Unique_Cookies_With_Impressions AS audience_app_uniques, audience_baseline.Potential_Impressions AS audience_baseline_impressions, audience_baseline.Unique_Cookies_With_Impressions AS audience_baseline_uniques, country_app.Potential_Impressions AS country_app_impressions, country_app.Unique_Cookies_With_Impressions AS country_app_uniques, country_baseline.Potential_Impressions AS country_baseline_impressions, country_baseline.Unique_Cookies_With_Impressions AS country_baseline_uniques, ((audience_app.Unique_Cookies_With_Impressions/NULLIF(audience_baseline.Unique_Cookies_With_Impressions, 0))/NULLIF((country_app.Unique_Cookies_With_Impressions/NULLIF(CAST(country_baseline.Unique_Cookies_With_Impressions AS int64), 0)), 0))*100 AS affinity_index FROM ( SELECT user_list, CAST( IF (impressions LIKE '%< 1000%', 0, CAST(impressions AS int64)) AS int64) AS potential_impressions, CAST( IF (uniques LIKE '%< 100%', 0, CAST(uniques AS int64)) AS int64) AS unique_cookies_with_impressions FROM `[PARAMETER].[PARAMETER].us_audience_baseline` ) AS audience_baseline JOIN ( SELECT ctv_app.CTV_App_name AS ctv_app_name, user_list, app_url, CAST( IF (impressions LIKE '%< 1000%', 0, CAST(impressions AS int64)) AS int64) AS potential_impressions, CAST( IF (uniques LIKE '%< 1000%', 0, CAST(uniques AS int64)) AS int64) AS unique_cookies_with_impressions FROM `[PARAMETER].[PARAMETER].us_audience_app` AS a LEFT JOIN `[PARAMETER].[PARAMETER].CTV_App_Lookup` AS ctv_app ON a.app_url = ctv_app.Publisher_Name ) AS audience_app ON audience_baseline.user_list = audience_app.user_list LEFT JOIN ( SELECT app_url, CAST( IF (CAST(impressions AS STRING) LIKE '%< 1000%', 0, CAST(impressions AS int64)) AS int64) AS Potential_Impressions, CAST( IF (CAST(uniques AS STRING) LIKE '%< 1000%', 0, CAST(uniques AS int64)) AS int64) AS Unique_Cookies_With_Impressions FROM `[PARAMETER].[PARAMETER].us_country_app` ) AS country_app ON country_app.app_url = audience_app.app_url CROSS JOIN ( SELECT CAST( IF (CAST(impressions AS STRING) LIKE '%< 1000%', 0, CAST(impressions AS int64)) AS int64) AS Potential_Impressions, CAST( IF (CAST(uniques AS STRING) LIKE '%< 1000%', 0, CAST(uniques AS int64)) AS int64) AS Unique_Cookies_With_Impressions FROM `[PARAMETER].[PARAMETER].us_country_baseline` ) AS country_baseline",
'legacy': False,
'parameters': [
{'field': {'name': 'recipe_project','kind': 'string','description': 'Project where BigQuery dataset will be created.'}},
{'field': {'name': 'dataset','kind': 'string','description': 'Place where tables will be written in BigQuery.'}},
{'field': {'name': 'recipe_project','kind': 'string','description': 'Project where BigQuery dataset will be created.'}},
{'field': {'name': 'dataset','kind': 'string','description': 'Place where tables will be written in BigQuery.'}},
{'field': {'name': 'recipe_project','kind': 'string','description': 'Project where BigQuery dataset will be created.'}},
{'field': {'name': 'dataset','kind': 'string','description': 'Place where tables will be written in BigQuery.'}},
{'field': {'name': 'recipe_project','kind': 'string','description': 'Project where BigQuery dataset will be created.'}},
{'field': {'name': 'dataset','kind': 'string','description': 'Place where tables will be written in BigQuery.'}},
{'field': {'name': 'recipe_project','kind': 'string','description': 'Project where BigQuery dataset will be created.'}},
{'field': {'name': 'dataset','kind': 'string','description': 'Place where tables will be written in BigQuery.'}},
{'field': {'name': 'recipe_project','kind': 'string','description': 'Project where BigQuery dataset will be created.'}},
{'field': {'name': 'dataset','kind': 'string','description': 'Place where tables will be written in BigQuery.'}}
]
},
'to': {
'dataset': {'field': {'name': 'dataset','kind': 'string','description': 'BigQuery Dataset where all data will live.'}},
'table': 'final_table'
}
}
}
]
json_set_fields(TASKS, FIELDS)
json_expand_includes(TASKS)
project.initialize(_recipe={ 'tasks':TASKS }, _project=CLOUD_PROJECT, _user=USER_CREDENTIALS, _client=CLIENT_CREDENTIALS, _verbose=True)
project.execute()
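# For reference, the affinity_index produced by the BigQuery task above is
#   (audience reach share of the app) / (country-wide reach share of the app) * 100.
# A hypothetical worked example (all numbers made up, for illustration only):
audience_app_uniques = 5000.0         # audience uniques seen on this cTV app
audience_baseline_uniques = 100000.0  # audience uniques overall
country_app_uniques = 20000.0         # US uniques seen on this cTV app
country_baseline_uniques = 1000000.0  # US uniques overall
affinity_index = ((audience_app_uniques / audience_baseline_uniques)
                  / (country_app_uniques / country_baseline_uniques)) * 100
print(affinity_index)  # 250.0 -> this audience over-indexes on the app by 2.5x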
###Output
_____no_output_____
###Markdown
1. Install DependenciesFirst install the libraries needed to execute recipes, this only needs to be done once, then click play.
###Code
!pip install git+https://github.com/google/starthinker
###Output
_____no_output_____
###Markdown
2. Get Cloud Project IDTo run this recipe [requires a Google Cloud Project](https://github.com/google/starthinker/blob/master/tutorials/cloud_project.md), this only needs to be done once, then click play.
###Code
CLOUD_PROJECT = 'PASTE PROJECT ID HERE'
print("Cloud Project Set To: %s" % CLOUD_PROJECT)
###Output
_____no_output_____
###Markdown
3. Get Client CredentialsTo read and write to various endpoints requires [downloading client credentials](https://github.com/google/starthinker/blob/master/tutorials/cloud_client_installed.md), this only needs to be done once, then click play.
###Code
CLIENT_CREDENTIALS = 'PASTE CREDENTIALS HERE'
print("Client Credentials Set To: %s" % CLIENT_CREDENTIALS)
###Output
_____no_output_____
###Markdown
4. Enter cTV Inventory Availability Dashboard ParametersThe cTV Audience Affinity dashboard is designed to give clients insights into which cTV apps their audiences have a high affinity for using. The goal of this dashboard is to provide some assistance with the lack of audience targeting for cTV within DV360. 1. Find instructions and recommendations for this dashboard hereModify the values below for your use case, can be done multiple times, then click play.
###Code
FIELDS = {
'dataset': '', # BigQuery Dataset where all data will live.
'recipe_project': '', # Project where BigQuery dataset will be created.
'recipe_name': '', # Name of document to deploy to.
'auth_write': 'service', # Credentials used for writing data.
'partner_id': '', # DV360 Partner id.
'auth_read': 'user', # Credentials used for reading data.
'audience_ids': '', # Comma separated list of Audience Ids
}
print("Parameters Set To: %s" % FIELDS)
###Output
_____no_output_____
###Markdown
5. Execute cTV Inventory Availability DashboardThis does NOT need to be modified unless you are changing the recipe, click play.
###Code
from starthinker.util.configuration import Configuration
from starthinker.util.configuration import commandline_parser
from starthinker.util.configuration import execute
from starthinker.util.recipe import json_set_fields
USER_CREDENTIALS = '/content/user.json'
TASKS = [
{
'drive': {
'auth': 'user',
'copy': {
'source': 'https://docs.google.com/spreadsheets/d/1PPPk2b4gGJHNgQ4hXLiTKzH8pRIdlF5fNy9VCw1v7tM/',
'destination': {'field': {'name': 'recipe_name','prefix': 'cTV App Match Table ','kind': 'string','order': 1,'description': 'Name of document to deploy to.','default': ''}}
}
}
},
{
'dataset': {
'auth': 'user',
'dataset': {'field': {'name': 'dataset','kind': 'string','order': 3,'default': '','description': 'BigQuery Dataset where all data will live.'}}
}
},
{
'dbm': {
'auth': 'user',
'report': {
'body': {
'timezoneCode': 'America/Los_Angeles',
'kind': 'doubleclickbidmanager#query',
'metadata': {
'title': {'field': {'name': 'recipe_name','kind': 'string','prefix': 'us_country_app_'}},
'dataRange': 'LAST_30_DAYS',
'format': 'CSV',
'sendNotification': False
},
'params': {
'type': 'TYPE_INVENTORY_AVAILABILITY',
'groupBys': [
'FILTER_APP_URL'
],
'filters': [
{
'type': 'FILTER_PARTNER',
'value': {'field': {'name': 'partner_id','kind': 'integer','order': 1,'description': 'DV360 Partner id.'}}
},
{
'type': 'FILTER_INVENTORY_FORMAT',
'value': 'VIDEO'
},
{
'type': 'FILTER_COUNTRY',
'value': 'US'
}
],
'metrics': [
'METRIC_BID_REQUESTS',
'METRIC_UNIQUE_VISITORS_COOKIES'
],
'includeInviteData': True
},
'schedule': {
'frequency': 'DAILY',
'nextRunMinuteOfDay': 0,
'nextRunTimezoneCode': 'America/Los_Angeles',
'endTimeMs': 7983727200000
}
}
},
'out': {
'bigquery': {
'dataset': {'field': {'name': 'dataset','kind': 'string','order': 3,'default': '','description': 'BigQuery Dataset where all data will live.'}},
'schema': [
{
'type': 'STRING',
'name': 'app_url',
'mode': 'NULLABLE'
},
{
'type': 'STRING',
'name': 'impressions',
'mode': 'NULLABLE'
},
{
'type': 'STRING',
'name': 'uniques',
'mode': 'NULLABLE'
}
],
'table': 'us_country_app',
'header': True
}
}
}
},
{
'dbm': {
'auth': 'user',
'report': {
'body': {
'timezoneCode': 'America/Los_Angeles',
'kind': 'doubleclickbidmanager#query',
'metadata': {
'title': {'field': {'name': 'recipe_name','kind': 'string','prefix': 'us_country_baseline_'}},
'dataRange': 'LAST_30_DAYS',
'format': 'CSV',
'sendNotification': False
},
'params': {
'type': 'TYPE_INVENTORY_AVAILABILITY',
'filters': [
{
'type': 'FILTER_PARTNER',
'value': {'field': {'name': 'partner_id','kind': 'integer','order': 1,'description': 'DV360 Partner id.'}}
},
{
'type': 'FILTER_COUNTRY',
'value': 'US'
}
],
'metrics': [
'METRIC_BID_REQUESTS',
'METRIC_UNIQUE_VISITORS_COOKIES'
],
'includeInviteData': True
},
'schedule': {
'frequency': 'DAILY',
'nextRunMinuteOfDay': 0,
'nextRunTimezoneCode': 'America/Los_Angeles',
'endTimeMs': 7983727200000
}
}
},
'out': {
'bigquery': {
'dataset': {'field': {'name': 'dataset','kind': 'string','order': 3,'default': '','description': 'BigQuery Dataset where all data will live.'}},
'schema': [
{
'type': 'STRING',
'name': 'impressions',
'mode': 'NULLABLE'
},
{
'type': 'STRING',
'name': 'uniques',
'mode': 'NULLABLE'
}
],
'table': 'us_country_baseline',
'header': True
}
}
}
},
{
'dbm': {
'auth': 'user',
'report': {
'filters': {
'FILTER_USER_LIST': {
'single_cell': True,
'values': {'field': {'name': 'audience_ids','kind': 'integer_list','order': 2,'description': 'Comma separated list of Audience Ids'}}
}
},
'body': {
'timezoneCode': 'America/Los_Angeles',
'kind': 'doubleclickbidmanager#query',
'metadata': {
'title': {'field': {'name': 'recipe_name','kind': 'string','prefix': 'us_audience_baseline_'}},
'dataRange': 'LAST_30_DAYS',
'format': 'CSV',
'sendNotification': False
},
'params': {
'type': 'TYPE_INVENTORY_AVAILABILITY',
'groupBys': [
'FILTER_AUDIENCE_LIST'
],
'filters': [
{
'type': 'FILTER_PARTNER',
'value': {'field': {'name': 'partner_id','kind': 'integer','order': 1,'description': 'DV360 Partner id.'}}
},
{
'type': 'FILTER_COUNTRY',
'value': 'US'
}
],
'metrics': [
'METRIC_BID_REQUESTS',
'METRIC_UNIQUE_VISITORS_COOKIES'
],
'includeInviteData': True
},
'schedule': {
'frequency': 'DAILY',
'nextRunMinuteOfDay': 0,
'nextRunTimezoneCode': 'America/Los_Angeles',
'endTimeMs': 7983727200000
}
}
},
'out': {
'bigquery': {
'dataset': {'field': {'name': 'dataset','kind': 'string','order': 3,'default': '','description': 'BigQuery Dataset where all data will live.'}},
'schema': [
{
'type': 'STRING',
'name': 'user_list',
'mode': 'NULLABLE'
},
{
'type': 'STRING',
'name': 'impressions',
'mode': 'NULLABLE'
},
{
'type': 'STRING',
'name': 'uniques',
'mode': 'NULLABLE'
}
],
'table': 'us_audience_baseline',
'header': True
}
}
}
},
{
'dbm': {
'auth': 'user',
'report': {
'filters': {
'FILTER_USER_LIST': {
'single_cell': True,
'values': {'field': {'name': 'audience_ids','kind': 'integer_list','order': 2,'description': 'Comma separated list of Audience Ids'}}
}
},
'body': {
'timezoneCode': 'America/Los_Angeles',
'kind': 'doubleclickbidmanager#query',
'metadata': {
'title': {'field': {'name': 'recipe_name','kind': 'string','prefix': 'us_audience_app_'}},
'dataRange': 'LAST_30_DAYS',
'format': 'CSV',
'sendNotification': False
},
'params': {
'type': 'TYPE_INVENTORY_AVAILABILITY',
'groupBys': [
'FILTER_APP_URL',
'FILTER_AUDIENCE_LIST'
],
'filters': [
{
'type': 'FILTER_PARTNER',
'value': {'field': {'name': 'partner_id','kind': 'integer','order': 1,'description': 'DV360 Partner id.'}}
},
{
'type': 'FILTER_INVENTORY_FORMAT',
'value': 'VIDEO'
},
{
'type': 'FILTER_COUNTRY',
'value': 'US'
}
],
'metrics': [
'METRIC_BID_REQUESTS',
'METRIC_UNIQUE_VISITORS_COOKIES'
],
'includeInviteData': True
},
'schedule': {
'frequency': 'DAILY',
'nextRunMinuteOfDay': 0,
'nextRunTimezoneCode': 'America/Los_Angeles',
'endTimeMs': 7983727200000
}
}
},
'out': {
'bigquery': {
'dataset': {'field': {'name': 'dataset','kind': 'string','order': 3,'default': '','description': 'BigQuery Dataset where all data will live.'}},
'schema': [
{
'type': 'STRING',
'name': 'app_url',
'mode': 'NULLABLE'
},
{
'type': 'STRING',
'name': 'user_list',
'mode': 'NULLABLE'
},
{
'type': 'STRING',
'name': 'impressions',
'mode': 'NULLABLE'
},
{
'type': 'STRING',
'name': 'uniques',
'mode': 'NULLABLE'
}
],
'table': 'us_audience_app',
'header': True
}
}
}
},
{
'sheets': {
'auth': 'user',
'sheet': {'field': {'name': 'recipe_name','prefix': 'cTV App Match Table ','kind': 'string','order': 1,'description': 'Name of document to deploy to.','default': ''}},
'tab': 'data',
'range': 'A:Z',
'header': True,
'out': {
'auth': 'user',
'bigquery': {
'dataset': {'field': {'name': 'dataset','kind': 'string','description': 'BigQuery Dataset where all data will live.'}},
'table': 'CTV_App_Lookup',
'schema': [
{
'type': 'STRING',
'name': 'Publisher_Name',
'mode': 'NULLABLE'
},
{
'type': 'STRING',
'name': 'CTV_App_name',
'mode': 'NULLABLE'
}
]
}
}
}
},
{
'bigquery': {
'description': 'The query to join all the IAR reports into an Affinity Index.',
'auth': 'user',
'from': {
'query': "WITH audience_app_clean AS ( SELECT ctv_app.CTV_App_name AS ctv_app_name, user_list, app_url, IF (app_url LIKE '%Android%' OR app_url LIKE '%iOS', 'App', 'Domain') AS app_or_domain, CAST( IF (cast(impressions as string) LIKE '%< 1000%', cast(0 as int64), CAST(impressions AS int64)) AS int64) AS potential_impressions, CAST( IF (uniques LIKE '%< 1000%', cast(0 as int64), CAST(uniques AS int64)) AS int64) AS unique_cookies_with_impressions FROM `[PARAMETER].[PARAMETER].us_audience_app` AS a LEFT JOIN `[PARAMETER].[PARAMETER].CTV_App_Lookup` AS ctv_app ON a.app_url = ctv_app.Publisher_Name ), us_country_app_clean AS ( SELECT a.app_url, ctv_app.CTV_App_name AS ctv_app_name, CAST( IF (CAST(a.impressions AS STRING) LIKE '%< 1000%', cast(0 as int64), CAST(a.impressions AS int64)) AS int64) AS POtential_ImpressionS, CAST( IF (CAST(a.uniques AS STRING) LIKE '%< 1000%', cast(0 as int64), CAST(a.uniques AS int64)) AS int64) AS Unique_Cookies_With_Impressions FROM `[PARAMETER].[PARAMETER].us_country_app` AS a LEFT JOIN `[PARAMETER].[PARAMETER].CTV_App_Lookup` AS ctv_app ON a.app_url = ctv_app.Publisher_Name ) SELECT audience_app.ctv_app_name, audience_app.app_or_domain, audience_app.user_list AS audience_list, audience_app.Potential_Impressions AS audience_app_impressions, audience_app.Unique_Cookies_With_Impressions AS audience_app_uniques, audience_baseline.Potential_Impressions AS audience_baseline_impressions, audience_baseline.Unique_Cookies_With_Impressions AS audience_baseline_uniques, country_app.Potential_Impressions AS country_app_impressions, country_app.Unique_Cookies_With_Impressions AS country_app_uniques, country_baseline.Potential_Impressions AS country_baseline_impressions, country_baseline.Unique_Cookies_With_Impressions AS country_baseline_uniques, ((audience_app.Unique_Cookies_With_Impressions/NULLIF(audience_baseline.Unique_Cookies_With_Impressions, 0))/NULLIF((country_app.Unique_Cookies_With_Impressions/NULLIF(CAST(country_baseline.Unique_Cookies_With_Impressions AS int64), 0)), 0))*100 AS affinity_index FROM ( SELECT user_list, CAST( IF (cast(impressions as string) LIKE '%< 1000%', cast(0 as int64), CAST(impressions AS int64)) AS int64) AS POTential_impressions, CAST( IF (uniques LIKE '%< 100%', cast(0 as int64), CAST(uniques AS int64)) AS int64) AS unique_cookies_with_impressions FROM `[PARAMETER].[PARAMETER].us_audience_baseline` ) AS audience_baseline JOIN ( SELECT ctv_app_name, user_list, app_or_domain, SUM(potential_impressions) as poTEntial_impressions, SUM(unique_cookies_with_impressions) as unique_cookies_with_impressions, FROM audience_app_clean GROUP BY ctv_app_name, user_list, app_or_domain) AS audience_app ON audience_baseline.user_list = audience_app.user_list LEFT JOIN ( SELECT ctv_app_name, SUM(potential_impressions) as potENtial_impressions, SUM(unique_cookies_with_impressions) as unique_cookies_with_impressions, FROM us_country_app_clean GROUP BY ctv_app_name) AS country_app ON country_app.ctv_app_name = audience_app.ctv_app_name CROSS JOIN ( SELECT CAST( IF (CAST(impressions AS STRING) LIKE '%< 1000%', cast(0 as int64), CAST(impressions AS int64)) AS int64) AS PotenTial_Impressions, CAST( IF (CAST(uniques AS STRING) LIKE '%< 1000%', cast(0 as int64), CAST(uniques AS int64)) AS int64) AS Unique_Cookies_With_Impressions FROM `[PARAMETER].[PARAMETER].us_country_baseline` ) AS country_baseline",
'legacy': False,
'parameters': [
{'field': {'name': 'recipe_project','kind': 'string','description': 'Project where BigQuery dataset will be created.'}},
{'field': {'name': 'dataset','kind': 'string','description': 'Place where tables will be written in BigQuery.'}},
{'field': {'name': 'recipe_project','kind': 'string','description': 'Project where BigQuery dataset will be created.'}},
{'field': {'name': 'dataset','kind': 'string','description': 'Place where tables will be written in BigQuery.'}},
{'field': {'name': 'recipe_project','kind': 'string','description': 'Project where BigQuery dataset will be created.'}},
{'field': {'name': 'dataset','kind': 'string','description': 'Place where tables will be written in BigQuery.'}},
{'field': {'name': 'recipe_project','kind': 'string','description': 'Project where BigQuery dataset will be created.'}},
{'field': {'name': 'dataset','kind': 'string','description': 'Place where tables will be written in BigQuery.'}},
{'field': {'name': 'recipe_project','kind': 'string','description': 'Project where BigQuery dataset will be created.'}},
{'field': {'name': 'dataset','kind': 'string','description': 'Place where tables will be written in BigQuery.'}},
{'field': {'name': 'recipe_project','kind': 'string','description': 'Project where BigQuery dataset will be created.'}},
{'field': {'name': 'dataset','kind': 'string','description': 'Place where tables will be written in BigQuery.'}}
]
},
'to': {
'dataset': {'field': {'name': 'dataset','kind': 'string','description': 'BigQuery Dataset where all data will live.'}},
'table': 'final_table'
}
}
}
]
json_set_fields(TASKS, FIELDS)
execute(Configuration(project=CLOUD_PROJECT, client=CLIENT_CREDENTIALS, user=USER_CREDENTIALS, verbose=True), TASKS, force=True)
###Output
_____no_output_____
|
.ipynb_checkpoints/TFQuantum_starter-checkpoint.ipynb
|
###Markdown
[](https://colab.research.google.com/github/Rishit-dagli/TFUG-Mysuru-2020/blob/master/TFQuantum_starter.ipynb) Getting started with [TensorFlow Quantum](https://www.tensorflow.org/quantum)In this notebook you will build your first hybrid quantum-classical model with [Cirq](https://cirq.readthedocs.io/en/stable/) and TensorFlow Quantum (TFQ). We will build a very simple model to do binary classification. You will then use Keras to create a wrapper for the model and simulate it to train and evaluate the model. > Note: This notebook is designed to be run in Google Colab; if you want to run it locally or on a Jupyter notebook, skip the code cells with the `Colab only` comment. Setup Install TensorFlow 2.x (Colab only)
###Code
# Colab only
pip install -q tensorflow==2.1.0
###Output
_____no_output_____
###Markdown
Install TensorFlow Quantum (Colab only)
###Code
# Colab only
pip install -q tensorflow-quantum
###Output
_____no_output_____
###Markdown
Imports Now import TensorFlow and the module dependencies:
###Code
import cirq
import random
import numpy as np
import sympy
import tensorflow as tf
import tensorflow_quantum as tfq
from matplotlib import pyplot as plt
from cirq.contrib.svg import SVGCircuit
###Output
_____no_output_____
###Markdown
Place a qubit on the gridYou will now place a qubit on the grid
###Code
qubit = cirq.GridQubit(0, 0)
###Output
_____no_output_____
###Markdown
Prepare quantum dataThe first thing to do is set up the labels and parameters for preparing the quantum data. For simplicity, here we have included just 2 data points `a` and `b`.
###Code
expected_labels = np.array([[1, 0], [0, 1]])
###Output
_____no_output_____
###Markdown
Randomly rotate the `x` and `z` axes
###Code
angle = np.random.uniform(0, 2 * np.pi)
###Output
_____no_output_____
###Markdown
Building the quantum CircuitYou will now build the quantum circuit and also convert it into a tensor
###Code
a = cirq.Circuit(cirq.ry(angle)(qubit))
b = cirq.Circuit(cirq.ry(angle + np.pi / 2)(qubit))
quantum_data = tfq.convert_to_tensor([a, b])
SVGCircuit(a)
SVGCircuit(b)
###Output
_____no_output_____
###Markdown
Building the hybrid modelThis section also shows the interoperability between TensorFlow and Cirq. With the TFQ PQC layer you can easily embed the quantum part of the model within a standard classical Keras model.
###Code
q_data_input = tf.keras.Input(shape = (), dtype = tf.dtypes.string)
theta = sympy.Symbol("theta")
q_model = cirq.Circuit(cirq.ry(theta)(qubit))
expectation = tfq.layers.PQC(q_model, cirq.Z(qubit))
expectation_output = expectation(q_data_input)
classifier = tf.keras.layers.Dense(2, activation = tf.keras.activations.softmax)
classifier_output = classifier(expectation_output)
###Output
_____no_output_____
###Markdown
You will now define the optimizer and loss functions for your model
###Code
model = tf.keras.Model(inputs = q_data_input,
outputs = classifier_output)
model.compile(optimizer = tf.keras.optimizers.Adam(learning_rate = 0.1),
loss = tf.keras.losses.CategoricalCrossentropy())
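# Optional: inspect the assembled hybrid model. The PQC layer contributes a single
# trainable parameter (the rotation angle theta), followed by the classical Dense head.
model.summary()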
###Output
_____no_output_____
###Markdown
Training the modelTraining the model is just like training any other Keras model.
###Code
history = model.fit(x = quantum_data,
y = expected_labels,
epochs = 250)
###Output
Train on 2 samples
Epoch 1/250
2/2 [==============================] - 2s 1s/sample - loss: 0.6110
Epoch 2/250
2/2 [==============================] - 0s 3ms/sample - loss: 0.5516
Epoch 3/250
2/2 [==============================] - 0s 3ms/sample - loss: 0.4943
Epoch 4/250
2/2 [==============================] - 0s 3ms/sample - loss: 0.4388
Epoch 5/250
2/2 [==============================] - 0s 3ms/sample - loss: 0.3850
Epoch 6/250
2/2 [==============================] - 0s 4ms/sample - loss: 0.3356
Epoch 7/250
2/2 [==============================] - 0s 3ms/sample - loss: 0.2923
Epoch 8/250
2/2 [==============================] - 0s 2ms/sample - loss: 0.2556
Epoch 9/250
2/2 [==============================] - 0s 3ms/sample - loss: 0.2249
Epoch 10/250
2/2 [==============================] - 0s 3ms/sample - loss: 0.1994
Epoch 11/250
2/2 [==============================] - 0s 4ms/sample - loss: 0.1778
Epoch 12/250
2/2 [==============================] - 0s 3ms/sample - loss: 0.1592
Epoch 13/250
2/2 [==============================] - 0s 3ms/sample - loss: 0.1428
Epoch 14/250
2/2 [==============================] - 0s 3ms/sample - loss: 0.1283
Epoch 15/250
2/2 [==============================] - 0s 3ms/sample - loss: 0.1156
Epoch 16/250
2/2 [==============================] - 0s 3ms/sample - loss: 0.1044
Epoch 17/250
2/2 [==============================] - 0s 3ms/sample - loss: 0.0944
Epoch 18/250
2/2 [==============================] - 0s 3ms/sample - loss: 0.0854
Epoch 19/250
2/2 [==============================] - 0s 3ms/sample - loss: 0.0773
Epoch 20/250
2/2 [==============================] - 0s 3ms/sample - loss: 0.0701
Epoch 21/250
2/2 [==============================] - 0s 3ms/sample - loss: 0.0637
Epoch 22/250
2/2 [==============================] - 0s 3ms/sample - loss: 0.0581
Epoch 23/250
2/2 [==============================] - 0s 3ms/sample - loss: 0.0532
Epoch 24/250
2/2 [==============================] - 0s 3ms/sample - loss: 0.0488
Epoch 25/250
2/2 [==============================] - 0s 3ms/sample - loss: 0.0450
Epoch 26/250
2/2 [==============================] - 0s 3ms/sample - loss: 0.0416
Epoch 27/250
2/2 [==============================] - 0s 3ms/sample - loss: 0.0386
Epoch 28/250
2/2 [==============================] - 0s 3ms/sample - loss: 0.0360
Epoch 29/250
2/2 [==============================] - 0s 3ms/sample - loss: 0.0337
Epoch 30/250
2/2 [==============================] - 0s 3ms/sample - loss: 0.0316
Epoch 31/250
2/2 [==============================] - 0s 4ms/sample - loss: 0.0297
Epoch 32/250
2/2 [==============================] - 0s 4ms/sample - loss: 0.0281
Epoch 33/250
2/2 [==============================] - 0s 3ms/sample - loss: 0.0266
Epoch 34/250
2/2 [==============================] - 0s 3ms/sample - loss: 0.0252
Epoch 35/250
2/2 [==============================] - 0s 3ms/sample - loss: 0.0240
Epoch 36/250
2/2 [==============================] - 0s 2ms/sample - loss: 0.0229
Epoch 37/250
2/2 [==============================] - 0s 3ms/sample - loss: 0.0219
Epoch 38/250
2/2 [==============================] - 0s 3ms/sample - loss: 0.0210
Epoch 39/250
2/2 [==============================] - 0s 3ms/sample - loss: 0.0202
Epoch 40/250
2/2 [==============================] - 0s 3ms/sample - loss: 0.0194
Epoch 41/250
2/2 [==============================] - 0s 3ms/sample - loss: 0.0187
Epoch 42/250
2/2 [==============================] - 0s 3ms/sample - loss: 0.0181
Epoch 43/250
2/2 [==============================] - 0s 3ms/sample - loss: 0.0175
Epoch 44/250
2/2 [==============================] - 0s 4ms/sample - loss: 0.0169
Epoch 45/250
2/2 [==============================] - 0s 3ms/sample - loss: 0.0164
Epoch 46/250
2/2 [==============================] - 0s 4ms/sample - loss: 0.0159
Epoch 47/250
2/2 [==============================] - 0s 3ms/sample - loss: 0.0155
Epoch 48/250
2/2 [==============================] - 0s 2ms/sample - loss: 0.0150
Epoch 49/250
2/2 [==============================] - 0s 3ms/sample - loss: 0.0146
Epoch 50/250
2/2 [==============================] - 0s 3ms/sample - loss: 0.0143
Epoch 51/250
2/2 [==============================] - 0s 3ms/sample - loss: 0.0139
Epoch 52/250
2/2 [==============================] - 0s 3ms/sample - loss: 0.0136
Epoch 53/250
2/2 [==============================] - 0s 3ms/sample - loss: 0.0133
Epoch 54/250
2/2 [==============================] - 0s 5ms/sample - loss: 0.0130
Epoch 55/250
2/2 [==============================] - 0s 4ms/sample - loss: 0.0127
Epoch 56/250
2/2 [==============================] - 0s 3ms/sample - loss: 0.0124
Epoch 57/250
2/2 [==============================] - 0s 3ms/sample - loss: 0.0122
Epoch 58/250
2/2 [==============================] - 0s 4ms/sample - loss: 0.0119
Epoch 59/250
2/2 [==============================] - 0s 3ms/sample - loss: 0.0117
Epoch 60/250
2/2 [==============================] - 0s 3ms/sample - loss: 0.0115
Epoch 61/250
2/2 [==============================] - 0s 3ms/sample - loss: 0.0112
Epoch 62/250
2/2 [==============================] - 0s 3ms/sample - loss: 0.0110
Epoch 63/250
2/2 [==============================] - 0s 3ms/sample - loss: 0.0108
Epoch 64/250
2/2 [==============================] - 0s 4ms/sample - loss: 0.0106
Epoch 65/250
2/2 [==============================] - 0s 4ms/sample - loss: 0.0105
Epoch 66/250
2/2 [==============================] - 0s 3ms/sample - loss: 0.0103
Epoch 67/250
2/2 [==============================] - 0s 3ms/sample - loss: 0.0101
Epoch 68/250
2/2 [==============================] - 0s 3ms/sample - loss: 0.0100
Epoch 69/250
2/2 [==============================] - 0s 5ms/sample - loss: 0.0098
Epoch 70/250
2/2 [==============================] - 0s 3ms/sample - loss: 0.0096
Epoch 71/250
2/2 [==============================] - 0s 3ms/sample - loss: 0.0095
Epoch 72/250
2/2 [==============================] - 0s 5ms/sample - loss: 0.0094
Epoch 73/250
2/2 [==============================] - 0s 3ms/sample - loss: 0.0092
Epoch 74/250
2/2 [==============================] - 0s 3ms/sample - loss: 0.0091
Epoch 75/250
2/2 [==============================] - 0s 3ms/sample - loss: 0.0090
Epoch 76/250
2/2 [==============================] - 0s 5ms/sample - loss: 0.0088
Epoch 77/250
2/2 [==============================] - 0s 2ms/sample - loss: 0.0087
Epoch 78/250
2/2 [==============================] - 0s 3ms/sample - loss: 0.0086
Epoch 79/250
2/2 [==============================] - 0s 3ms/sample - loss: 0.0085
Epoch 80/250
2/2 [==============================] - 0s 3ms/sample - loss: 0.0083
Epoch 81/250
2/2 [==============================] - 0s 3ms/sample - loss: 0.0082
Epoch 82/250
2/2 [==============================] - 0s 3ms/sample - loss: 0.0081
Epoch 83/250
2/2 [==============================] - 0s 3ms/sample - loss: 0.0080
Epoch 84/250
2/2 [==============================] - 0s 3ms/sample - loss: 0.0079
Epoch 85/250
2/2 [==============================] - 0s 3ms/sample - loss: 0.0078
Epoch 86/250
2/2 [==============================] - 0s 3ms/sample - loss: 0.0077
Epoch 87/250
2/2 [==============================] - 0s 3ms/sample - loss: 0.0076
Epoch 88/250
2/2 [==============================] - 0s 6ms/sample - loss: 0.0075
Epoch 89/250
2/2 [==============================] - 0s 3ms/sample - loss: 0.0074
Epoch 90/250
2/2 [==============================] - 0s 3ms/sample - loss: 0.0073
Epoch 91/250
2/2 [==============================] - 0s 3ms/sample - loss: 0.0073
Epoch 92/250
2/2 [==============================] - 0s 3ms/sample - loss: 0.0072
Epoch 93/250
2/2 [==============================] - 0s 4ms/sample - loss: 0.0071
Epoch 94/250
2/2 [==============================] - 0s 4ms/sample - loss: 0.0070
Epoch 95/250
2/2 [==============================] - 0s 4ms/sample - loss: 0.0069
Epoch 96/250
2/2 [==============================] - 0s 3ms/sample - loss: 0.0068
Epoch 97/250
2/2 [==============================] - 0s 4ms/sample - loss: 0.0068
Epoch 98/250
2/2 [==============================] - 0s 3ms/sample - loss: 0.0067
Epoch 99/250
2/2 [==============================] - 0s 4ms/sample - loss: 0.0066
Epoch 100/250
2/2 [==============================] - 0s 3ms/sample - loss: 0.0065
Epoch 101/250
2/2 [==============================] - 0s 4ms/sample - loss: 0.0065
Epoch 102/250
2/2 [==============================] - 0s 4ms/sample - loss: 0.0064
Epoch 103/250
2/2 [==============================] - 0s 4ms/sample - loss: 0.0063
Epoch 104/250
2/2 [==============================] - 0s 4ms/sample - loss: 0.0063
Epoch 105/250
2/2 [==============================] - 0s 3ms/sample - loss: 0.0062
Epoch 106/250
2/2 [==============================] - 0s 3ms/sample - loss: 0.0061
Epoch 107/250
2/2 [==============================] - 0s 4ms/sample - loss: 0.0061
Epoch 108/250
2/2 [==============================] - 0s 3ms/sample - loss: 0.0060
Epoch 109/250
2/2 [==============================] - 0s 4ms/sample - loss: 0.0059
Epoch 110/250
2/2 [==============================] - 0s 3ms/sample - loss: 0.0059
Epoch 111/250
2/2 [==============================] - 0s 5ms/sample - loss: 0.0058
Epoch 112/250
2/2 [==============================] - 0s 4ms/sample - loss: 0.0058
Epoch 113/250
2/2 [==============================] - 0s 3ms/sample - loss: 0.0057
Epoch 114/250
2/2 [==============================] - 0s 3ms/sample - loss: 0.0056
Epoch 115/250
2/2 [==============================] - 0s 3ms/sample - loss: 0.0056
Epoch 116/250
2/2 [==============================] - 0s 3ms/sample - loss: 0.0055
Epoch 117/250
2/2 [==============================] - 0s 4ms/sample - loss: 0.0055
Epoch 118/250
2/2 [==============================] - 0s 3ms/sample - loss: 0.0054
Epoch 119/250
2/2 [==============================] - 0s 3ms/sample - loss: 0.0054
Epoch 120/250
2/2 [==============================] - 0s 6ms/sample - loss: 0.0053
Epoch 121/250
2/2 [==============================] - 0s 3ms/sample - loss: 0.0053
Epoch 122/250
2/2 [==============================] - 0s 3ms/sample - loss: 0.0052
Epoch 123/250
2/2 [==============================] - 0s 3ms/sample - loss: 0.0052
Epoch 124/250
2/2 [==============================] - 0s 3ms/sample - loss: 0.0051
Epoch 125/250
2/2 [==============================] - 0s 3ms/sample - loss: 0.0051
Epoch 126/250
2/2 [==============================] - 0s 3ms/sample - loss: 0.0050
Epoch 127/250
2/2 [==============================] - 0s 3ms/sample - loss: 0.0050
Epoch 128/250
2/2 [==============================] - 0s 3ms/sample - loss: 0.0049
Epoch 129/250
2/2 [==============================] - 0s 3ms/sample - loss: 0.0049
Epoch 130/250
2/2 [==============================] - 0s 3ms/sample - loss: 0.0048
Epoch 131/250
2/2 [==============================] - 0s 3ms/sample - loss: 0.0048
Epoch 132/250
2/2 [==============================] - 0s 3ms/sample - loss: 0.0048
Epoch 133/250
2/2 [==============================] - 0s 4ms/sample - loss: 0.0047
Epoch 134/250
2/2 [==============================] - 0s 3ms/sample - loss: 0.0047
Epoch 135/250
2/2 [==============================] - 0s 3ms/sample - loss: 0.0046
Epoch 136/250
2/2 [==============================] - 0s 3ms/sample - loss: 0.0046
Epoch 137/250
2/2 [==============================] - 0s 3ms/sample - loss: 0.0045
Epoch 138/250
2/2 [==============================] - 0s 3ms/sample - loss: 0.0045
Epoch 139/250
2/2 [==============================] - 0s 3ms/sample - loss: 0.0045
Epoch 140/250
2/2 [==============================] - 0s 3ms/sample - loss: 0.0044
Epoch 141/250
2/2 [==============================] - 0s 3ms/sample - loss: 0.0044
Epoch 142/250
2/2 [==============================] - 0s 3ms/sample - loss: 0.0044
Epoch 143/250
2/2 [==============================] - 0s 3ms/sample - loss: 0.0043
Epoch 144/250
2/2 [==============================] - 0s 3ms/sample - loss: 0.0043
Epoch 145/250
2/2 [==============================] - 0s 3ms/sample - loss: 0.0042
Epoch 146/250
2/2 [==============================] - 0s 3ms/sample - loss: 0.0042
Epoch 147/250
2/2 [==============================] - 0s 3ms/sample - loss: 0.0042
Epoch 148/250
2/2 [==============================] - 0s 3ms/sample - loss: 0.0041
Epoch 149/250
2/2 [==============================] - 0s 3ms/sample - loss: 0.0041
Epoch 150/250
2/2 [==============================] - 0s 3ms/sample - loss: 0.0041
Epoch 151/250
2/2 [==============================] - 0s 3ms/sample - loss: 0.0040
Epoch 152/250
2/2 [==============================] - 0s 3ms/sample - loss: 0.0040
Epoch 153/250
2/2 [==============================] - 0s 3ms/sample - loss: 0.0040
Epoch 154/250
2/2 [==============================] - 0s 3ms/sample - loss: 0.0039
Epoch 155/250
2/2 [==============================] - 0s 3ms/sample - loss: 0.0039
Epoch 156/250
2/2 [==============================] - 0s 3ms/sample - loss: 0.0039
Epoch 157/250
2/2 [==============================] - 0s 3ms/sample - loss: 0.0038
Epoch 158/250
2/2 [==============================] - 0s 3ms/sample - loss: 0.0038
Epoch 159/250
2/2 [==============================] - 0s 5ms/sample - loss: 0.0038
Epoch 160/250
2/2 [==============================] - 0s 3ms/sample - loss: 0.0038
Epoch 161/250
2/2 [==============================] - 0s 3ms/sample - loss: 0.0037
Epoch 162/250
2/2 [==============================] - 0s 3ms/sample - loss: 0.0037
Epoch 163/250
2/2 [==============================] - 0s 3ms/sample - loss: 0.0037
Epoch 164/250
2/2 [==============================] - 0s 3ms/sample - loss: 0.0036
Epoch 165/250
2/2 [==============================] - 0s 5ms/sample - loss: 0.0036
Epoch 166/250
2/2 [==============================] - 0s 3ms/sample - loss: 0.0036
Epoch 167/250
2/2 [==============================] - 0s 3ms/sample - loss: 0.0036
Epoch 168/250
2/2 [==============================] - 0s 3ms/sample - loss: 0.0035
Epoch 169/250
2/2 [==============================] - 0s 3ms/sample - loss: 0.0035
Epoch 170/250
2/2 [==============================] - 0s 3ms/sample - loss: 0.0035
Epoch 171/250
2/2 [==============================] - 0s 3ms/sample - loss: 0.0035
Epoch 172/250
2/2 [==============================] - 0s 3ms/sample - loss: 0.0034
Epoch 173/250
2/2 [==============================] - 0s 3ms/sample - loss: 0.0034
Epoch 174/250
2/2 [==============================] - 0s 4ms/sample - loss: 0.0034
Epoch 175/250
2/2 [==============================] - 0s 5ms/sample - loss: 0.0034
Epoch 176/250
2/2 [==============================] - 0s 3ms/sample - loss: 0.0033
Epoch 177/250
2/2 [==============================] - 0s 3ms/sample - loss: 0.0033
Epoch 178/250
2/2 [==============================] - 0s 3ms/sample - loss: 0.0033
Epoch 179/250
2/2 [==============================] - 0s 3ms/sample - loss: 0.0033
Epoch 180/250
2/2 [==============================] - 0s 3ms/sample - loss: 0.0032
Epoch 181/250
2/2 [==============================] - 0s 3ms/sample - loss: 0.0032
Epoch 182/250
2/2 [==============================] - 0s 3ms/sample - loss: 0.0032
Epoch 183/250
2/2 [==============================] - 0s 3ms/sample - loss: 0.0032
Epoch 184/250
2/2 [==============================] - 0s 3ms/sample - loss: 0.0031
Epoch 185/250
2/2 [==============================] - 0s 3ms/sample - loss: 0.0031
Epoch 186/250
2/2 [==============================] - 0s 3ms/sample - loss: 0.0031
Epoch 187/250
2/2 [==============================] - 0s 3ms/sample - loss: 0.0031
Epoch 188/250
2/2 [==============================] - 0s 3ms/sample - loss: 0.0031
Epoch 189/250
2/2 [==============================] - 0s 3ms/sample - loss: 0.0030
Epoch 190/250
2/2 [==============================] - 0s 3ms/sample - loss: 0.0030
Epoch 191/250
2/2 [==============================] - 0s 3ms/sample - loss: 0.0030
Epoch 192/250
2/2 [==============================] - 0s 3ms/sample - loss: 0.0030
Epoch 193/250
2/2 [==============================] - 0s 5ms/sample - loss: 0.0030
Epoch 194/250
2/2 [==============================] - 0s 3ms/sample - loss: 0.0029
Epoch 195/250
2/2 [==============================] - 0s 3ms/sample - loss: 0.0029
Epoch 196/250
2/2 [==============================] - 0s 3ms/sample - loss: 0.0029
Epoch 197/250
2/2 [==============================] - 0s 3ms/sample - loss: 0.0029
Epoch 198/250
2/2 [==============================] - 0s 3ms/sample - loss: 0.0029
Epoch 199/250
2/2 [==============================] - 0s 3ms/sample - loss: 0.0028
Epoch 200/250
2/2 [==============================] - 0s 3ms/sample - loss: 0.0028
Epoch 201/250
2/2 [==============================] - 0s 3ms/sample - loss: 0.0028
Epoch 202/250
2/2 [==============================] - 0s 3ms/sample - loss: 0.0028
Epoch 203/250
2/2 [==============================] - 0s 3ms/sample - loss: 0.0028
Epoch 204/250
2/2 [==============================] - 0s 3ms/sample - loss: 0.0027
Epoch 205/250
2/2 [==============================] - 0s 5ms/sample - loss: 0.0027
Epoch 206/250
2/2 [==============================] - 0s 3ms/sample - loss: 0.0027
Epoch 207/250
2/2 [==============================] - 0s 3ms/sample - loss: 0.0027
Epoch 208/250
2/2 [==============================] - 0s 3ms/sample - loss: 0.0027
Epoch 209/250
2/2 [==============================] - 0s 3ms/sample - loss: 0.0027
Epoch 210/250
2/2 [==============================] - 0s 3ms/sample - loss: 0.0026
Epoch 211/250
2/2 [==============================] - 0s 3ms/sample - loss: 0.0026
Epoch 212/250
2/2 [==============================] - 0s 3ms/sample - loss: 0.0026
Epoch 213/250
2/2 [==============================] - 0s 3ms/sample - loss: 0.0026
Epoch 214/250
2/2 [==============================] - 0s 5ms/sample - loss: 0.0026
Epoch 215/250
2/2 [==============================] - 0s 3ms/sample - loss: 0.0026
Epoch 216/250
2/2 [==============================] - 0s 3ms/sample - loss: 0.0025
Epoch 217/250
2/2 [==============================] - 0s 3ms/sample - loss: 0.0025
Epoch 218/250
2/2 [==============================] - 0s 3ms/sample - loss: 0.0025
Epoch 219/250
2/2 [==============================] - 0s 3ms/sample - loss: 0.0025
Epoch 220/250
2/2 [==============================] - 0s 3ms/sample - loss: 0.0025
Epoch 221/250
2/2 [==============================] - 0s 3ms/sample - loss: 0.0025
Epoch 222/250
2/2 [==============================] - 0s 3ms/sample - loss: 0.0025
Epoch 223/250
2/2 [==============================] - 0s 3ms/sample - loss: 0.0024
Epoch 224/250
2/2 [==============================] - 0s 4ms/sample - loss: 0.0024
Epoch 225/250
2/2 [==============================] - 0s 3ms/sample - loss: 0.0024
Epoch 226/250
2/2 [==============================] - 0s 3ms/sample - loss: 0.0024
Epoch 227/250
2/2 [==============================] - 0s 3ms/sample - loss: 0.0024
Epoch 228/250
2/2 [==============================] - 0s 3ms/sample - loss: 0.0024
Epoch 229/250
2/2 [==============================] - 0s 3ms/sample - loss: 0.0024
Epoch 230/250
2/2 [==============================] - 0s 3ms/sample - loss: 0.0023
Epoch 231/250
2/2 [==============================] - 0s 3ms/sample - loss: 0.0023
Epoch 232/250
2/2 [==============================] - 0s 3ms/sample - loss: 0.0023
Epoch 233/250
2/2 [==============================] - 0s 5ms/sample - loss: 0.0023
Epoch 234/250
2/2 [==============================] - 0s 3ms/sample - loss: 0.0023
Epoch 235/250
2/2 [==============================] - 0s 3ms/sample - loss: 0.0023
Epoch 236/250
2/2 [==============================] - 0s 4ms/sample - loss: 0.0023
Epoch 237/250
2/2 [==============================] - 0s 3ms/sample - loss: 0.0023
Epoch 238/250
2/2 [==============================] - 0s 3ms/sample - loss: 0.0022
Epoch 239/250
2/2 [==============================] - 0s 3ms/sample - loss: 0.0022
Epoch 240/250
2/2 [==============================] - 0s 3ms/sample - loss: 0.0022
Epoch 241/250
2/2 [==============================] - 0s 3ms/sample - loss: 0.0022
Epoch 242/250
2/2 [==============================] - 0s 3ms/sample - loss: 0.0022
Epoch 243/250
2/2 [==============================] - 0s 3ms/sample - loss: 0.0022
Epoch 244/250
2/2 [==============================] - 0s 3ms/sample - loss: 0.0022
Epoch 245/250
2/2 [==============================] - 0s 5ms/sample - loss: 0.0022
Epoch 246/250
2/2 [==============================] - 0s 3ms/sample - loss: 0.0021
Epoch 247/250
2/2 [==============================] - 0s 3ms/sample - loss: 0.0021
Epoch 248/250
2/2 [==============================] - 0s 3ms/sample - loss: 0.0021
Epoch 249/250
2/2 [==============================] - 0s 3ms/sample - loss: 0.0021
Epoch 250/250
2/2 [==============================] - 0s 3ms/sample - loss: 0.0021
###Markdown
Evaluating the model
###Code
plt.plot(history.history['loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train'], loc='upper left')
plt.show()
###Output
_____no_output_____
###Markdown
Performing inference
###Code
noise = np.random.uniform(-0.25, 0.25, 2)
test_data = tfq.convert_to_tensor([
cirq.Circuit(cirq.ry(noise[0])(qubit)),
cirq.Circuit(cirq.ry(noise[1] + np.pi/2)(qubit)),
])
###Output
_____no_output_____
###Markdown
You can see in the cell below that the model does a good job on this data, though the task was very easy.
###Code
predictions = model.predict(test_data)
predictions
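# Interpret the softmax outputs: argmax gives the predicted class per test circuit
# (we expect the first circuit to map to class 0 and the second to class 1).
predicted_classes = np.argmax(predictions, axis=1)
print("Predicted classes:", predicted_classes)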
###Output
_____no_output_____
|
notebooks/PracticeSession/Practice 3 - Content Based Filtering.ipynb
|
###Markdown
Recommender Systems 2017/18 Practice 3 - Content Based recommenders Load the data you saw last time:
###Code
from urllib.request import urlretrieve
import zipfile
# skip the download
#urlretrieve ("http://files.grouplens.org/datasets/movielens/ml-10m.zip", "movielens_10m.zip")
#dataFile = zipfile.ZipFile("movielens_10m.zip")
URM_path = "ml-10M100K/ratings.dat"
URM_file = open(URM_path, 'r')
def rowSplit (rowString):
split = rowString.split("::")
split[3] = split[3].replace("\n","")
split[0] = int(split[0])
split[1] = int(split[1])
split[2] = float(split[2])
split[3] = int(split[3])
result = tuple(split)
return result
URM_file.seek(0)
URM_tuples = []
for line in URM_file:
URM_tuples.append(rowSplit (line))
userList, itemList, ratingList, timestampList = zip(*URM_tuples)
userList = list(userList)
itemList = list(itemList)
ratingList = list(ratingList)
timestampList = list(timestampList)
import scipy.sparse as sps
URM_all = sps.coo_matrix((ratingList, (userList, itemList)))
URM_all = URM_all.tocsr()
URM_all
###Output
_____no_output_____
###Markdown
And now load the content information in the same way. In this case we are using tags
###Code
ICM_path = "ml-10M100K/tags.dat"
ICM_file = open(ICM_path, 'r')
def rowSplit (rowString):
split = rowString.split("::")
split[3] = split[3].replace("\n","")
split[0] = int(split[0])
split[1] = int(split[1])
split[2] = str(split[2]) # tag is a string, not a float like the rating
split[3] = int(split[3])
result = tuple(split)
return result
ICM_file.seek(0)
ICM_tuples = []
for line in ICM_file:
ICM_tuples.append(rowSplit(line))
userList_icm, itemList_icm, tagList_icm, timestampList_icm = zip(*ICM_tuples)
userList_icm = list(userList_icm)
itemList_icm = list(itemList_icm)
tagList_icm = list(tagList_icm)
timestampList_icm = list(timestampList_icm)
userList_unique = list(set(userList_icm))
itemList_unique = list(set(itemList_icm))
tagList_unique = list(set(tagList_icm))
numUsers = len(userList_unique)
numItems = len(itemList_unique)
numTags = len(tagList_unique)
print ("Number of items\t {}, Number of users\t {}".format(numItems, numUsers))
print ("Number of tags\t {}, Number of item-tag tuples {}".format(numTags, len(tagList_icm)))
print("\nData example:")
print(userList_icm[0:10])
print(itemList_icm[0:10])
print(tagList_icm[0:10])
userList_unique
###Output
_____no_output_____
###Markdown
The numbers of items and users in the ICM are different from what we saw in the URM, why? The tags are strings, so we should translate them into numbers to use them as indices in the ICM
###Code
from sklearn import preprocessing
le = preprocessing.LabelEncoder()
le.fit(tagList_icm)
tagList_icm = le.transform(tagList_icm)
print(tagList_icm[0:10])
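# The encoding is invertible: if needed we can map the integer ids back to the
# original tag strings with the same encoder (illustration only).
print(le.inverse_transform(tagList_icm[0:10]))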
###Output
[ 9814 13375 14131 8515 10769 14070 7015 8071 14829 14900]
###Markdown
We can now build the ICM
###Code
import numpy as np
ones = np.ones(len(tagList_icm))
ICM_all = sps.coo_matrix((ones, (itemList_icm, tagList_icm)))
ICM_all = ICM_all.tocsr()
ICM_all
###Output
_____no_output_____
###Markdown
Ouch, we need to add three empty items (why?):
###Code
missing_items = np.zeros((3, numTags))
missing_items = sps.csr_matrix(missing_items)
ICM_all = sps.vstack((ICM_all, missing_items))
ICM_all
###Output
_____no_output_____
###Markdown
Let's take a look at the ICM
###Code
features_per_item = (ICM_all > 0).sum(axis=1)
items_per_feature = (ICM_all > 0).sum(axis=0)
print(features_per_item.shape)
print(items_per_feature.shape)
features_per_item = np.array(features_per_item).squeeze()
items_per_feature = np.array(items_per_feature).squeeze()
print(features_per_item.shape)
print(items_per_feature.shape)
features_per_item = np.sort(features_per_item)
items_per_feature = np.sort(items_per_feature)
import matplotlib.pyplot as pyplot
%matplotlib inline
pyplot.plot(features_per_item, 'ro')
pyplot.ylabel('Num features ')
pyplot.xlabel('Item Index')
pyplot.show()
pyplot.plot(items_per_feature, 'ro')
pyplot.ylabel('Num items ')
pyplot.xlabel('Feature Index')
pyplot.show()
###Output
_____no_output_____
###Markdown
We can now build the recommender algorithm, but first we need the train/test split and the evaluation function:
###Code
train_test_split = 0.80
numInteractions = URM_all.nnz
train_mask = np.random.choice([True, False], numInteractions, p=[train_test_split, 1 - train_test_split])  # pass the probabilities via p=; as a positional argument the list would be interpreted as 'replace'
userList = np.array(userList)
itemList = np.array(itemList)
ratingList = np.array(ratingList)
URM_train = sps.coo_matrix((ratingList[train_mask], (userList[train_mask], itemList[train_mask])))
URM_train = URM_train.tocsr()
test_mask = np.logical_not(train_mask)
URM_test = sps.coo_matrix((ratingList[test_mask], (userList[test_mask], itemList[test_mask])))
URM_test = URM_test.tocsr()
def precision(recommended_items, relevant_items):
is_relevant = np.in1d(recommended_items, relevant_items, assume_unique=True)
precision_score = np.sum(is_relevant, dtype=np.float32) / len(is_relevant)
return precision_score
def recall(recommended_items, relevant_items):
is_relevant = np.in1d(recommended_items, relevant_items, assume_unique=True)
recall_score = np.sum(is_relevant, dtype=np.float32) / relevant_items.shape[0]
return recall_score
def MAP(recommended_items, relevant_items):
is_relevant = np.in1d(recommended_items, relevant_items, assume_unique=True)
# Cumulative sum: precision at 1, at 2, at 3 ...
p_at_k = is_relevant * np.cumsum(is_relevant, dtype=np.float32) / (1 + np.arange(is_relevant.shape[0]))
map_score = np.sum(p_at_k) / np.min([relevant_items.shape[0], is_relevant.shape[0]])
return map_score
def evaluate_algorithm(URM_test, recommender_object, at=5):
cumulative_precision = 0.0
cumulative_recall = 0.0
cumulative_MAP = 0.0
num_eval = 0
for i,user_id in enumerate(userList_unique):
if i % 500 == 0:
print("User %d of %d" % (i, len(userList_unique)))
relevant_items = URM_test[user_id].indices
if len(relevant_items)>0:
recommended_items = recommender_object.recommend(user_id, at=at)
num_eval+=1
cumulative_precision += precision(recommended_items, relevant_items)
cumulative_recall += recall(recommended_items, relevant_items)
cumulative_MAP += MAP(recommended_items, relevant_items)
cumulative_precision /= num_eval
cumulative_recall /= num_eval
cumulative_MAP /= num_eval
print("Recommender performance is: Precision = {:.4f}, Recall = {:.4f}, MAP = {:.4f}".format(
cumulative_precision, cumulative_recall, cumulative_MAP))
class BasicItemKNNRecommender(object):
""" ItemKNN recommender with cosine similarity and no shrinkage"""
def __init__(self, URM, k=50, shrinkage=100, similarity='cosine'):
self.dataset = URM
self.k = k
self.shrinkage = shrinkage
self.similarity_name = similarity
if similarity == 'cosine':
self.distance = Cosine(shrinkage=self.shrinkage)
elif similarity == 'pearson':
self.distance = Pearson(shrinkage=self.shrinkage)
elif similarity == 'adj-cosine':
self.distance = AdjustedCosine(shrinkage=self.shrinkage)
else:
raise NotImplementedError('Distance {} not implemented'.format(similarity))
def __str__(self):
return "ItemKNN(similarity={},k={},shrinkage={})".format(
self.similarity_name, self.k, self.shrinkage)
def fit(self, X):
item_weights = self.distance.compute(X)
item_weights = check_matrix(item_weights, 'csr') # nearly 10 times faster
print("Converted to csr")
# for each column, keep only the top-k scored items
# THIS IS THE SLOW PART, FIND A BETTER SOLUTION
values, rows, cols = [], [], []
nitems = self.dataset.shape[1]
for i in range(nitems):
if (i % 10000 == 0):
print("Item %d of %d" % (i, nitems))
this_item_weights = item_weights[i,:].toarray()[0]
top_k_idx = np.argsort(this_item_weights)[-self.k:]
values.extend(this_item_weights[top_k_idx])
rows.extend(np.arange(nitems)[top_k_idx])
cols.extend(np.ones(self.k) * i)
self.W_sparse = sps.csc_matrix((values, (rows, cols)), shape=(nitems, nitems), dtype=np.float32)
def recommend(self, user_id, at=None, exclude_seen=True):
# compute the scores using the dot product
user_profile = self.dataset[user_id]
scores = user_profile.dot(self.W_sparse).toarray().ravel()
# rank items
ranking = scores.argsort()[::-1]
if exclude_seen:
ranking = self._filter_seen(user_id, ranking)
return ranking[:at]
def _filter_seen(self, user_id, ranking):
user_profile = self.dataset[user_id]
seen = user_profile.indices
unseen_mask = np.in1d(ranking, seen, assume_unique=True, invert=True)
return ranking[unseen_mask]
###Output
_____no_output_____
###Markdown
We need to define Cosine similarity:
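In formula form, a sketch of what the `Cosine` class below computes: the shrunk cosine similarity between items $i$ and $j$ is

$$ s_{ij} = \frac{\mathbf{x}_i^\top \mathbf{x}_j}{\lVert \mathbf{x}_i \rVert \, \lVert \mathbf{x}_j \rVert} \cdot \frac{n_{ij}}{n_{ij} + \lambda} $$

where $\mathbf{x}_i$ is the column corresponding to item $i$, $n_{ij}$ is the number of co-rated entries of the two items and $\lambda$ is the shrinkage term (the second factor is only applied when the shrinkage is positive).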
###Code
def check_matrix(X, format='csc', dtype=np.float32):
if format == 'csc' and not isinstance(X, sps.csc_matrix):
return X.tocsc().astype(dtype)
elif format == 'csr' and not isinstance(X, sps.csr_matrix):
return X.tocsr().astype(dtype)
elif format == 'coo' and not isinstance(X, sps.coo_matrix):
return X.tocoo().astype(dtype)
elif format == 'dok' and not isinstance(X, sps.dok_matrix):
return X.todok().astype(dtype)
elif format == 'bsr' and not isinstance(X, sps.bsr_matrix):
return X.tobsr().astype(dtype)
elif format == 'dia' and not isinstance(X, sps.dia_matrix):
return X.todia().astype(dtype)
elif format == 'lil' and not isinstance(X, sps.lil_matrix):
return X.tolil().astype(dtype)
else:
return X.astype(dtype)
import scipy
class ISimilarity(object):
"""Abstract interface for the similarity metrics"""
def __init__(self, shrinkage=10):
self.shrinkage = shrinkage
def compute(self, X):
pass
class Cosine(ISimilarity):
def compute(self, X):
# convert to csc matrix for faster column-wise operations
X = check_matrix(X, 'csc', dtype=np.float32)
# 1) normalize the columns in X
# compute the column-wise norm
# NOTE: this is slightly inefficient. We must copy X to compute the column norms.
# A faster solution is to normalize the matrix inplace with a Cython function.
Xsq = X.copy()
Xsq.data **= 2
norm = np.sqrt(Xsq.sum(axis=0))
norm = np.asarray(norm).ravel()
norm += 1e-6
# compute the number of non-zeros in each column
# NOTE: this works only if X is instance of sparse.csc_matrix
col_nnz = np.diff(X.indptr)
# then normalize the values in each column
X.data /= np.repeat(norm, col_nnz)
print("Normalized")
# 2) compute the cosine similarity using the dot-product
dist = X * X.T
print("Computed")
# zero out diagonal values
dist = dist - sps.dia_matrix((dist.diagonal()[scipy.newaxis, :], [0]), shape=dist.shape)
print("Removed diagonal")
# and apply the shrinkage
if self.shrinkage > 0:
dist = self.apply_shrinkage(X, dist)
print("Applied shrinkage")
return dist
def apply_shrinkage(self, X, dist):
# create an "indicator" version of X (i.e. replace values in X with ones)
X_ind = X.copy()
X_ind.data = np.ones_like(X_ind.data)
# compute the co-rated counts
co_counts = X_ind * X_ind.T
# remove the diagonal
co_counts = co_counts - sps.dia_matrix((co_counts.diagonal()[scipy.newaxis, :], [0]), shape=co_counts.shape)
# compute the shrinkage factor as co_counts_ij / (co_counts_ij + shrinkage)
# then multiply dist with it
co_counts_shrink = co_counts.copy()
co_counts_shrink.data += self.shrinkage
co_counts.data /= co_counts_shrink.data
dist.data *= co_counts.data
return dist
row = [0,5,2,6,8,6,7,5,2,2]
col = [0,4,5,2,2,5,3,2,4,1]
interaction = [1,1,1,1,1,1,1,1,1,1]
test = sps.coo_matrix((interaction, (row, col)))
test = test.tocsr()
test.todense()
test[2].indices
###Output
_____no_output_____
###Markdown
Test it:
###Code
rec = BasicItemKNNRecommender(URM=URM_train, shrinkage=0.0, k=50)
rec.fit(ICM_all)
for user_id in userList_unique[0:10]:
print(rec.recommend(user_id, at=5))
evaluate_algorithm(URM_test, rec)
###Output
_____no_output_____
###Markdown
Shrinkage
###Code
rec_s = BasicItemKNNRecommender(URM=URM_train, shrinkage=10.0, k=50)
rec_s.fit(ICM_all)
evaluate_algorithm(URM_test, rec_s)
###Output
_____no_output_____
###Markdown
IDF
###Code
num_tot_items = ICM_all.shape[0]
# let's count how many items have a certain feature
items_per_feature = (ICM_all > 0).sum(axis=0)
IDF = np.array(np.log(num_tot_items / items_per_feature))[0]
print(ICM_all.shape)
print(IDF.shape)
ICM_idf = ICM_all.copy()
# compute the number of non-zeros in each col
# NOTE: this works only if X is instance of sparse.csc_matrix
col_nnz = np.diff(check_matrix(ICM_idf, 'csc').indptr)
print(col_nnz.shape)
print(ICM_idf.shape)
print(IDF.shape)
# then normalize the values in each col
ICM_idf.data *= np.repeat(IDF, col_nnz)
rec_idf = BasicItemKNNRecommender(URM=URM_train, shrinkage=0.0, k=50)
rec_idf.fit(ICM_idf)
evaluate_algorithm(URM_test, rec_idf)
###Output
_____no_output_____
###Markdown
Unnormalized similarity matrix
###Code
rec_test = BasicItemKNNRecommender(URM=URM_train, shrinkage=0.0, k=50)
rec_test.W_sparse = ICM_all * ICM_all.T
evaluate_algorithm(URM_test, rec_test)
###Output
_____no_output_____
|
pymaceuticals_starter.v2ipynb.ipynb
|
###Markdown
Observations and Insights
###Code
# Dependencies and Setup
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import math
import scipy.stats as st
from scipy.stats import sem
# Study data files
mouse_metadata_path = "data/Mouse_metadata.csv"
study_results_path = "data/Study_results.csv"
# Read the mouse data and the study results
mouse_metadata = pd.read_csv(mouse_metadata_path)
study_results = pd.read_csv(study_results_path)
# Combine the data into a single dataset
study = pd.merge(study_results, mouse_metadata)
# Display the data table for preview
group = pd.merge(study_results, mouse_metadata)
study2 = study.loc[study.duplicated(subset=['Mouse ID', 'Timepoint']),'Mouse ID'].unique()
study1 = study[study['Mouse ID'].isin(study2)==False]
study1.drop_duplicates(['Mouse ID']).head(5)
# Checking the number of mice.
study1.nunique()['Mouse ID']
study2
# Getting the duplicate mice by ID number that shows up for Mouse ID and Timepoint.
study = study.drop_duplicates()
#print(study)
study.describe()
test =study[study['Mouse ID'] == ('g989')]
test
# Optional: Get all the data for the duplicate mouse ID.
Mean = study1.groupby(['Drug Regimen','Timepoint']).mean()['Tumor Volume (mm3)']
mean_1 = pd.DataFrame(Mean)
mean_1
# Create a clean DataFrame by dropping the duplicate mouse by its ID.
clean = study1[['Mouse ID','Timepoint','Drug Regimen', 'Tumor Volume (mm3)']].drop_duplicates(['Mouse ID'])
clean
study1['Drug Regimen'].drop_duplicates()
# Checking the number of mice in the clean DataFrame.
Me= study.groupby(['Drug Regimen','Timepoint']).median()['Tumor Volume (mm3)']
vari = study.groupby(['Drug Regimen','Timepoint']).var()['Tumor Volume (mm3)']
stan = study.groupby(['Drug Regimen','Timepoint']).std()['Tumor Volume (mm3)']
s= study.groupby(['Drug Regimen','Timepoint']).sem()['Tumor Volume (mm3)']
Stat = pd.DataFrame({'Mean Tumor Volume': Mean,
'Median Tumor Volume': Me,
'Tumor Volume Variance': vari,
'Tumor Volume Std. Dev.': stan,
'Tumor Volume Std Err.': s})
Stat
## Summary Statistics
# Generate a summary statistics table of mean, median, variance, standard deviation, and SEM of the tumor volume for each regimen
Su = pd.DataFrame({'Mean Tumor Volume': study.groupby(['Drug Regimen']).mean()['Tumor Volume (mm3)'],
'Median Tumor Volume': study.groupby(['Drug Regimen']).median()['Tumor Volume (mm3)'],
'Tumor Volume Variance': study.groupby(['Drug Regimen']).var()['Tumor Volume (mm3)'],
'Tumor Volume Std. Dev.': study.groupby(['Drug Regimen']).std()['Tumor Volume (mm3)'],
'Tumor Volume Std Err.': study.groupby(['Drug Regimen']).sem()['Tumor Volume (mm3)']})
Su
# This method is the most straighforward, creating multiple series and putting them all together at the end.
# Generate a summary statistics table of mean, median, variance, standard deviation, and SEM of the tumor volume for each regimen
I = pd.MultiIndex.from_tuples([('Tumor Volume (mm3)','Mean','Median','Var','Std.Dev','Std.Err.')])
I
Tab = pd.DataFrame({'Mean': study.groupby(['Drug Regimen']).mean()['Tumor Volume (mm3)'],
'Median': study.groupby(['Drug Regimen']).median()['Tumor Volume (mm3)'],
'Variance': study.groupby(['Drug Regimen']).var()['Tumor Volume (mm3)'],
'Std. Dev.': study.groupby(['Drug Regimen']).std()['Tumor Volume (mm3)'],
'Std Err.': study.groupby(['Drug Regimen']).sem()['Tumor Volume (mm3)']})
Tab
# This method produces everything in a single groupby function
###Output
_____no_output_____
###Markdown
Bar and Pie Charts
###Code
# Generate a bar plot showing the total number of mice for each treatment throughout the course of the study using pandas.
bar = study1['Drug Regimen'].value_counts()
bar
plt.bar(bar.index.values,bar.values, color='steelblue')
plt.xticks(rotation="vertical")
plt.xlabel('Drug Regimen')
plt.ylabel('Number of Data Points')
bar
# Generate a bar plot showing the total number of mice for each treatment throughout the course of the study using pyplot.
bars = study1['Drug Regimen'].value_counts()
bars
# x_axis =('Capomulin', 'Ramicane', 'Ketapril', 'Naftisol', 'Zoniferol', 'Placebo', 'Stelasyn', 'Ceftamin', 'Infubinol', 'Propriva')
#plt.bar(x_axis,bars, color='steelblue', alpha=0.90, align="center", width=.6)
bars.plot(kind= "bar")
plt.xticks(rotation="vertical")
plt.ylabel('Number of Data Points')
plt.xlabel('Drug Regimen')
plt.show()
# Generate a pie plot showing the distribution of female versus male mice using pandas
Gen = study1['Sex'].value_counts()
Gen.sum()
M = Gen['Male']/Gen.sum()
M
F =Gen['Female']/Gen.sum()
F
plot = round(Gen).plot.pie(autopct='%.2f')
# Generate a pie plot showing the distribution of female versus male mice using pyplot
Gen = study1['Sex'].value_counts()
round(Gen)
labels = ["Male","Female"]
explode = (0, 0, 0, 0)
plt.pie(Gen,labels= labels,autopct='%.2f',shadow=True)
plt.ylabel('Sex')
plt.show()
T =study1[(study1["Drug Regimen"]== 'Capomulin')]
x= study1.groupby(['Mouse ID'])['Timepoint'].max()
merg = study1.merge(x,on= ['Mouse ID',"Timepoint"])
## Quartiles, Outliers and Boxplots
# Calculate the final tumor volume of each mouse across four of the treatment regimens:
# Capomulin, Ramicane, Infubinol, and Ceftamin
q = merg[(merg["Drug Regimen"]== 'Ramicane')]
o =merg[(merg["Drug Regimen"]== 'Infubinol')]
p= merg[(merg["Drug Regimen"]== 'Ceftamin')]
T1 =merg[(merg["Drug Regimen"]== 'Capomulin')]
# Start by getting the last (greatest) timepoint for each mouse
x= study1.groupby(['Mouse ID'])['Timepoint'].max()
x
# Merge this group df with the original dataframe to get the tumor volume at the last timepoint
merg = study1.merge(x,on= ['Mouse ID',"Timepoint"])
merg
q
o
p
# Put treatments into a list for for loop (and later for plot labels)
# Create empty list to fill with tumor vol data (for plotting)
# Calculate the IQR and quantitatively determine if there are any potential outliers.
# quartiles = study1.quantile([.25,.5,.75])
# lowerq = quartiles[0.25]
# upperq = quartiles[0.75]
# iqr = upperq - lowerq
# Locate the rows which contain mice on each drug and get the tumor volumes
t3 = T1[['Tumor Volume (mm3)','Timepoint','Drug Regimen']]
p1= p[['Tumor Volume (mm3)','Timepoint','Drug Regimen']]
o1= o[['Tumor Volume (mm3)','Timepoint','Drug Regimen']]
q1= q[['Tumor Volume (mm3)','Timepoint','Drug Regimen']]
# add subset
# Determine outliers using upper and lower bounds
# lower_bound = lowerq - (1.5*iqr)
# upper_bound = upperq + (1.5*iqr)
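# A hedged sketch of the IQR outlier check (using the Capomulin subset t3 defined above; not part of the original starter code)
quartiles = t3['Tumor Volume (mm3)'].quantile([0.25, 0.5, 0.75])
lowerq, upperq = quartiles[0.25], quartiles[0.75]
iqr = upperq - lowerq
lower_bound = lowerq - (1.5 * iqr)
upper_bound = upperq + (1.5 * iqr)
# rows whose final tumor volume falls outside the bounds are potential outliers
capomulin_outliers = t3[(t3['Tumor Volume (mm3)'] < lower_bound) | (t3['Tumor Volume (mm3)'] > upper_bound)]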
t3
# Generate a box plot of the final tumor volume of each mouse across four regimens of interest
Tumor= q1['Tumor Volume (mm3)'], t3['Tumor Volume (mm3)'], o1['Tumor Volume (mm3)'], p1['Tumor Volume (mm3)']
plt.subplots()
plt.title('Tumor Volume of Mice')
plt.ylabel('Final Tumor Volume (mm3)')
# plt.xticks([Capomulin, Ramicane, Infubinol, Ceftamin])
plt.boxplot(Tumor)
plt.show()
###Output
_____no_output_____
###Markdown
Line and Scatter Plots
###Code
# Generate a line plot of time point versus tumor volume for a mouse treated with Capomulin
lin = study1[['Tumor Volume (mm3)','Timepoint','Drug Regimen']]
lin
s= lin.iloc[0:10,2]
l =lin.iloc[0:10,1]
e=lin.iloc[0:10,0]
e1 =[45.00, 45.65, 43.27, 43.78, 42.73, 43.26, 40.60, 37.96, 38.37, 38.98]
l1 =[0,5,10,15,20,25,30,35,40,45]
c= pd.DataFrame({'Time Point':l,'Tumor Volume(mm3)':e,'Drug Regimen':s})
c
# call the labelling functions instead of overwriting them with strings
plt.title('Capomulin Treatment of a Mouse')
plt.xlabel('Timepoint (days)')
plt.ylabel('Tumor Volume (mm3)')
plt.plot(l1,e1,linewidth=4)
plt.show()
#help
Row = T[T['Mouse ID']==('l509')]
Row
Row.plot.line(x='Timepoint', y= 'Tumor Volume (mm3)')
h = T.groupby(['Mouse ID'])['Timepoint']
i = T.groupby(['Mouse ID'])['Tumor Volume (mm3)']
#plt.plot(i,h)
#plt.show()
scat =T[['Tumor Volume (mm3)','Timepoint','Drug Regimen']]
scat
p = group.pivot_table(index='Timepoint', columns= 'Drug Regimen', values='Tumor Volume (mm3)')
p
#plt.plot(p['Capomulin'])
# Generate a scatter plot of mouse weight versus average tumor volume for the Capomulin regimen
T =group[(group["Drug Regimen"]== 'Capomulin')]
T
new = T.groupby(['Mouse ID'])['Weight (g)'].mean()
news = T.groupby(['Mouse ID'])['Tumor Volume (mm3)'].mean()
plt.scatter(new,news)
plt.xlabel("Weight of Mouse")
plt.ylabel("Tumor Volume")
plt.show()
# Calculate the correlation coefficient and linear regression model
# for mouse weight and average tumor volume for the Capomulin regimen
slope, intercept, r_value, p_value, std_err = st.linregress(new,news)
fit = slope * new + intercept  # 'intercept' instead of shadowing the built-in int
plt.scatter(new,news)
#plt.xlabel("Weight of Mouse")
#plt.ylabel("Tumor Volume")
plt.plot(new,fit,"", color='r')
plt.xticks(new, rotation=90)
plt.show()
corr = round(st.pearsonr(new,news)[0],2)
print(f'The correlation between weight and tumor value is {corr}')
###Output
_____no_output_____
|
book/community/templates/template-environments-modelling.ipynb
|
###Markdown
[Modelling target]:::{eval-rst}:opticon:`tag`:badge:`[Environment],badge-primary`:badge:`Modelling,badge-secondary`::: Context Purpose*Describe the purpose of the use case.* Modelling approach*Describe the most relevant features of the modelling approach.* Highlights*Provide 3-5 bullet points that convey the use case’s core procedures. Each bullet point must have a maximum of 85 characters, including spaces.** Highlight 1* Highlight 2 Contributions NotebookAuthor (role), Affiliation, GitHub alias Modelling codebaseAuthor (role), Affiliation, GitHub alias Modelling publications```{bibliography} :style: plain :list: bullet :filter: topic % "replace by the `topic` entry linked to the publication(s) in the `_bibliography/references.bib` file"``` Modelling fundingIndicate details of the funding.:::{note}*Optional: add credits or acknowledgements to data providers or authors of code snippets*::: Install and load libraries*For installation, add only libraries not listed in the [environment.yml](https://github.com/alan-turing-institute/environmental-ds-book/blob/master/environment.yml) file, but required by the notebook. Libraries can be installed in silent mode e.g. `pip -q install `**For loading libraries, order them according to their role e.g. libraries to manipulate folders i.e. os (first), handle data i.e. numpy, xarray (second), visualisation e.g. holoviews (third), etc. The cell below contains two libraries, `os` and `warning` which are common among the notebooks. Don't remove them.*
###Code
import os
import warnings
warnings.filterwarnings(action='ignore')
###Output
_____no_output_____
###Markdown
Set project structure*The cell below creates a separate folder to save the notebook outputs. This facilitates the reader to inspect inputs/outputs stored within a defined destination folder. Change `` with your notebook identifier.*
###Code
notebook_folder = '../modelling/<replace-by-notebook-filename>'
if not os.path.exists(notebook_folder):
os.makedirs(notebook_folder)
###Output
_____no_output_____
###Markdown
Load data*Load full dataset from original or mirror sources. If the license of the dataset permits, we suggest creating sample data (preprocessed) for the notebook stored in a data repository e.g. Zenodo.* Modelling*Load the model from a data repository e.g. Zenodo.* Outputs*Provide a brief inspection of the model outputs and their interpretation* Summary*Provide 3-5 bullet points summarising the main aspects of the model and tools covered in the notebook.* * Sentence 1 e.g. `tool-name` to perform...* Sentence 2 e.g. `tool-name` to perform... Additional information**License**: The code in this notebook is licensed under the MIT License. The Environmental Data Science book is licensed under the Creative Commons by Attribution 4.0 license. See further details [here](https://github.com/alan-turing-institute/environmental-ds-book/blob/master/LICENSE.md).**Contact**: If you have any suggestion or report an issue with this notebook, feel free to [create an issue](https://github.com/alan-turing-institute/environmental-ds-book/issues/new/choose) or send a direct message to [[email protected]](mailto:[email protected]).
###Code
from datetime import date
print(f'Last tested: {date.today()}')
###Output
_____no_output_____
|
notebooks/HSE_ML_course/HSE_ML_week4_task2.ipynb
|
###Markdown
Instructions 1. Load the data from close_prices.csv. This file contains the closing stock prices of 30 companies for each day of the period. 2. Fit a PCA transformation with 10 components on the loaded data. How many components are enough to explain 90% of the variance? 3. Apply the fitted transformation to the original data and take the values of the first component. 4. Load the Dow Jones index information from djia_index.csv. What is the Pearson correlation between the first component and the Dow Jones index? 5. Which company has the largest weight in the first component? Give its name with a capital letter. If the answer is a non-integer number, separate the integer and fractional parts with a dot, for example 0.42. Round the fractional part to two decimal places where necessary. The answer to each task is a text file containing the answer in its first line. Note that the submitted files must not contain a newline at the end; this is a limitation of the Coursera platform, which we are working to remove.
###Code
import pandas as pd
from pathlib import Path
path = Path.cwd()
path = path.joinpath('../data/raw/HSE_ML_week4')
data = pd.read_csv(path.joinpath('close_prices.csv'))
data.head(10)
import numpy as np
from sklearn.decomposition import PCA
X = data.copy()
del X['date']
pca = PCA(n_components=10)
pca.fit(X)
np.add.accumulate(pca.explained_variance_ratio_)
file = open('/home/topcoder2k/HSE_ML/HSE_ML_week4_answers/pca_90.txt', 'w')
file.write('4')
file.close()
X_reduced = pca.transform(X)
component1 = X_reduced[:, 0]
djia_index = pd.read_csv(path.joinpath('djia_index.csv'))
np.corrcoef(component1, djia_index['^DJI'])
file = open('/home/topcoder2k/HSE_ML/HSE_ML_week4_answers/corrcoef.txt', 'w')
file.write('0.91')
file.close()
max_var = abs(pca.components_[0]).max()
for i in range(pca.components_[0].size):
if abs(pca.components_[0][i] - max_var) < 0.001:
print(i)
file = open('/home/topcoder2k/HSE_ML/HSE_ML_week4_answers/important_company.txt', 'w')
file.write('V')
file.close()
###Output
_____no_output_____
|
06. Python for Data Analysis - Pandas/6.6 merging_joining_and_concatenating.ipynb
|
###Markdown
Merging, Joining, and Concatenating> There are 3 main ways of combining DataFrames together: Merging, Joining and Concatenating. In this lecture we will discuss these 3 methods with examples. Example DataFrames
###Code
import pandas as pd
# For having gridlines
%%HTML
<style type="text/css">
table.dataframe td, table.dataframe th {
border: 1px black solid !important;
color: black !important;
}
df1 = pd.DataFrame({'A': ['A0', 'A1', 'A2', 'A3'],
'B': ['B0', 'B1', 'B2', 'B3'],
'C': ['C0', 'C1', 'C2', 'C3'],
'D': ['D0', 'D1', 'D2', 'D3']},
index=[0, 1, 2, 3])
df2 = pd.DataFrame({'A': ['A4', 'A5', 'A6', 'A7'],
'B': ['B4', 'B5', 'B6', 'B7'],
'C': ['C4', 'C5', 'C6', 'C7'],
'D': ['D4', 'D5', 'D6', 'D7']},
index=[4, 5, 6, 7])
df3 = pd.DataFrame({'A': ['A8', 'A9', 'A10', 'A11'],
'B': ['B8', 'B9', 'B10', 'B11'],
'C': ['C8', 'C9', 'C10', 'C11'],
'D': ['D8', 'D9', 'D10', 'D11']},
index=[8, 9, 10, 11])
df1 # DataFrame with index 0 to 3
df2 # DataFrame with index 4 to 7
df3 # DataFrame with index 8 to 11
###Output
_____no_output_____
###Markdown
ConcatenationConcatenation basically glues together DataFrames. Keep in mind that dimensions should match along the axis you are concatenating on. You can use **pd.concat** and pass in a list of DataFrames to concatenate together:
###Code
pd.concat([df1,df2,df3]) # By default axes to join together is 0 i.e. joins rows together.
# To concatenate along the columns specify axis = 1
pd.concat([df1,df2,df3],axis=1) # Wherever DataFrames were having missing values, we see NaN.
# Most often we join columns together i.e. along axis = 1. Know your data before concatenating!
###Output
_____no_output_____
###Markdown
_____ Example DataFrames
###Code
left = pd.DataFrame({'key': ['K0', 'K1', 'K2', 'K3'],
'A': ['A0', 'A1', 'A2', 'A3'],
'B': ['B0', 'B1', 'B2', 'B3']})
right = pd.DataFrame({'key': ['K0', 'K1', 'K2', 'K3'],
'C': ['C0', 'C1', 'C2', 'C3'],
'D': ['D0', 'D1', 'D2', 'D3']})
left
right
###Output
_____no_output_____
###Markdown
MergingThe **merge** function allows you to merge DataFrames together using logic similar to merging SQL tables. For example: * By default how = 'inner'.* on: when merging we merge on a key column, and we can pass one or more key columns.* What on does is combine the dataframes on the key column: instead of ending up with two separate key columns as in a plain concatenation, the two DataFrames share the same key column and are merged on it.* pd.merge(), instead of glueing the dataframes together, looks at where the values match up in the key column and then builds the rows from the matching entries.
###Code
pd.merge(left,right,how='inner',on='key')
left = pd.DataFrame({'key1': ['K0', 'K0', 'K1', 'K2'],
'key2': ['K0', 'K1', 'K0', 'K1'],
'A': ['A0', 'A1', 'A2', 'A3'],
'B': ['B0', 'B1', 'B2', 'B3']})
right = pd.DataFrame({'key1': ['K0', 'K1', 'K1', 'K2'],
'key2': ['K0', 'K0', 'K0', 'K0'],
'C': ['C0', 'C1', 'C2', 'C3'],
'D': ['D0', 'D1', 'D2', 'D3']})
left
right
pd.merge(left,right,on=['key1','key2']) # By default merges on how = "inner"
pd.merge(left, right, how='outer', on=['key1', 'key2'])
pd.merge(left, right, how='left', on=['key1', 'key2'])
pd.merge(left, right, how='right', on=['key1', 'key2'])
###Output
_____no_output_____
###Markdown
JoiningJoining is a convenient method for combining the columns of two potentially differently-indexed DataFrames into a single result DataFrame.
###Code
left = pd.DataFrame({'A': ['A0', 'A1', 'A2'],
'B': ['B0', 'B1', 'B2']},
index=['K0', 'K1', 'K2'])
right = pd.DataFrame({'C': ['C0', 'C2', 'C3'],
'D': ['D0', 'D2', 'D3']},
index=['K0', 'K2', 'K3'])
left
right
left.join(right)
###Output
_____no_output_____
###Markdown
* By default, join performs a left join between left and right based on the index keys.* If you want to merge the two dataframes based on their columns, use merge instead.* join works like merge except that the keys you join on are on the index rather than in a column.* The index holds the keys we are joining on (see the merge-based sketch below).
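To make the last point concrete, here is a sketch (not part of the original lecture) of expressing the same index-based join with merge:

```python
# equivalent of left.join(right), which by default performs a left join on the index
pd.merge(left, right, left_index=True, right_index=True, how='left')
```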
###Code
left.join(right, how='outer') # inner, left, right.
# For advanced SQL users who are familiar with that terminology.
###Output
_____no_output_____
|
Chapter01/Chapter 1 - Anatomy of Matplotlib.ipynb
|
###Markdown
IntroductionMatplotlib is a cross-platform Python library for plotting 2D graphs (also called plots). It can be used from a variety of user interfaces: Python scripts, IPython shells, Jupyter Notebooks and web applications. It can be used to develop interactive reporting applications and can be customized to build complex dashboard and imaging applications. It supports saving figures in various hard-copy formats, has limited support for 3D figures, and supports many third-party extensions that extend its functionality. Architecture of MatplotlibMatplotlib has a three-layer architecture, Backend, Artist and Scripting, organised logically as a stack. Scripting is the API with which developers create the graphs. Artist is the layer that does the actual job of creating the graph internally. Backend is where the graph is displayed. Backend LayerThis is the bottommost layer, where the graphs are displayed on an output device. This can be any of the user interfaces that Matplotlib supports. There are two types of backends: user interface backends (for use with pygtk, wxpython, tkinter, qt4, macosx etc., also referred to as "interactive backends"), and hardcopy backends that make image files (PNG, SVG, PDF, PS; also referred to as "non-interactive backends"). We will see how to configure and use these backends in later chapters (a small sketch of selecting a backend explicitly follows after this section). Artist LayerThis is the middle layer of the stack. Matplotlib uses artist objects to draw the various elements of the graph, so every element we see in the graph (see the elements of a Figure below) is an artist. This layer provides an object-oriented API for plotting graphs with maximum flexibility. This interface is meant for seasoned Python programmers, who can create complex dashboard applications. Scripting LayerThis is the topmost layer of the stack. It provides a simple interface for creating graphs and is meant for end users who don't have much programming expertise. This is called the pyplot API. Elements of a FigureThe high-level matplotlib object that contains all the elements of the output graph is called the $Figure$. Multiple graphs can be arranged in different ways to form a Figure, and each of the Figure elements is customizable. FigureThe whole picture shown above is called the Figure. It contains all the elements of the picture. AxesAn Axes is a sub-section of the Figure where a graph is plotted. An Axes has a title, an x-label and a y-label. A Figure can have many such Axes, each containing one or more graphs. In the above Figure there is only one Axes, with two line graphs in blue and red. AxisThese are the number lines representing the scale of the graphs being plotted. 2D graphs have an X-axis and a Y-axis; 3D graphs have an X-axis, a Y-axis and a Z-axis. Don't confuse Axes with Axis: an Axis is an element of an Axes. LabelThis is the name given to various elements of the figure, e.g. the X-axis label, the Y-axis label, the graph label (Blue Signal and Red Signal in the above Figure), etc. LegendWhen there are multiple graphs in the Axes (as in the above figure), each of them has its own label, and all these labels are shown together as a legend. In the above figure the legend is placed in the top right corner. TitleThis is the name given to each Axes. The Figure can also have its own title when it has multiple Axes with their own titles. The above Figure has only one Axes, so there is a single title for both the Axes and the Figure. Tick LabelsEach axis (X, Y or Z) covers a range of values that is divided into many equal bins, and bins are chosen at two levels.
In the above Figure the X-axis scale ranges from 0 to 4, divided into 4 major bins (0, 1, 2, 3, and 4), and each major bin is further divided into 4 minor bins (0, 0.25, 0.5, 0.75). The major bins are marked with major ticks and the minor bins with minor ticks, and the names given to them are called major tick labels and minor tick labels. SpinesThe boundaries of the figure are called spines. There are 4 spines for each figure (top, bottom, left and right). GridFor easier readability of the co-ordinates of points on the graph, the area of the graph is divided into a grid. Usually this grid is drawn along the major ticks of the X and Y axes. In the above figure the grid is shown in dashed lines. Interactive and non-interactive modesIn interactive mode the graph gets updated in the backend after each statement. In non-interactive mode the graph is not displayed until explicitly asked for. Using the following commands, interactive mode can be switched on or off, and the current mode can be checked at any point in time:- matplotlib.pyplot.ion() to set interactive mode ON- matplotlib.pyplot.ioff() to switch off interactive mode- matplotlib.is_interactive() to check whether interactive mode is ON (True) or OFF (False)
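As a small aside to the backend discussion above, a backend can also be selected explicitly before pyplot is used. A minimal sketch (not part of the original chapter), using the standard Agg hardcopy backend:

```python
import matplotlib
matplotlib.use("Agg")            # select a non-interactive (hardcopy) backend
import matplotlib.pyplot as plt

plt.plot([1.5, 3.0])
plt.savefig("line.png")          # with Agg the figure is written to a file instead of the screen
```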
###Code
# Set the backend as the screen output
%matplotlib inline
# Import matplotlib and pyplot libraries. It is common practice in python to import libraries with crisp synonyms.
# plt is the synonym for matplotlib.pyplot package
import matplotlib
import matplotlib.pyplot as plt
# Set the interactive mode to ON
plt.ion()
# Check the status of interactive mode
matplotlib.is_interactive()
###Output
_____no_output_____
###Markdown
Plot a line graph connecting the two points (0, 1.5) and (1, 3). The plot command expects two list/numpy-array arguments for the X and Y axes respectively. If you pass only one argument, it is taken as the Y-axis values and the X-axis takes the default range of 0 to 1. In the example below we pass only one list of two points, which is taken as the Y-axis co-ordinates; for the X-axis the default values of 0 and 1 are used.
###Code
plt.plot([1.5, 3.0]);
# Plot a line graph
plt.plot([1.5, 3.0]);
# Add labels and title
plt.title("Interactive Plot")
plt.xlabel("X-axis")
plt.ylabel("Y-axis")
# Plot a line graph
plt.plot([1.5, 3.0])
# Add labels and title
plt.title("Interactive Plot");
plt.xlabel("X-axis")
plt.ylabel("Y-axis")
# Add One more line graph
plt.plot([3.5, 2.5])
###Output
_____no_output_____
###Markdown
__Note:__ With older versions of matplotlib or certain backends (e.g. macosx), the graph may not be updated immediately. In such cases you need to call plt.draw() explicitly at the end, so that the graph gets displayed. Non-Interactive Mode
###Code
# Start the kernel afresh, and import matplotlib and pyplot libraries
import matplotlib
import matplotlib.pyplot as plt
# Set the interactive mode to OFF
plt.ioff()
# Check the status of interactive mode
matplotlib.is_interactive()
# Plot a line graph
plt.plot([1.5, 3.0]);
###Output
_____no_output_____
###Markdown
No output is displayed! To display the graph on the screen, you need to explicitly call the function plt.show()
###Code
# Display the graph on the screen
plt.show()
###Output
_____no_output_____
###Markdown
External Data for PlottingBy default matplotlib accepts input data only as Python lists or NumPy arrays, so all external data needs to be read and converted to one of these before feeding it to matplotlib for plotting. If the data is in a txt file, you can use Python functions to read it and put it into a NumPy array. If the data is in csv or xlsx format, you can use the pandas library to read it and put it into NumPy arrays. Reading a text file and plotting a line graph
###Code
# Import numpy library with synonym np. numpy is a library to manage n-dimensional arrays supporting all
# mathematical operations on these arrays
import numpy as np
# test.txt is a text file with 10 numbers separated by comma, representing x, y co-ordinates of 5 points(1,1),
# (2,4), (3,9), (4,16) and (5,25) in 2D space. loadtxt() is a numpy function to load text data into numpy array
txt = np.loadtxt('test.txt', delimiter = ',')
txt
# convert the flat array into 5 points in 2D space
txt = txt.reshape(5,2)
txt
# Separate x and y axis points. x is the first column in txt1 and y is second column. Python indexing starts with 0
x = txt[:,0]
y = txt[:,1]
print(x, y)
# import pyplot library
import matplotlib.pyplot as plt
# plot the graph
plt.plot(x, y)
# Display the graph on the screen
plt.show()
###Output
_____no_output_____
###Markdown
Reading a csv file and plotting a graph
###Code
# .csv file has relational database structure of rows and columns. test.csv file has x,y co-ordiantes for 5 points
# in 2D space. Each point is a row in the file, with 2 columns x and y. Same numpy loadtxt() function is used to load data
x, y = np.loadtxt ('test.csv', unpack = True, usecols = (0,1), delimiter = ',')
print(x)
print(y)
# import pyplot library
import matplotlib.pyplot as plt
# plot the graph
plt.plot(x, y)
# Display the graph on the screen
plt.show()
###Output
_____no_output_____
###Markdown
Reading an xlsx file and plotting a graphThe xlsx file format is not supported by numpy. Pandas is a Python data-processing package that can be used to read xlsx files.
###Code
# import numpy package with np as synonym
import numpy as np
# import pandas package with pd as synonym
import pandas as pd
#Read xlsx file into pandas dataframe. This file has same 5 points in 2D space, each in a separate row with x, y columns
df = pd.read_excel('test.xlsx', 'sheet', header=None)
# Convert pandas dataframe to numpy array
data_array = np.array(df)
print(data_array)
# Extract x , y co-ordinates from the numpy array
x , y = data_array[:,0], data_array[:,1]
print(x,y)
# import pyplot library
import matplotlib.pyplot as plt
# plot the graph
plt.plot(x, y)
# Display the graph on the screen
plt.show()
###Output
_____no_output_____
###Markdown
Environment Variables Setupmatplotlib uses the matplotlibrc file to store default values for various parameters. It covers all the customisable parameters, hence this file is very long. You can refer to the matplotlib online documentation for the complete list of parameters. The matplotlib.rcParams command is used to change these default values to any other supported values, one parameter at a time. The matplotlib.rc command is used to set default values for multiple parameters within a specific group. Finally, the matplotlib.rcdefaults() command is used to restore the default parameters. __Note:__ matplotlib.rcsetup is used internally by matplotlib to validate that the parameters being changed are acceptable values
###Code
# Get the location of matplotlibrc file
import matplotlib
matplotlib.matplotlib_fname()
# .csv file has relational database structure of rows and columns. test.csv file has x,y co-ordiantes for 5 points
# in 2D space. Each point is a row in the file, with 2 columns x and y. Same numpy loadtxt() function is used to load data
x, y = np.loadtxt ('test.csv', unpack = True, usecols = (0,1), delimiter = ',')
import matplotlib.pyplot as plt
# changing default values for multiple parameters within the group 'lines'
matplotlib.rc('lines', linewidth=4, linestyle='-', marker='*')
#changing default values for parameters individually
matplotlib.rcParams['lines.markersize'] = 20
matplotlib.rcParams['font.size'] = '15.0'
#Plot the graph
plt.plot(x,y)
# Display the graph
plt.show()
# To restore all default parameters
matplotlib.rcdefaults()
plt.plot(x,y)
plt.show()
###Output
_____no_output_____
|
6-machine-learning/Problem-1.ipynb
|
###Markdown
Problem 1Apply your skills to classify protein foldType with Decision Tree Classifier Imports
###Code
from mmtfPyspark.ml import SparkMultiClassClassifier, datasetBalancer
from pyspark.sql import SparkSession
from pyspark.sql.functions import *
import mltoolkit
###Output
_____no_output_____
###Markdown
Configure Spark Session
###Code
spark = SparkSession.builder \
.master("local[*]") \
.appName("datasetClassifierProblemset") \
.getOrCreate()
###Output
_____no_output_____
###Markdown
TODO-1: Read in data from parquet file
###Code
parquetFile = './input_features/'
data = # Your Code Here #
###Output
_____no_output_____
###Markdown
TODO-2: Select alpha, beta, alpha+beta foldtypes
###Code
data = # Your Code Here #
print(f"Total number of data: {data.count()}")
###Output
Total number of data: 2390
###Markdown
TODO-3: Downsample data
###Code
label = 'foldType'
data = # Your Code Here #
print(f"Dataset size (balanced) : {data.count()}")
data.groupby(label).count().show()
###Output
Dataset size (balanced) : 1266
+--------+-----+
|foldType|count|
+--------+-----+
| beta| 626|
| alpha| 640|
+--------+-----+
###Markdown
TODO-4: Decision Tree Classifier with PySpark
###Code
from pyspark.ml.classification import DecisionTreeClassifier
dtc = # Your Code Here: Make Decision Tree Classifier Class #
mcc = # Your Code Here: Use MulticlassClassifier wrapper on dtc#
matrics = # Your Code Here: fit data#
for k,v in matrics.items(): print(f"{k}\t{v}")
###Output
Class Train Test
alpha 562 78
beta 561 65
Sample predictions: DecisionTreeClassifier
+----------------+-----------+----------+---------+--------+--------------------+------------+-------------+--------------------+----------+--------------+
|structureChainId| alpha| beta| coil|foldType| features|indexedLabel|rawPrediction| probability|prediction|predictedLabel|
+----------------+-----------+----------+---------+--------+--------------------+------------+-------------+--------------------+----------+--------------+
| 2PW8.I|0.048387095|0.30645162|0.6451613| beta|[0.34626930754166...| 1.0| [13.0,322.0]|[0.03880597014925...| 1.0| beta|
| 4Q1Q.B| 0.0| 0.5| 0.5| beta|[-0.1299340440187...| 1.0| [13.0,322.0]|[0.03880597014925...| 1.0| beta|
| 5F6L.J| 0.0|0.21052632|0.7894737| beta|[0.28061876961818...| 1.0| [23.0,1.0]|[0.95833333333333...| 0.0| alpha|
| 5INB.B| 0.0|0.18181819|0.8181818| beta|[0.20785025656223...| 1.0| [33.0,2.0]|[0.94285714285714...| 0.0| alpha|
| 1OGO.X|0.034965035|0.42482516|0.5402098| beta|[0.00471779905128...| 1.0| [13.0,322.0]|[0.03880597014925...| 1.0| beta|
+----------------+-----------+----------+---------+--------+--------------------+------------+-------------+--------------------+----------+--------------+
only showing top 5 rows
Total time taken: 4.23888897895813
Method DecisionTreeClassifier
AUC 0.8371794871794872
F 0.8390496581875636
Accuracy 0.8391608391608392
Precision 0.8390211449942462
Recall 0.8391608391608392
False Positive Rate 0.16480186480186482
True Positive Rate 0.8391608391608392
Confusion Matrix
['alpha', 'beta']
DenseMatrix([[67., 11.],
[12., 53.]])
###Markdown
BONUS: Decision Tree Classifier with sklearn
###Code
from sklearn.tree import DecisionTreeClassifier
df = # Your Code Here: convert data to Pandas Dataframe #
dtc = # Your Code Here: Make Decision Tree Classifier Class #
mcc = # Your Code Here: Use MulticlassClassifier wrapper on dtc#
matrics = # Your Code Here: fit data#
for k,v in matrics.items(): print(f"{k}\t{v}")
spark.stop()
###Output
_____no_output_____
###Markdown
Problem 1Apply your skills to classify protein foldType with Decision Tree Classifier Imports
###Code
from mmtfPyspark.ml import SparkMultiClassClassifier, datasetBalancer
from pyspark.sql import SparkSession
from pyspark.sql.functions import *
import mltoolkit
###Output
_____no_output_____
###Markdown
Configure Spark Session
###Code
spark = SparkSession.builder.appName("Problem-1").getOrCreate()
###Output
_____no_output_____
###Markdown
TODO-1: Read in data from parquet file
###Code
parquetFile = './input_features/'
data = # Your Code Here #
###Output
_____no_output_____
###Markdown
TODO-2: Select alpha, beta, alpha+beta foldtypes
###Code
data = # Your Code Here #
print(f"Total number of data: {data.count()}")
###Output
_____no_output_____
###Markdown
TODO-3: Downsample data
###Code
label = 'foldType'
data = # Your Code Here #
print(f"Dataset size (balanced) : {data.count()}")
data.groupby(label).count().show()
###Output
_____no_output_____
###Markdown
TODO-4: Decision Tree Classifier with PySpark
###Code
from pyspark.ml.classification import DecisionTreeClassifier
dtc = # Your Code Here: Make Decision Tree Classifier Class #
mcc = # Your Code Here: Use MulticlassClassifier wrapper on dtc#
matrics = # Your Code Here: fit data#
for k,v in matrics.items(): print(f"{k}\t{v}")
###Output
_____no_output_____
###Markdown
BONUS: Decision Tree Classifier with sklearn
###Code
from sklearn.tree import DecisionTreeClassifier
df = # Your Code Here: convert data to Pandas Dataframe #
dtc = # Your Code Here: Make Decision Tree Classifier Class #
mcc = # Your Code Here: Use MulticlassClassifier wrapper on dtc#
matrics = # Your Code Here: fit data#
for k,v in matrics.items(): print(f"{k}\t{v}")
spark.stop()
###Output
_____no_output_____
###Markdown
Problem 1Apply your skills to classify protein foldType with Decision Tree Classifier Imports
###Code
from mmtfPyspark.ml import SparkMultiClassClassifier, datasetBalancer
from pyspark.sql import SparkSession
from pyspark.sql.functions import *
import mltoolkit
###Output
_____no_output_____
###Markdown
Configure Spark Session
###Code
spark = SparkSession.builder \
.master("local[*]") \
.appName("datasetClassifierProblemset") \
.getOrCreate()
###Output
_____no_output_____
###Markdown
TODO-1: Read in data from parquet file
###Code
parquetFile = './input_features/'
data = spark.read.parquet(parquetFile).cache()
###Output
_____no_output_____
###Markdown
TODO-2: Select alpha, beta, alpha+beta foldtypes
###Code
data = data.where((data.foldType == 'alpha') | (data.foldType == 'beta') | (data.foldType == 'alpha+beta'))
print(f"Total number of data: {data.count()}")
###Output
Total number of data: 14443
###Markdown
TODO-3: Downsample data
###Code
label = 'foldType'
data = datasetBalancer.downsample(data, label, 1)
print(f"Dataset size (balanced) : {data.count()}")
data.groupby(label).count().show()
###Output
Dataset size (balanced) : 3777
+----------+-----+
| foldType|count|
+----------+-----+
|alpha+beta| 1290|
| beta| 1253|
| alpha| 1234|
+----------+-----+
###Markdown
TODO-4: Decision Tree Classifier with PySpark
###Code
from pyspark.ml.classification import DecisionTreeClassifier
dtc = DecisionTreeClassifier()
mcc = SparkMultiClassClassifier(dtc, label)
matrics = mcc.fit(data)
for k,v in matrics.items(): print(f"{k}\t{v}")
###Output
Class Train Test
alpha+beta 913 377
beta 871 382
alpha 874 360
Sample predictions: DecisionTreeClassifier
+----------------+----------+----------+----------+----------+--------------------+------------+-----------------+--------------------+----------+--------------+
|structureChainId| alpha| beta| coil| foldType| features|indexedLabel| rawPrediction| probability|prediction|predictedLabel|
+----------------+----------+----------+----------+----------+--------------------+------------+-----------------+--------------------+----------+--------------+
| 1GVE.B|0.48504984|0.15614618| 0.358804|alpha+beta|[-0.0642171993628...| 0.0|[328.0,85.0,69.0]|[0.68049792531120...| 0.0| alpha+beta|
| 1R4X.A|0.17153284|0.43430656| 0.3941606|alpha+beta|[-0.2385288135872...| 0.0|[328.0,85.0,69.0]|[0.68049792531120...| 0.0| alpha+beta|
| 1T82.B|0.34013605| 0.3605442| 0.2993197|alpha+beta|[-0.1013272784789...| 0.0|[100.0,54.0,22.0]|[0.56818181818181...| 0.0| alpha+beta|
| 2HYY.C|0.44656488|0.17557251| 0.3778626|alpha+beta|[-0.1727037972476...| 0.0| [72.0,22.0,44.0]|[0.52173913043478...| 0.0| alpha+beta|
| 2NQT.B|0.31594202| 0.2173913|0.46666667|alpha+beta|[-0.1495864919792...| 0.0|[328.0,85.0,69.0]|[0.68049792531120...| 0.0| alpha+beta|
+----------------+----------+----------+----------+----------+--------------------+------------+-----------------+--------------------+----------+--------------+
only showing top 5 rows
Total time taken: 46.394468784332275
Method DecisionTreeClassifier
F 0.6615263968417703
Accuracy 0.6577301161751564
Precision 0.6747312683434565
Recall 0.6577301161751564
False Positive Rate 0.1708984297058908
True Positive Rate 0.6577301161751564
Confusion Matrix
['alpha+beta', 'beta', 'alpha']
DenseMatrix([[252., 40., 85.],
[102., 247., 33.],
[102., 21., 237.]])
###Markdown
BONUS: Decision Tree Classifier with sklearn
###Code
from sklearn.tree import DecisionTreeClassifier
df = data.toPandas()
dtc = DecisionTreeClassifier()
mcc = mltoolkit.MultiClassClassifier(dtc, label)
matrics = mcc.fit(df)
for k,v in matrics.items(): print(f"{k}\t{v}")
spark.stop()
###Output
_____no_output_____
###Markdown
Problem 1Apply your skills to classify protein foldType with Decision Tree Classifier Imports
###Code
from mmtfPyspark.ml import SparkMultiClassClassifier, datasetBalancer
from pyspark.sql import SparkSession
from pyspark.sql.functions import *
import mltoolkit
###Output
_____no_output_____
###Markdown
Configure Spark Session
###Code
spark = SparkSession.builder.appName("Problem-1").getOrCreate()
###Output
_____no_output_____
###Markdown
TODO-1: Read in data from parquet file
###Code
parquetFile = './input_features/'
data = # Your Code Here #
###Output
_____no_output_____
###Markdown
TODO-2: Select alpha, beta, alpha+beta foldtypes
###Code
data = # Your Code Here #
print(f"Total number of data: {data.count()}")
###Output
Total number of data: 2390
###Markdown
TODO-3: Downsample data
###Code
label = 'foldType'
data = # Your Code Here #
print(f"Dataset size (balanced) : {data.count()}")
data.groupby(label).count().show()
###Output
Dataset size (balanced) : 1266
+--------+-----+
|foldType|count|
+--------+-----+
| beta| 626|
| alpha| 640|
+--------+-----+
###Markdown
TODO-4: Decision Tree Classifier with PySpark
###Code
from pyspark.ml.classification import DecisionTreeClassifier
dtc = # Your Code Here: Make Decision Tree Classifier Class #
mcc = # Your Code Here: Use MulticlassClassifier wrapper on dtc#
matrics = # Your Code Here: fit data#
for k,v in matrics.items(): print(f"{k}\t{v}")
###Output
Class Train Test
alpha 562 78
beta 561 65
Sample predictions: DecisionTreeClassifier
+----------------+-----------+----------+---------+--------+--------------------+------------+-------------+--------------------+----------+--------------+
|structureChainId| alpha| beta| coil|foldType| features|indexedLabel|rawPrediction| probability|prediction|predictedLabel|
+----------------+-----------+----------+---------+--------+--------------------+------------+-------------+--------------------+----------+--------------+
| 2PW8.I|0.048387095|0.30645162|0.6451613| beta|[0.34626930754166...| 1.0| [13.0,322.0]|[0.03880597014925...| 1.0| beta|
| 4Q1Q.B| 0.0| 0.5| 0.5| beta|[-0.1299340440187...| 1.0| [13.0,322.0]|[0.03880597014925...| 1.0| beta|
| 5F6L.J| 0.0|0.21052632|0.7894737| beta|[0.28061876961818...| 1.0| [23.0,1.0]|[0.95833333333333...| 0.0| alpha|
| 5INB.B| 0.0|0.18181819|0.8181818| beta|[0.20785025656223...| 1.0| [33.0,2.0]|[0.94285714285714...| 0.0| alpha|
| 1OGO.X|0.034965035|0.42482516|0.5402098| beta|[0.00471779905128...| 1.0| [13.0,322.0]|[0.03880597014925...| 1.0| beta|
+----------------+-----------+----------+---------+--------+--------------------+------------+-------------+--------------------+----------+--------------+
only showing top 5 rows
Total time taken: 4.23888897895813
Method DecisionTreeClassifier
AUC 0.8371794871794872
F 0.8390496581875636
Accuracy 0.8391608391608392
Precision 0.8390211449942462
Recall 0.8391608391608392
False Positive Rate 0.16480186480186482
True Positive Rate 0.8391608391608392
Confusion Matrix
['alpha', 'beta']
DenseMatrix([[67., 11.],
[12., 53.]])
###Markdown
BONUS: Decision Tree Classifier with sklearn
###Code
from sklearn.tree import DecisionTreeClassifier
df = # Your Code Here: convert data to Pandas Dataframe #
dtc = # Your Code Here: Make Decision Tree Classifier Class #
mcc = # Your Code Here: Use MulticlassClassifier wrapper on dtc#
matrics = # Your Code Here: fit data#
for k,v in matrics.items(): print(f"{k}\t{v}")
spark.stop()
###Output
_____no_output_____
|
5_Indexing/3_Indexing_ix.ipynb
|
###Markdown
Indexing using DataFrame.ix[ ]:
###Code
# Note: The .ix indexer has been deprecated in recent versions of Pandas.
###Output
_____no_output_____
###Markdown
Selecting a single row using .ix[ ] as .loc[ ]
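Since .ix is deprecated, the same label-based lookup is nowadays written with .loc. A minimal sketch, assuming the same `data` DataFrame that is loaded in the cell below:

```python
# label-based equivalent of data.ix["Avery Bradley"]
first = data.loc["Avery Bradley"]
```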
###Code
# importing pandas package
import pandas as pd
# making data frame from csv file
#url = "C:/Users/deepusuresh/Documents/Data Science/01. Python/3. PANDAS/3. Rows and Columns"
data = pd.read_csv("C:/Users/deepusuresh/Documents/Data Science/01. Python/3. PANDAS/3. Rows and Columns/nba.csv", index_col ="Name")
# retrieving row by ix method
first = data.ix["Avery Bradley"]
first
###Output
C:\Users\deepusuresh\Anaconda3\lib\site-packages\ipykernel_launcher.py:3: DeprecationWarning:
.ix is deprecated. Please use
.loc for label based indexing or
.iloc for positional indexing
See the documentation here:
http://pandas.pydata.org/pandas-docs/stable/indexing.html#ix-indexer-is-deprecated
This is separate from the ipykernel package so we can avoid doing imports until
###Markdown
Selecting a single row using .ix[ ] as .iloc[ ]
###Code
# retrieving row by ix method
first = data.ix[1]
first
###Output
C:\Users\deepusuresh\Anaconda3\lib\site-packages\ipykernel_launcher.py:3: DeprecationWarning:
.ix is deprecated. Please use
.loc for label based indexing or
.iloc for positional indexing
See the documentation here:
http://pandas.pydata.org/pandas-docs/stable/indexing.html#ix-indexer-is-deprecated
This is separate from the ipykernel package so we can avoid doing imports until
|
clustering/k-means.ipynb
|
###Markdown
K-Means Clustering-----NOTE: Please read this notebook [here](https://nbviewer.jupyter.org/github/FG-33/ml-algorithms/blob/master/clustering/k-means.ipynb?flush_cache=True). It is not rendered properly on Github.This notebook was created using the concepts and ideas conveyed in Stanford's lecture/course *Machine Learning* by Andrew Ng. TheoryK-Means clustering is an algorithm whose goal is to cluster/group data into $K$ groups/clusters based on their similarity. The similarity is measured with a fixed heuristic; for data points in a two-dimensional space this could, for example, be the Euclidean distance. The initial situation when using k-means therefore consists of a set of $n$-dimensional data points:$$ X = \{x^{(1)}, x^{(2)}, x^{(3)}, x^{(4)}, .., x^{(m)}\} $$and a heuristic (the *Euclidean distance* between a data point $x$ and a cluster center $\mu$, also known as a centroid):$$ d = || x - \mu ||^2 $$ At the beginning of the algorithm $K$ cluster centers are initialized. Every data point is then assigned to the centroid it is closest to. This assignment of a data point to a cluster is denoted $c^{(i)}$:$$ c^{(i)} = \arg\min_k || x^{(i)} - \mu_k ||_2 $$After assigning the $m$ data points, the centroids are shifted based on their assigned data points: the mean position of the assigned points is calculated and becomes the new position of the centroid. Next, the data points are assigned to their closest centroid again and the centroid positions are adjusted. This is repeated until the assignment does not change anymore. In the end a cost can be calculated to measure how good the resulting clustering is:$$J(c^{(1)}, .., c^{(m)}, \mu_1, .., \mu_K) = \frac{1}{m} \sum_{i=1}^m || x^{(i)} - \mu_{c^{(i)}} ||_2 $$ In practice, multiple configurations with randomly initialized centroids are run until convergence and their final costs are compared to pick a good fit.---- Implementation
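For completeness, the centroid update used in the implementation below (implied but not written out above) is simply the mean of the points currently assigned to each centroid:

$$ \mu_k = \frac{1}{|C_k|} \sum_{i \,:\, c^{(i)} = k} x^{(i)} $$

where $C_k$ denotes the set of data points assigned to centroid $k$.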
###Code
import numpy as np
def euclidean(a, b):
""" Calculates the euclidean distance between two vectors.
:param a: first vector
:param b: second vector
:return distance: euclidean distance between a and b
"""
return np.sqrt(np.power(a - b, 2).sum())
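# quick sanity check of the distance heuristic (illustrative): the distance between
# (0, 0) and (3, 4) is the 3-4-5 triangle, i.e. euclidean returns 5.0
# euclidean(np.array([0, 0]), np.array([3, 4]))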
def k_means(x, k, scale=1.5, epsilon=0.005):
""" Clusters the given data using the k-means algorithm.
:param x: n x m matrix containing m n-dimensional vectors representing data points
:param k: number of centroids
:param scale: scale value to increase area where centroids can be generated
(1 = only in the area where data points are)
:param epsilon: break condition
:return c_list: indexes of centroids the corresponding data point is assigned to for each iteration
:return centroids: n x k matrix containing k n-dimensional vectors representing centroid positions
for each iteration
"""
# data properties
n, m = x.shape
max_per_dim = np.amax(x, axis=1, keepdims=True)
min_per_dim = np.amin(x, axis=1, keepdims=True)
diff_max_min = max_per_dim - min_per_dim
center_max_min = min_per_dim + diff_max_min / 2
# return values initialization
c_list = []
centroids_list = []
# randomly initialize centroids
centroids = np.random.rand(n, k) * (diff_max_min * scale) + center_max_min - (diff_max_min * scale / 2)
centroids_list.append(np.copy(centroids))
# initialize centroid assignement list
c = np.zeros((m,), dtype=np.int8)
c_list.append(c)
# clustering
while(True):
# assign data points to centroids
for i in range(m):
# calc distances to all centroids
distances = [euclidean(x[:,i], centroids[:,j]) for j in range(k)]
# assign centroid with smallest distance to this data point
_, idx = min((val, idx) for (idx, val) in enumerate(distances))
c[i] = idx
c_list.append(np.array(c))
# calculate new position of centroids
for centroid in range(k):
mean = np.zeros((n,))
points = 0
# add up assigned points of centroid
for i in range(m):
if c[i] == centroid:
points += 1
mean += x[:,i]
# if the centroid has at least one assigned data point, update its position
if points > 0:
centroids[:, centroid] = mean / points
centroids_list.append(np.copy(centroids))
# if the change in the centroid positions is smaller than epsilon, stop iterating
diff = np.power(centroids_list[-2] - centroids_list[-1],2).sum()
if diff < epsilon:
break
return c_list, centroids_list
def split(c, x):
""" Splits the given data into separate list according the the given assignment.
:param c: the assignment of the data points to centroids
:param x: n x m matrix containing m n-dimensional vectors representing data points
:return split_sets: list of the separated data points
"""
split_sets = []
np_c = np.array(c)
centroids = np.unique(np_c)
for centroid in centroids:
idx = np.array(np_c.transpose() == centroid)
split_sets.append(x.transpose()[idx == True].transpose())
return split_sets
def calc_cost(centroids, x, c):
""" Calculates the cost of assignments for the given data points and centroids.
:param centroids: n x k matrix containing k n-dimensional vectors representing centroid positions
:param x: n x m matrix containing m n-dimensional vectors representing data points
:param c: the assignment of the data points to centroids
:return cost: the cost of the constellation/assignment
"""
n, m = x.shape
cost = 0
for i in range(m):
cost += euclidean(x[:,i], centroids[:,c[i]])
return cost / m
###Output
_____no_output_____
###Markdown
Data
###Code
# set seed for reproducibility
np.random.seed(100923)
# generate three random sets of points
r = np.random.rand(2, 10) * 60
g = np.random.rand(2, 10) * 40 + np.array([[80],[0]])
b = np.random.rand(2, 10) * 70 + np.array([[95],[55]])
%matplotlib inline
import matplotlib.pyplot as plt
# plot the three sets
plt.figure(1, figsize=(7,7))
plt.plot(r[0,:], r[1,:], 'co')
plt.plot(g[0,:], g[1,:], 'mo')
plt.plot(b[0,:], b[1,:], 'yo')
plt.show()
# combine the sets
x = np.concatenate((np.concatenate((r, g), axis=1), b), axis=1)
###Output
_____no_output_____
###Markdown
Clustering the dataThe optimal number of centroids to run k-means with is difficult to determine. Therefore the chosen approach is to cluster the data multiple times using different numbers of centroids. For each clustering, the cost of the resulting assignment is calculated and compared afterwards.
###Code
costs = []
max_clusters = 10 # try with 1-10 centroids
for k in range(1, max_clusters+1):
cost = 0
iterations = 50
for i in range(iterations):
c_list, centroids_list = k_means(x, k)
cost += calc_cost(centroids_list[-1], x, c_list[-1])
print("Current cluster: {}".format(k), end="\r")
costs.append(cost / iterations)
plt.figure(2, figsize=(7,7))
plt.plot(range(1, max_clusters+1), costs, "b-")
plt.show()
###Output
Current cluster: 10
###Markdown
Based on the above plot, the optimal number of centroids is either $3$ or $4$. It is important to note that the $cost$ will go to $0$ as $k$ approaches $m$. Therefore the number of centroids is chosen at the part of the graph, reading from the left, where the slope decreases significantly. This is the case between $3$ and $4$.
###Code
# cluster the given data with 3 centroids
cluster_num = 3
c_list, centroids_list = k_means(x, cluster_num)
import time
from IPython import display
# Plot clustering process
style = ['bo','go', 'ro', 'co', 'mo', 'yo']
plt.figure(1, figsize=(7,7))
for t in range(len(c_list)):
# calc sets to color the data points
sets = split(c_list[t], x)
# time delay for the 'animation'
time.sleep(0.5)
# clear the plot
plt.clf()
plt.axis([-40, 220, -50, 175])
# plot the data
for i, set_ in enumerate(sets):
plt.plot(set_[0,:], set_[1,:], style[i]) # do not use this with more than 6 centroids
plt.plot(centroids_list[t][0,:], centroids_list[t][1,:], 'k*', ms=15)
# handle animation
display.display(plt.gcf())
display.clear_output(wait=True)
###Output
_____no_output_____
###Markdown
TestingThe following code sections can be ignored.
###Code
%%script false
# split data tests
a = np.array([3,1,1,1,1,2,2,2,3,3])
data = np.array([[1,2,3,4,5,6,7,8,9,10],[1,2,3,4,5,6,7,8,9,10]])
np_a = np.array(a)
np_a_unique = np.unique(np_a)
sets = split(a, data)
for set_ in sets:
print(set_)
# cost testing
a = np.array([0, 0])
centroids = np.array([[1],[1]])
b = np.array([[0, 1],[0, 1]])
cost = calc_cost(centroids, b, a)
print(cost)
a = np.array([[1,2,3],[1,2,3]])
print(a.sum())
###Output
_____no_output_____
###Markdown
DAY 13 - Mar 9, 2017
###Code
import pandas as pd
import matplotlib.pyplot as plt
%matplotlib inline
###Output
_____no_output_____
###Markdown
Dataset- https://www.kaggle.com/ludobenistant/hr-analytics
###Code
!head HR_comma_sep.csv
data = pd.read_csv("HR_comma_sep.csv")
print(data.shape)
data.head()
# What is salary
set(data["salary"])
# Select n samples to cluster
X = data.sample(n=200, random_state=1)
X.head()
# Using only certain features
features = X.iloc[:,:-2]
features.head()
###Output
_____no_output_____
###Markdown
TSNE
###Code
from sklearn.manifold import TSNE
tsne = TSNE(n_components=2, random_state=0, perplexity=7).fit_transform(features)
plt.scatter(tsne[:,0], tsne[:,1])
plt.xlabel("tsne_1")
plt.ylabel("tsne_2")
###Output
_____no_output_____
###Markdown
K-means clustering- http://scikit-learn.org/stable/modules/clustering.html
###Code
from sklearn.cluster import KMeans
y_pred = KMeans(n_clusters=3, random_state=1).fit_predict(tsne)
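# the fitted centroid coordinates could also be inspected (illustrative sketch using the
# same scikit-learn API): KMeans(n_clusters=3, random_state=1).fit(tsne).cluster_centers_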
plt.scatter(tsne[:,0], tsne[:,1], c=y_pred)
plt.xlabel("tsne_1")
plt.ylabel("tsne_2")
###Output
_____no_output_____
###Markdown
K-Means--- Table of Contents1. Defined2. Implementation3. Comparison--- Implementation
###Code
# IMPORTS
import random
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_blobs
class KMeans:
def __init__(self, k_clusters = 2):
self.k_clusters = k_clusters
def _classify(self, centers, x_i):
return np.argmin(np.linalg.norm(centers - np.array(x_i), axis = 1))
def fit(self, X):
self.X = X
self.centers = np.array(random.sample(list(self.X), self.k_clusters))
# loop flag: becomes False once the assignment stops changing (new_cluster == old_cluster)
test = True
old_cluster = None
while test:
new_cluster = [self._classify(self.centers, x) for x in X]
if new_cluster == old_cluster: test = False
old_cluster = new_cluster
for i in range(self.k_clusters):
index = np.where(np.array(new_cluster) == i)
self.centers[i] = np.mean(X[index], axis = 0)
return self.centers
def plot(self, y = None, actual_y = None):
X = self.X.T
if y is None:
for i in range(len(X[0])):
plt.scatter(X[:, i][0], X[:, i][1])
else:
for i in range(len(X[0])):
if (actual_y[i] == 1):
plt.scatter(X[:, i][0], X[:, i][1], color="green")
else:
plt.scatter(X[:, i][0], X[:, i][1], color="blue")
plt.scatter(self.centers[0][0], self.centers[0][1], color="red", s=200)
plt.scatter(self.centers[1][0], self.centers[1][1], color="red", s=200)
plt.show()
# FIT
X, Y = make_blobs(n_samples = 200, centers = 2, cluster_std = 3)
model = KMeans()
model.fit(X)
model.plot(actual_y = Y, y = 1)
###Output
_____no_output_____
|
ESR_Test_executed.ipynb
|
###Markdown
**Load main libraries**
###Code
from __future__ import print_function
from ecoop.prov import *
from datetime import datetime as dt
from IPython.core.display import Javascript
import os
import time
from ecoop.ecooputil import shareUtil
from ecoop.cf import cfData, cfPlot
from ecoop.ecooprovdict import ecooProvDict
from ecoop.printer import openDocument, closeDocument, addSection, addSubSection, addFigure
from ecoop.splashtemplate import makeSplash
from ecoop.splashdict import splash
from ecoop.epimagic import *
try :
from os import environ
ecoop_username = environ['ecoop_username']
except :
print("no username provided")
ecoop_username = "anonymous"
# note :
# when no username is provided we can still assign the username to the current unix user with :
# import getpass
# ecoop_username = getpass.getuser()
%matplotlib inline
util = shareUtil()
cfd = cfData()
cfp = cfPlot()
###Output
_____no_output_____
###Markdown
**Initialize the notebook metadata** * It adds the key ```ecoop_prov``` to the main metadata and starts a dictionary to which metadata in JSON-LD format is appended
###Code
prov = initProv()
prov['http://www.w3.org/ns/prov#startedAtTime'] = provGetTime(dt.utcnow())
###Output
_____no_output_____
###Markdown
Document
###Code
ID = util.get_id('Climate-forcing_pdf')
document = openDocument()
###Output
_____no_output_____
###Markdown
Section 1
###Code
%%writefileref {ID}/climate_forcing.txt {ecoop_username}
Climate patterns over the North Atlantic are important drivers of oceanographic conditions and ecosystem states.
Steadily increasing atmospheric carbon dioxide levels can not only affect climate on global and regional scales
but alter critical aspects of ocean chemistry. Here, we describe the atmospheric forcing mechanisms related
to climate in this region including large-scale atmospheric pressure systems, natural ocean temperature cycles in the North Atlantic,
components of the large-scale circulation of the Atlantic Ocean, and issues related to ocean acidification.
section = addSection(name='Climate Forcing', data=os.path.join(ID,'climate_forcing.txt'))
###Output
_____no_output_____
###Markdown
Sub Section 1
###Code
%%writefileref {ID}/nao.txt {ecoop_username}
Climate and weather over the North Atlantic are strongly influenced by the relative strengths
of two large-scale atmospheric pressure cells -- the Icelandic Low and the Azores High [4].
As the relative strengths of these two pressure systems vary, characteristic patterns of temperature, precipitation, and wind fields are observed.
An index of this dipole pattern has been developed based on the standardized difference in sea level pressure between Lisbon, Portugal and Reykjavík,
Iceland in the winter (December-February; see Glossary for a description of methods used to create standardized indicators).
This North Atlantic Oscillation (NAO) index has been related to key oceanographic and ecological processes in the North Atlantic basin [5].
When the NAO index is high (positive NAO state), the westerly winds shift northward and increase in strength.
Additionally, there is an increase in precipitation over southeastern Canada, the eastern seaboard of the United States,
and northwestern Europe. Water temperatures are cool off Labrador and northern Newfoundland, influencing the formation of Deep Labrador Slope water,
but warm off the United States.
Conversely, when the NAO index is low (negative NAO state), there is a southward shift and decrease in westerly winds, decreased storminess,
and drier conditions over southeastern Canada, the eastern United States, and northwestern Europe.
Water temperatures are warmer off Labrador and Newfoundland, but cooler off the eastern United States.
Since 1972, the NAO has primarily been in a positive state (Figure 1), although notable short-term reversals to a negative state have been observed during this period.
Changes in the NAO have been linked to changes in plankton community composition in the North Atlantic, reflecting changes in both the distribution
and abundance of warm and cold-temperate species.
naodata = cfd.nao_get(save=ID, csvout="nao.csv", prov=True)
# NAO
naodata = cfd.nao_get(save=ID, csvout="nao.csv")
cfp.plot_index(name='NAO_lowess', xticks=10, xticks_fontsize=10,
data=naodata, nb='y', scategory='lowess', frac=1./6, it=6,
output=ID, dateformat=True, figsave="nao.png", prov=True)
time.sleep(1)
nb_name = 'ESR_Test_executed.ipynb'
util.save_notebook(ID, nb_name)
time.sleep(1)
!rm -rf splash_nao.ipynb
nao_datafile = os.path.join(ID,'nao.csv')
naodatalink = util.gistit(filename=nao_datafile, jist='/usr/local/bin/gist', type='text')
nbviewerlink = util.gistit(filename=nb_name, jist='/usr/local/bin/gist', type='notebook')
splash['NAO']['nbviewer'] = nbviewerlink
splash['NAO']['repository'] = 'https://github.com/epifanio/ecoop-1'
splash['NAO']['download'] = 'http://epinux.com/%s' % ID
f = open('splash_nao.ipynb', 'w')
f.write(makeSplash(splash, 'NAO'))
f.close()
naosplashlink = util.gistit(filename='splash_nao.ipynb', jist='/usr/local/bin/gist', type='notebook')
naofig = addFigure(img=os.path.join(ID,'nao.png'), name='North Atlantic Oscillation', metadata=naosplashlink)
naosubsection = addSubSection(name='North Atlantic Oscillation Index', data=os.path.join(ID,'nao.txt'), fig=naofig)
###Output
_____no_output_____
###Markdown
Sub Section 2
###Code
%%writefileref {ID}/amo.txt {ecoop_username}
Multidecadal patterns in sea surface temperature (SST) in the North Atlantic are represented by the Atlantic Multidecadal Oscillation (AMO) index.
The AMO signal is based on spatial patterns in SST variability after removing the effects of anthropogenic forcing on temperature,
revealing natural long term cycles in SST.
The AMO is characterized by warm and cool phases [6] with periods of approximately 20-40 years.
The AMO index is related to air temperatures and rainfall over North America and Europe and is associated
with changes in the frequency of droughts in North America and the frequency of severe hurricane events.
The AMO is thought to be related to the North Atlantic branch of the deep thermohaline circulation
(for more see The Gulf Stream below) which is in turn directly related to dynamics of the Gulf Stream.
The AMO index shows a relatively cool period starting in the early 1960s, extending through the mid 1990s.
Since 1997, the AMO has been in a warm phase (Figure 2).
If past patterns continue to hold, the warm phase will potentially continue for the next several decades.
# AMO
amodata = cfd.amo_get(url='http://www.esrl.noaa.gov/psd/data/correlation/amon.us.long.data', save=ID, csvout="amo.csv")
cfp.plot_index(name='AMO_lowess', xticks=10, xticks_fontsize=10,
data=amodata, nb='y', scategory='lowess', frac=1./6, it=6,
output=ID, dateformat=True, figsave="amo.png", prov=True)
time.sleep(1)
nb_name = 'ESR_Test_executed.ipynb'
util.save_notebook(ID, nb_name)
time.sleep(1)
amo_datafile = os.path.join(ID,'amo.csv')
amodatalink = util.gistit(filename=amo_datafile, jist='/usr/local/bin/gist', type='text')
nbviewerlink2 = util.gistit(filename=nb_name, jist='/usr/local/bin/gist', type='notebook')
splash['AMO']['nbviewer'] = nbviewerlink2
splash['AMO']['repository'] = 'https://github.com/epifanio/ecoop-1'
splash['AMO']['download'] = 'http://epinux.com/%s' % ID
f = open('splash_amo.ipynb', 'w')
f.write(makeSplash(splash, 'AMO'))
f.close()
amosplashlink = util.gistit(filename='splash_amo.ipynb', jist='/usr/local/bin/gist', type='notebook')
amofig = addFigure(img=os.path.join(ID,'amo.png'), name='Atlantic Multidecadal Oscillation', metadata=amosplashlink)
amosubsection = addSubSection(name='Atlantic Multidecadal Oscillation', data=os.path.join(ID,'amo.txt'), fig=amofig)
###Output
_____no_output_____
###Markdown
Write Document
###Code
closedDocument = closeDocument()
texfile=''
texfile += document
texfile += section
texfile += naosubsection
texfile += amosubsection
texfile += closedDocument
#print(texfile)
pdf = os.path.join(ID,'test.tex')
f = open(pdf,'w')
f.write(texfile)
f.close()
!pdflatex -output-directory={ID} {pdf}
###Output
_____no_output_____
###Markdown
PDF available in the ```jupyter dashboard ``` [HOME](../) in the directory with the specified **ID** or via QR code :
###Code
from IPython.core.display import Image
!rm -rf pdf.png
import qrcode
img = qrcode.make("../files/%s/test.pdf" % ID)
img.save("pdf.png")
Image('pdf.png')
###Output
_____no_output_____
###Markdown
**Note: the above is an example - the link in the QR code doesn't download the pdf** **or upload to SFTP:**```
from secret import username, password, hostname, port
inputfile = ID
outputfile = '/var/www/shared/%s.zip' % ID
util.uploadfile(username=username, password=password, hostname=hostname, port=port, inputfile=inputfile, outputfile=outputfile, zip=True, link=True, qr=True, apacheroot='/var/www/')
``` Add ```date, time, and User Info``` into the **```ecoop_prov```** notebook metadata:
###Code
prov['http://www.w3.org/ns/prov#endedAtTime'] = provGetTime(dt.utcnow())
prov["http://www.w3.org/ns/prov#wasAssociatedWith"] = provWasAssociatedWith("massimo patrick")
prov["http://www.w3.org/ns/prov#used"] = provWasAssociatedWith("ecoop_software ipython_software")
prov
###Output
_____no_output_____
###Markdown
**Embed the ```prov``` dictionary in the notebook metadata**
###Code
Javascript("IPython.notebook.metadata['ecoop_prov'] = %s" % prov)
###Output
_____no_output_____
###Markdown
**Save and export the output**
###Code
import time
time.sleep(1)
util.save_notebook(ID, nb_name)
time.sleep(2)
output = ID+'.zip'
util.zipdir(ID, output)
###Output
_____no_output_____
|
notebooks/DiCE_with_advanced_options.ipynb
|
###Markdown
Advanced options to customize Counterfactual ExplanationsHere we discuss a few ways to change DiCE's behavior. * Train a custom ML model * Changing feature weights that decide relative importance of features in perturbation* Trading off between proximity and diversity goals* Selecting the features to change
###Code
# import DiCE
import dice_ml
from dice_ml.utils import helpers # helper functions
# Tensorflow libraries
import tensorflow as tf
from tensorflow import keras
###Output
_____no_output_____
###Markdown
Loading dataset We use "adult" income dataset from UCI Machine Learning Repository (https://archive.ics.uci.edu/ml/datasets/adult). For demonstration purposes, we transform the data as detailed in **dice_ml.utils.helpers** module.
###Code
dataset = helpers.load_adult_income_dataset()
dataset.head()
d = dice_ml.Data(dataframe=dataset, continuous_features=['age', 'hours_per_week'], outcome_name='income')
###Output
_____no_output_____
###Markdown
1. Training a custom ML model Below, we build an Artificial Neural Network based on Keras Tensorflow framework.
###Code
# seeding random numbers for reproducibility
from numpy.random import seed
seed(1)
from tensorflow import set_random_seed
set_random_seed(2)
sess = tf.InteractiveSession()
train, _ = d.split_data(d.normalize_data(d.one_hot_encoded_data))
X_train = train.loc[:, train.columns != 'income']
y_train = train.loc[:, train.columns == 'income']
ann_model = keras.Sequential()
ann_model.add(keras.layers.Dense(20, input_shape=(X_train.shape[1],), kernel_regularizer=keras.regularizers.l1(0.001), activation=tf.nn.relu))
ann_model.add(keras.layers.Dense(1, activation=tf.nn.sigmoid))
ann_model.compile(loss='binary_crossentropy', optimizer=tf.keras.optimizers.Adam(0.01), metrics=['accuracy'])
ann_model.fit(X_train, y_train, validation_split=0.20, epochs=100, verbose=0, class_weight={0:1,1:2})
# the training will take some time for 100 epochs.
# you can wait or set verbose=1 to see the progress of training.
# provide the trained ML model to DiCE's model object
m = dice_ml.Model(model=ann_model)
###Output
_____no_output_____
###Markdown
Generate diverse counterfactuals
###Code
# initiate DiCE
exp = dice_ml.Dice(d, m)
# query instance in the form of a dictionary; keys: feature name, values: feature value
query_instance = {'age':22,
'workclass':'Private',
'education':'HS-grad',
'marital_status':'Single',
'occupation':'Service',
'race': 'White',
'gender':'Female',
'hours_per_week': 45}
# generate counterfactuals
dice_exp = exp.generate_counterfactuals(query_instance, total_CFs=4, desired_class="opposite")
# visualize the results
dice_exp.visualize_as_dataframe()
###Output
Query instance (original outcome : 0)
###Markdown
2. Changing feature weights It may be the case that some features are harder to change than others (e.g., education level is harder to change than working hours per week). DiCE allows input of relative difficulty in changing a feature through specifying *feature weights*. A higher feature weight means that the feature is harder to change than others. For instance, one way is to use the median absolute deviation (MAD) as a measure of relative difficulty of changing a continuous feature. The MAD of a continuous feature conveys the variability of the feature, and is more robust than the standard deviation as it is less affected by outliers and non-normality. The inverse of MAD would then imply the ease of varying the feature and is hence used as feature weights in our optimization to reflect the difficulty of changing a continuous feature. By default, DiCE computes this internally and divides the distance between continuous features by the MAD of the feature's values in the training set. Let's see what their values are by computing them below:
###Code
# get MAD
mads = d.get_mads(normalized=True)
# create feature weights
feature_weights = {}
for feature in mads:
feature_weights[feature] = round(1/mads[feature], 2)
print(feature_weights)
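# rough cross-check of the 'age' weight by hand (assumption: dice_ml min-max normalizes
# continuous features to [0, 1] before computing the MAD):
# age_norm = (dataset['age'] - dataset['age'].min()) / (dataset['age'].max() - dataset['age'].min())
# mad_age = (age_norm - age_norm.median()).abs().median()   # 1/mad_age should be close to 7.3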
###Output
{'age': 7.3, 'hours_per_week': 24.5}
###Markdown
The above feature weights encode that changing *age* is approximately seven times more difficult than changing categorical variables, and changing *hours_per_week* is approximately three times more difficult than changing *age*. Of course, this may sound odd, since a person cannot change their age. In this case, what it's reflecting is that there is a higher diversity in age values than hours-per-week values. Below we show how to over-ride these weights to assign custom user-defined weights. Now, let's try to assign equal weights to all features and see how it affects the counterfactual generation. DiCE allows this through *feature_weights* parameter.
###Code
# assigning equal weights
feature_weights = {'age': 1, 'hours_per_week': 1}
# generate counterfactuals
dice_exp = exp.generate_counterfactuals(query_instance, total_CFs=4, desired_class="opposite",
feature_weights=feature_weights)
# visualize the results
dice_exp.visualize_as_dataframe()
###Output
Query instance (original outcome : 0)
###Markdown
Note that we transform continuous features and one-hot-encode categorical features to fall between 0 and 1 in order to handle the relative scale of features. However, this also means that the relative ease of changing continuous features is higher than that of categorical features when the total number of continuous features is small compared to the total number of categories of all categorical variables combined. This is reflected in the above table where continuous features (*age* and *hours_per_week*) have been varied to reach their extreme values (*range of age: [17, 90]*; *range of hours_per_week: [1, 99]*) for most of the counterfactuals. This is the reason why the distances are divided by a scaling factor. Deviation from the median provides a robust measure of the variability of a feature’s values, and thus dividing by the MAD allows us to capture the relative prevalence of observing the feature at a particular value (see our [paper](https://arxiv.org/pdf/1905.07697.pdf) for more details). 3. Trading off between proximity and diversity goals We acknowledge that not all counterfactual explanations may be feasible for a user. In general, counterfactuals closer to an individual's profile will be more feasible. Diversity is also important to help an individual choose between multiple possible options. DiCE allows tunable parameters *proximity_weight* (default: 0.5) and *diversity_weight* (default: 1.0) to handle proximity and diversity respectively. Below, we increase the proximity weight and see how the counterfactuals change.
###Code
# change proximity_weight from default value of 0.5 to 1.5
dice_exp = exp.generate_counterfactuals(query_instance, total_CFs=4, desired_class="opposite",
proximity_weight=1.5, diversity_weight=1.0)
# visualize the results
dice_exp.visualize_as_dataframe()
###Output
Query instance (original outcome : 0)
###Markdown
As we see from above table, both continuous and categorical features are more closer to the original query instance and the counterfactuals are also less diverse than before. 4. Selecting the features to vary While counterfactuals provide *actionable* alternative profiles to achieve a different outcome, we note that some of the generated explanations suggest changes in features that cannot be varied easily (such as age), or sensitive attributes like race or gender. Hence, DiCE allows feeding in a list of features that are allowed to vary through a *features_to_vary* parameter.
###Code
dice_exp = exp.generate_counterfactuals(query_instance, total_CFs=4, desired_class="opposite",
features_to_vary=['workclass','education','occupation','hours_per_week'])
# visualize the results
dice_exp.visualize_as_dataframe()
###Output
Query instance (original outcome : 0)
###Markdown
Advanced options to customize Counterfactual ExplanationsHere we discuss a few ways to change DiCE's behavior. * Train a custom ML model * Changing feature weights that decide relative importance of features in perturbation* Trading off between proximity and diversity goals* Selecting the features to change
###Code
# import DiCE
import dice_ml
from dice_ml.utils import helpers # helper functions
# Tensorflow libraries
import tensorflow as tf
from tensorflow import keras
###Output
_____no_output_____
###Markdown
Loading dataset We use "adult" income dataset from UCI Machine Learning Repository (https://archive.ics.uci.edu/ml/datasets/adult). For demonstration purposes, we transform the data as detailed in **dice_ml.utils.helpers** module.
###Code
dataset = helpers.load_adult_income_dataset()
dataset.head()
d = dice_ml.Data(dataframe=dataset, continuous_features=['age', 'hours_per_week'], outcome_name='income')
###Output
_____no_output_____
###Markdown
1. Training a custom ML model Below, we build an Artificial Neural Network based on Keras Tensorflow framework.
###Code
# seeding random numbers for reproducibility
from numpy.random import seed
seed(1)
from tensorflow import set_random_seed
set_random_seed(2)
sess = tf.InteractiveSession()
train, _ = d.split_data(d.normalize_data(d.one_hot_encoded_data))
X_train = train.loc[:, train.columns != 'income']
y_train = train.loc[:, train.columns == 'income']
ann_model = keras.Sequential()
ann_model.add(keras.layers.Dense(20, input_shape=(X_train.shape[1],), kernel_regularizer=keras.regularizers.l1(0.001), activation=tf.nn.relu))
ann_model.add(keras.layers.Dense(1, activation=tf.nn.sigmoid))
ann_model.compile(loss='binary_crossentropy', optimizer=tf.keras.optimizers.Adam(0.01), metrics=['accuracy'])
ann_model.fit(X_train, y_train, validation_split=0.20, epochs=100, verbose=0, class_weight={0:1,1:2})
# provide the trained ML model to DiCE's model object
m = dice_ml.Model(model=ann_model)
###Output
_____no_output_____
###Markdown
Generate diverse counterfactuals
###Code
# initiate DiCE
exp = dice_ml.Dice(d, m)
# query instance in the form of a dictionary; keys: feature name, values: feature value
query_instance = {'age':22,
'workclass':'Private',
'education':'HS-grad',
'marital_status':'Single',
'occupation':'Service',
'race': 'White',
'gender':'Female',
'hours_per_week': 45}
# generate counterfactuals
dice_exp = exp.generate_counterfactuals(query_instance, total_CFs=4, desired_class="opposite")
# visualize the results
dice_exp.visualize_as_dataframe()
###Output
Query instance (original outcome : 0)
###Markdown
2. Changing feature weights It may be the case that some features are harder to change than others (e.g., education level is harder to change than working hours per week). DiCE allows input of relative difficulty in changing a feature through specifying *feature weights*. A higher feature weight means that the feature is harder to change than others. For instance, one way is to use the median absolute deviation (MAD) as a measure of relative difficulty of changing a continuous feature. The MAD of a continuous feature conveys the variability of the feature, and is more robust than the standard deviation as it is less affected by outliers and non-normality. The inverse of MAD would then imply the ease of varying the feature and is hence used as feature weights in our optimization to reflect the difficulty of changing a continuous feature. By default, DiCE computes this internally and divides the distance between continuous features by the MAD of the feature's values in the training set. Let's see what their values are by computing them below:
###Code
# get MAD
mads = d.get_mads(normalized=True)
# create feature weights
feature_weights = {}
for feature in mads:
feature_weights[feature] = round(1/mads[feature], 2)
print(feature_weights)
###Output
{'age': 7.3, 'hours_per_week': 24.5}
###Markdown
The above feature weights imply that changing *age* is approximately seven times more difficult than changing categorical variables, and changing *hours_per_week* is approximately three times more difficult than changing *age*. Now, let's try to assign equal weights to all features and see how it affects the counterfactual generation. DiCE allows this through the *feature_weights* parameter.
###Code
# assigning equal weights
feature_weights = {'age': 1, 'hours_per_week': 1}
# generate counterfactuals
dice_exp = exp.generate_counterfactuals(query_instance, total_CFs=4, desired_class="opposite",
feature_weights=feature_weights)
# visualize the results
dice_exp.visualize_as_dataframe()
###Output
Query instance (original outcome : 0)
###Markdown
We transform continuous features and one-hot-encode categorical features to fall between 0 and 1 in order to handle the relative scale of features. However, this also means that the relative ease of changing continuous features is higher than that of categorical features when the total number of continuous features is small compared to the total number of categories of all categorical variables combined. This is reflected in the above table where continuous features (*age* and *hours_per_week*) have been varied to reach their extreme values (*range of age: [17, 90]*; *range of hours_per_week: [1, 99]*) for most of the counterfactuals. This is the reason why the distances are divided by a scaling factor. Deviation from the median provides a robust measure of the variability of a feature’s values, and thus dividing by the MAD allows us to capture the relative prevalence of observing the feature at a particular value (see our [paper](https://arxiv.org/pdf/1905.07697.pdf) for more details). 3. Trading off between proximity and diversity goals We acknowledge that not all counterfactual explanations may be feasible for a user. In general, counterfactuals closer to an individual's profile will be more feasible. Diversity is also important to help an individual choose between multiple possible options. DiCE allows tunable parameters *proximity_weight* (default: 0.5) and *diversity_weight* (default: 1.0) to handle proximity and diversity respectively. Below, we increase the proximity weight and see how the counterfactuals change.
###Code
# change proximity_weight from default value of 0.5 to 1.5
dice_exp = exp.generate_counterfactuals(query_instance, total_CFs=4, desired_class="opposite",
proximity_weight=1.5, diversity_weight=1.0)
# visualize the results
dice_exp.visualize_as_dataframe()
###Output
Query instance (original outcome : 0)
###Markdown
As we see from above table, both continuous and categorical features are more closer to the original query instance and the counterfactuals are also less diverse than before. 4. Selecting the features to vary While counterfactuals provide *actionable* alternative profiles to achieve a different outcome, we note that some of the generated explanations suggest changes in sensitive attributes like race or gender. Hence, DiCE allows feeding in a list of features that are allowed to vary through a *features_to_vary* parameter.
###Code
dice_exp = exp.generate_counterfactuals(query_instance, total_CFs=4, desired_class="opposite",
features_to_vary=['age','workclass','education','occupation','hours_per_week'])
# visualize the results
dice_exp.visualize_as_dataframe()
###Output
Query instance (original outcome : 0)
###Markdown
Advanced options to customize Counterfactual ExplanationsHere we discuss a few ways to change DiCE's behavior. * Train a custom ML model * Changing feature weights that decide relative importance of features in perturbation* Trading off between proximity and diversity goals* Selecting the features to change
###Code
# import DiCE
import dice_ml
from dice_ml.utils import helpers # helper functions
# Tensorflow libraries
import tensorflow as tf
from tensorflow import keras
# suppress deprecation warnings from TF
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
###Output
_____no_output_____
###Markdown
Loading dataset We use "adult" income dataset from UCI Machine Learning Repository (https://archive.ics.uci.edu/ml/datasets/adult). For demonstration purposes, we transform the data as detailed in **dice_ml.utils.helpers** module.
###Code
dataset = helpers.load_adult_income_dataset()
dataset.head()
d = dice_ml.Data(dataframe=dataset, continuous_features=['age', 'hours_per_week'], outcome_name='income')
###Output
_____no_output_____
###Markdown
1. Training a custom ML model Below, we build an Artificial Neural Network based on Keras Tensorflow framework.
###Code
# seeding random numbers for reproducibility
from numpy.random import seed
seed(1)
from tensorflow import set_random_seed
set_random_seed(2)
sess = tf.InteractiveSession()
train, _ = d.split_data(d.normalize_data(d.one_hot_encoded_data))
X_train = train.loc[:, train.columns != 'income']
y_train = train.loc[:, train.columns == 'income']
ann_model = keras.Sequential()
ann_model.add(keras.layers.Dense(20, input_shape=(X_train.shape[1],), kernel_regularizer=keras.regularizers.l1(0.001), activation=tf.nn.relu))
ann_model.add(keras.layers.Dense(1, activation=tf.nn.sigmoid))
ann_model.compile(loss='binary_crossentropy', optimizer=tf.keras.optimizers.Adam(0.01), metrics=['accuracy'])
ann_model.fit(X_train, y_train, validation_split=0.20, epochs=100, verbose=0, class_weight={0:1,1:2})
# the training will take some time for 100 epochs.
# you can wait or set verbose=1 to see the progress of training.
# provide the trained ML model to DiCE's model object
m = dice_ml.Model(model=ann_model)
###Output
_____no_output_____
###Markdown
Generate diverse counterfactuals
###Code
# initiate DiCE
exp = dice_ml.Dice(d, m)
# query instance in the form of a dictionary; keys: feature name, values: feature value
query_instance = {'age':22,
'workclass':'Private',
'education':'HS-grad',
'marital_status':'Single',
'occupation':'Service',
'race': 'White',
'gender':'Female',
'hours_per_week': 45}
# generate counterfactuals
dice_exp = exp.generate_counterfactuals(query_instance, total_CFs=4, desired_class="opposite")
# visualize the results
dice_exp.visualize_as_dataframe()
###Output
Query instance (original outcome : 0)
###Markdown
2. Changing feature weights It may be the case that some features are harder to change than others (e.g., education level is harder to change than working hours per week). DiCE allows input of relative difficulty in changing a feature through specifying *feature weights*. A higher feature weight means that the feature is harder to change than others. For instance, one way is to use the median absolute deviation (MAD) as a measure of relative difficulty of changing a continuous feature. The MAD of a continuous feature conveys the variability of the feature, and is more robust than the standard deviation as it is less affected by outliers and non-normality. The inverse of MAD would then imply the ease of varying the feature and is hence used as feature weights in our optimization to reflect the difficulty of changing a continuous feature. By default, DiCE computes this internally and divides the distance between continuous features by the MAD of the feature's values in the training set. Let's see what their values are by computing them below:
###Code
# get MAD
mads = d.get_mads(normalized=True)
# create feature weights
feature_weights = {}
for feature in mads:
feature_weights[feature] = round(1/mads[feature], 2)
print(feature_weights)
###Output
{'age': 7.3, 'hours_per_week': 24.5}
###Markdown
The above feature weights encode that changing *age* is approximately seven times more difficult than changing categorical variables, and changing *hours_per_week* is approximately three times more difficult than changing *age*. Of course, this may sound odd, since a person cannot change their age. In this case, what it's reflecting is that there is a higher diversity in age values than hours-per-week values. Below we show how to over-ride these weights to assign custom user-defined weights. Now, let's try to assign unit weights to the continuous features and see how it affects the counterfactual generation. DiCE allows this through *feature_weights* parameter.
###Code
# assigning equal weights
feature_weights = {'age': 1, 'hours_per_week': 1}
# generate counterfactuals
dice_exp = exp.generate_counterfactuals(query_instance, total_CFs=4, desired_class="opposite",
feature_weights=feature_weights)
# visualize the results
dice_exp.visualize_as_dataframe()
###Output
Query instance (original outcome : 0)
###Markdown
Note that we transform continuous features and one-hot-encode categorical features to fall between 0 and 1 in order to handle the relative scale of features. However, this also means that the relative ease of changing continuous features is higher than that of categorical features when the total number of continuous features is small compared to the total number of categories of all categorical variables combined. This is reflected in the above table where continuous features (*age* and *hours_per_week*) have been varied to reach their extreme values (*range of age: [17, 90]*; *range of hours_per_week: [1, 99]*) for most of the counterfactuals. This is the reason why the distances are divided by a scaling factor. Deviation from the median provides a robust measure of the variability of a feature’s values, and thus dividing by the MAD allows us to capture the relative prevalence of observing the feature at a particular value (see our [paper](https://arxiv.org/pdf/1905.07697.pdf) for more details). 3. Trading off between proximity and diversity goals We acknowledge that not all counterfactual explanations may be feasible for a user. In general, counterfactuals closer to an individual's profile will be more feasible. Diversity is also important to help an individual choose between multiple possible options. DiCE allows tunable parameters *proximity_weight* (default: 0.5) and *diversity_weight* (default: 1.0) to handle proximity and diversity respectively. Below, we increase the proximity weight and see how the counterfactuals change.
###Code
# change proximity_weight from default value of 0.5 to 1.5
dice_exp = exp.generate_counterfactuals(query_instance, total_CFs=4, desired_class="opposite",
proximity_weight=1.5, diversity_weight=1.0)
# visualize the results
dice_exp.visualize_as_dataframe()
###Output
Query instance (original outcome : 0)
###Markdown
As we see from above table, both continuous and categorical features are more closer to the original query instance and the counterfactuals are also less diverse than before. 4. Selecting the features to vary While counterfactuals provide *actionable* alternative profiles to achieve a different outcome, we note that some of the generated explanations suggest changes in features that cannot be varied easily (such as age), or sensitive attributes like race or gender. Hence, DiCE allows feeding in a list of features that are allowed to vary through a *features_to_vary* parameter.
###Code
dice_exp = exp.generate_counterfactuals(query_instance, total_CFs=4, desired_class="opposite",
features_to_vary=['workclass','education','occupation','hours_per_week'])
# visualize the results
dice_exp.visualize_as_dataframe()
###Output
Query instance (original outcome : 0)
|
docs/source/examples/heat.ipynb
|
###Markdown
HeatIn this example the laser-excitation of a sample `Structure` is shown.It includes the actual absorption of the laser light as well as the transient temperature profile calculation. SetupDo all necessary imports and settings.
###Code
import udkm1Dsim as ud
u = ud.u # import the pint unit registry from udkm1Dsim
import scipy.constants as constants
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
u.setup_matplotlib() # use matplotlib with pint units
###Output
_____no_output_____
###Markdown
StructureRefer to the [structure-example](structure.ipynb) for more details.
###Code
O = ud.Atom('O')
Ti = ud.Atom('Ti')
Sr = ud.Atom('Sr')
Ru = ud.Atom('Ru')
Pb = ud.Atom('Pb')
Zr = ud.Atom('Zr')
# c-axis lattice constants of the two layers
c_STO_sub = 3.905*u.angstrom
c_SRO = 3.94897*u.angstrom
# sound velocities [nm/ps] of the two layers
sv_SRO = 6.312*u.nm/u.ps
sv_STO = 7.800*u.nm/u.ps
# SRO layer
prop_SRO = {}
prop_SRO['a_axis'] = c_STO_sub # aAxis
prop_SRO['b_axis'] = c_STO_sub # bAxis
prop_SRO['deb_Wal_Fac'] = 0 # Debye-Waller factor
prop_SRO['sound_vel'] = sv_SRO # sound velocity
prop_SRO['opt_ref_index'] = 2.44+4.32j
prop_SRO['therm_cond'] = 5.72*u.W/(u.m *u.K) # heat conductivity
prop_SRO['lin_therm_exp'] = 1.03e-5 # linear thermal expansion
prop_SRO['heat_capacity'] = 'lambda T: 455.2 + 0.112*T - 2.1935e6/T**2' # heat capacity [J/kg K]
SRO = ud.UnitCell('SRO', 'Strontium Ruthenate', c_SRO, **prop_SRO)
SRO.add_atom(O, 0)
SRO.add_atom(Sr, 0)
SRO.add_atom(O, 0.5)
SRO.add_atom(O, 0.5)
SRO.add_atom(Ru, 0.5)
# STO substrate
prop_STO_sub = {}
prop_STO_sub['a_axis'] = c_STO_sub # aAxis
prop_STO_sub['b_axis'] = c_STO_sub # bAxis
prop_STO_sub['deb_Wal_Fac'] = 0 # Debye-Waller factor
prop_STO_sub['sound_vel'] = sv_STO # sound velocity
prop_STO_sub['opt_ref_index'] = 2.1+0j
prop_STO_sub['therm_cond'] = 12*u.W/(u.m *u.K) # heat conductivity
prop_STO_sub['lin_therm_exp'] = 1e-5 # linear thermal expansion
prop_STO_sub['heat_capacity'] = 'lambda T: 733.73 + 0.0248*T - 6.531e6/T**2' # heat capacity [J/kg K]
STO_sub = ud.UnitCell('STOsub', 'Strontium Titanate Substrate', c_STO_sub, **prop_STO_sub)
STO_sub.add_atom(O, 0)
STO_sub.add_atom(Sr, 0)
STO_sub.add_atom(O, 0.5)
STO_sub.add_atom(O, 0.5)
STO_sub.add_atom(Ti, 0.5)
S = ud.Structure('Single Layer')
S.add_sub_structure(SRO, 100) # add 100 layers of SRO to sample
S.add_sub_structure(STO_sub, 200) # add 200 layers of STO substrate
###Output
_____no_output_____
###Markdown
Initialize HeatThe `Heat` class requires a `Structure` object and a boolean `force_recalc` in order to overwrite previous simulation results. These results are saved in the `cache_dir` when `save_data` is enabled. Printing simulation messages can be en-/disabled using `disp_messages` and progress bars can be toggled using the boolean switch `progress_bar`.
###Code
h = ud.Heat(S, True)
h.save_data = False
h.disp_messages = True
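# the remaining switches mentioned above (the values set here match the defaults printed below)
h.progress_bar = True  # show progress bars during the calculations
h.cache_dir = './'     # directory used for cached results when save_data is enabled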
print(h)
###Output
Heat simulation properties:
This is the current structure for the simulations:
Structure properties:
Name : Single Layer
Thickness : 117.59 nanometer
Roughness : 0.00 nanometer
----
100 times Strontium Ruthenate: 39.49 nanometer
200 times Strontium Titanate Substrate: 78.10 nanometer
----
no substrate
Display properties:
================================ =======================================================
parameter value
================================ =======================================================
force recalc True
cache directory ./
display messages True
save data False
progress bar True
excitation fluence [] mJ/cm²
excitation delay [0.0] ps
excitation pulse length [0.0] ps
excitation wavelength 799.9999999999999 nm
excitation theta 90.0 deg
excitation multilayer absorption True
heat diffusion False
interpolate at interfaces 11
backend scipy
distances no distance mesh is set for heat diffusion calculations
top boundary type isolator
bottom boundary type isolator
================================ =======================================================
###Markdown
Simple ExcitationIn order to calculate the temperature of the sample after quasi-instantaneous (delta) photoexcitation the `excitation` must be set with the following parameters:* `fluence`* `delay_pump`* `pulse_width`* `multilayer_absorption`* `wavelength`* `theta`The angle of incidence `theta` changes the footprint of the excitation on the sample for any type of excitation. The `wavelength` and `theta` angle of the excitation are also relevant if `multilayer_absorption = True`. Otherwise the _Lambert-Beer_ law is used and its absorption profile is independent of `wavelength` and `theta`. __Note:__ the `fluence`, `delay_pump`, and `pulse_width` must be given as `array` or `list`. The simulation also requires a `delay` array as temporal grid as well as an initial temperature `init_temp`. The latter can either be a scalar, which is then the constant temperature of the whole sample structure, or an array of temperatures for each single layer in the structure (a per-layer example is sketched as a comment at the end of the next code cell).
###Code
h.excitation = {'fluence': [5]*u.mJ/u.cm**2,
'delay_pump': [0]*u.ps,
'pulse_width': [0]*u.ps,
'multilayer_absorption': True,
'wavelength': 800*u.nm,
'theta': 45*u.deg}
# when calculating the laser absorption profile using the Lambert-Beer law
# the opt_pen_depth must be set manually or calculated from the refractive index
SRO.set_opt_pen_depth_from_ref_index(800*u.nm)
STO_sub.set_opt_pen_depth_from_ref_index(800*u.nm)
# temporal and spatial grid
delays = np.r_[-10:200:0.1]*u.ps
_, _, distances = S.get_distances_of_layers()
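# init_temp may also be given per layer instead of as a scalar (hedged sketch, reusing the
# distances vector defined above to get the number of layers):
# init_temp_profile = 300*np.ones(len(distances))*u.K
# temp_map, delta_temp = h.get_temp_map(delays, init_temp_profile)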
###Output
c:\users\schick\general\python\wpy64-3770\python-3.7.7.amd64\lib\site-packages\pint\quantity.py:1138: RuntimeWarning: divide by zero encountered in double_scalars
magnitude = magnitude_op(self._magnitude, other_magnitude)
###Markdown
Laser Absorption ProfileHere the difference in the spatial laser absorption profile is shown between the multilayer absorption algorithm and the Lambert-Beer law. Note that Lambert-Beer does not include reflection of the incident light from the surface of the sample structure:
###Code
plt.figure()
dAdz, _, _, _ = h.get_multilayers_absorption_profile()
plt.plot(distances.to('nm'), dAdz, label='multilayer')
dAdz = h.get_Lambert_Beer_absorption_profile()
plt.plot(distances.to('nm'), dAdz, label='Lambert-Beer')
plt.legend()
plt.xlabel('Distance [nm]')
plt.ylabel('Differential Absorption')
plt.title('Laser Absorption Profile')
plt.show()
###Output
Absorption profile is calculated by multilayer formalism.
Total reflectivity of 56.1 % and transmission of 5.7 %.
Absorption profile is calculated by Lambert-Beer's law.
###Markdown
Temperature Map
###Code
temp_map, delta_temp = h.get_temp_map(delays, 300*u.K)
plt.figure(figsize=[6, 8])
plt.subplot(2, 1, 1)
plt.plot(distances.to('nm').magnitude, temp_map[101, :])
plt.xlim([0, distances.to('nm').magnitude[-1]])
plt.xlabel('Distance [nm]')
plt.ylabel('Temperature [K]')
plt.title('Temperature Profile')
plt.subplot(2, 1, 2)
plt.pcolormesh(distances.to('nm').magnitude, delays.to('ps').magnitude, temp_map)
plt.colorbar()
plt.xlabel('Distance [nm]')
plt.ylabel('Delay [ps]')
plt.title('Temperature Map')
plt.tight_layout()
plt.show()
###Output
_____no_output_____
###Markdown
Heat DiffusionIn order to enable heat diffusion the boolean switch `heat_diffusion` must be `True`.
###Code
# enable heat diffusion
h.heat_diffusion = True
# set the boundary conditions
h.boundary_conditions = {'top_type': 'isolator', 'bottom_type': 'isolator'}
# The resulting temperature profile is calculated in one line:
temp_map, delta_temp = h.get_temp_map(delays, 300*u.K)
plt.figure(figsize=[6, 8])
plt.subplot(2, 1, 1)
plt.plot(distances.to('nm').magnitude, temp_map[101, :], label=np.round(delays[101]))
plt.plot(distances.to('nm').magnitude, temp_map[501, :], label=np.round(delays[501]))
plt.plot(distances.to('nm').magnitude, temp_map[-1, :], label=np.round(delays[-1]))
plt.xlim([0, distances.to('nm').magnitude[-1]])
plt.xlabel('Distance [nm]')
plt.ylabel('Temperature [K]')
plt.legend()
plt.title('Temperature Profile with Heat Diffusion')
plt.subplot(2, 1, 2)
plt.pcolormesh(distances.to('nm').magnitude, delays.to('ps').magnitude, temp_map)
plt.colorbar()
plt.xlabel('Distance [nm]')
plt.ylabel('Delay [ps]')
plt.title('Temperature Map with Heat Diffusion')
plt.tight_layout()
plt.show()
###Output
_____no_output_____
###Markdown
Heat Diffusion ParametersFor heat diffusion simulations various parameters of the underlying pdepe solver can be altered. By default, the `backend` is set to `scipy` but can be switched to `matlab`. Currently, there is no obvious reason to choose _MATLAB_ over _SciPy_. Depending on the `backend`, either the `ode_options` or `ode_options_matlab` can be configured; they are handed directly to the actual solver (a hedged example is sketched as a comment in the next code cell). Please refer to the documentation of the actual backend and solver and the __API documentation__ for more details. The speed but also the result of the heat diffusion simulation strongly depends on the spatial grid handed to the solver. By default, one spatial grid point is used for every `Layer` (`AmorphousLayer` or `UnitCell`) in the `Structure`. The resulting `temp_map` will also always be interpolated on this spatial grid, which is equivalent to the distance vector returned by `S.get_distances_of_layers()`. As the solver for the heat diffusion usually suffers from large gradients, e.g. of thermal properties or initial temperatures, additional spatial grid points are added by default only for internal calculations. The number of additional points (should be an odd number, default is 11) is set by:
###Code
h.intp_at_interface = 11
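# example solver settings (assumption: with the scipy backend these keyword arguments are
# handed through to scipy.integrate.solve_ivp):
# h.ode_options = {'method': 'RK45', 'rtol': 1e-4, 'atol': 1e-6}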
###Output
_____no_output_____
###Markdown
The internally used spatial grid can be returned by:
###Code
dist_interp, original_indicies = S.interp_distance_at_interfaces(h.intp_at_interface)
###Output
_____no_output_____
###Markdown
The internal spatial grid can also be given by hand, e.g. to realize logarithmic steps for a rather large `Structure` (the cell below uses a linear grid; a logarithmic variant is sketched as a comment):
###Code
h.distances = np.linspace(0, distances.magnitude[-1], 100)*u.m
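# a logarithmically spaced grid could look like this instead (illustrative sketch):
# h.distances = np.r_[0, np.logspace(-10, np.log10(distances.magnitude[-1]), 99)]*u.m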
###Output
_____no_output_____
###Markdown
As already shown above, the heat diffusion simulation also supports a top and a bottom boundary condition. They can have the types:* `isolator`* `temperature`* `flux`For the latter two types a value must also be provided:
###Code
h.boundary_conditions = {'top_type': 'temperature', 'top_value': 500*u.K,
'bottom_type': 'flux', 'bottom_value': 5e11*u.W/u.m**2}
print(h)
# The resulting temperature profile is calculated in one line:
temp_map, delta_temp = h.get_temp_map(delays, 300*u.K)
plt.figure(figsize=[6, 8])
plt.subplot(2, 1, 1)
plt.plot(distances.to('nm').magnitude, temp_map[101, :], label=np.round(delays[101]))
plt.plot(distances.to('nm').magnitude, temp_map[501, :], label=np.round(delays[501]))
plt.plot(distances.to('nm').magnitude, temp_map[-1, :], label=np.round(delays[-1]))
plt.xlim([0, distances.to('nm').magnitude[-1]])
plt.xlabel('Distance [nm]')
plt.ylabel('Temperature [K]')
plt.legend()
plt.title('Temperature Profile with Heat Diffusion and BC')
plt.subplot(2, 1, 2)
plt.pcolormesh(distances.to('nm').magnitude, delays.to('ps').magnitude, temp_map)
plt.colorbar()
plt.xlabel('Distance [nm]')
plt.ylabel('Delay [ps]')
plt.title('Temperature Map with Heat Diffusion and BC')
plt.tight_layout()
plt.show()
###Output
_____no_output_____
###Markdown
Multipulse ExcitationAs already stated above, multiple pulses of variable fluence, pulse width, and delay are also possible. The heat diffusion simulation automatically splits the calculation into parts with and without excitation and adjusts the initial temporal step width according to the pulse width. Hence the solver does not miss any excitation pulses when adjusting its temporal step size. The temporal laser pulse profile is always assumed to be Gaussian and the pulse width must be given as FWHM:
###Code
h.excitation = {'fluence': [5, 5, 5]*u.mJ/u.cm**2,
'delay_pump': [0, 10, 20]*u.ps,
'pulse_width': [0.1, 0.1, 0.1]*u.ps,
'multilayer_absorption': True,
'wavelength': 800*u.nm,
'theta': 45*u.deg}
h.boundary_conditions = {'top_type': 'isolator', 'bottom_type': 'isolator'}
# The resulting temperature profile is calculated in one line:
temp_map, delta_temp = h.get_temp_map(delays, 300*u.K)
plt.figure(figsize=[6, 8])
plt.subplot(2, 1, 1)
plt.plot(distances.to('nm').magnitude, temp_map[101, :], label=np.round(delays[101]))
plt.plot(distances.to('nm').magnitude, temp_map[201, :], label=np.round(delays[201]))
plt.plot(distances.to('nm').magnitude, temp_map[301, :], label=np.round(delays[301]))
plt.plot(distances.to('nm').magnitude, temp_map[-1, :], label=np.round(delays[-1]))
plt.xlim([0, distances.to('nm').magnitude[-1]])
plt.xlabel('Distance [nm]')
plt.ylabel('Temperature [K]')
plt.legend()
plt.title('Temperature Profile Multipulse')
plt.subplot(2, 1, 2)
plt.pcolormesh(distances.to('nm').magnitude, delays.to('ps').magnitude, temp_map)
plt.colorbar()
plt.xlabel('Distance [nm]')
plt.ylabel('Delay [ps]')
plt.title('Temperature Map Multipulse')
plt.tight_layout()
plt.show()
###Output
_____no_output_____
###Markdown
$N$-Temperature ModelThe heat diffusion is also capable of simulating an _N_-temperature model which is often applied to empirically simulate the energy flow between _electrons_, _phonons_, and _spins_.In order to run the _NTM_ all thermo-elastic properties must be given as a list of _N_ elements.In addition the `sub_system_coupling` must be provided in order to allow for energy-flow between the sub-systems.The actual external laser-excitation is always set to happen within the __first__ sub-system, which is usually the _electron_-system.In case the thermo-elastic parameters are provided as _lambda_-functions, the `sub_system_coupling` requires the temperature `T` to be a vector of all sub-system-temperatures which can be accessed in the _lambda_-function via the bracket[ ]-notation. The `heat_capacity` and `lin_therm_exp` instead require the temperature `T` to be a scalar of only the current sub-system-temperature. For the `therm_cond` both options are available.
###Code
# update the relevant thermo-elastic properties of the layers in the sample structure
SRO.therm_cond = [0,
5.72*u.W/(u.m *u.K)]
SRO.lin_therm_exp = [1.03e-5,
1.03e-5]
SRO.heat_capacity = ['lambda T: 0.112*T',
'lambda T: 455.2 - 2.1935e6/T**2']
SRO.sub_system_coupling = ['lambda T: 5e17*(T[1]-T[0])',
'lambda T: 5e17*(T[0]-T[1])']
STO_sub.therm_cond = [0,
12*u.W/(u.m *u.K)]
STO_sub.lin_therm_exp = [1e-5,
1e-5]
STO_sub.heat_capacity = ['lambda T: 0.0248*T',
'lambda T: 733.73 - 6.531e6/T**2']
STO_sub.sub_system_coupling = ['lambda T: 5e17*(T[1]-T[0])',
'lambda T: 5e17*(T[0]-T[1])']
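# note: both coupling terms implement G*(T_other - T_own), i.e. a linear two-temperature-model
# coupling with G = 5e17 (presumably W/(m^3 K)) that transfers energy from the hotter to the
# cooler sub-system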
###Output
_____no_output_____
###Markdown
As no new `Structure` is built, the `num_sub_systems` must be updated by hand. Otherwise this happens automatically.
###Code
S.num_sub_systems = 2
###Output
_____no_output_____
###Markdown
Set the excitation conditions:
###Code
h.excitation = {'fluence': [5]*u.mJ/u.cm**2,
'delay_pump': [0]*u.ps,
'pulse_width': [0.25]*u.ps,
'multilayer_absorption': True,
'wavelength': 800*u.nm,
'theta': 45*u.deg}
h.boundary_conditions = {'top_type': 'isolator', 'bottom_type': 'isolator'}
delays = np.r_[-5:15:0.01]*u.ps
# The resulting temperature profile is calculated in one line:
temp_map, delta_temp = h.get_temp_map(delays, 300*u.K)
plt.figure(figsize=[6, 8])
plt.subplot(2, 1, 1)
plt.pcolormesh(distances.to('nm').magnitude, delays.to('ps').magnitude, temp_map[:, :, 0])
plt.colorbar()
plt.xlabel('Distance [nm]')
plt.ylabel('Delay [ps]')
plt.title('Temperature Map Electrons')
plt.subplot(2, 1, 2)
plt.pcolormesh(distances.to('nm').magnitude, delays.to('ps').magnitude, temp_map[:, :, 1])
plt.colorbar()
plt.xlabel('Distance [nm]')
plt.ylabel('Delay [ps]')
plt.title('Temperature Map Phonons')
plt.tight_layout()
plt.show()
plt.figure()
plt.plot(delays.to('ps'), np.mean(temp_map[:, S.get_all_positions_per_unique_layer()['SRO'], 0], 1), label='SRO electrons')
plt.plot(delays.to('ps'), np.mean(temp_map[:, S.get_all_positions_per_unique_layer()['SRO'], 1], 1), label='SRO phonons')
plt.ylabel('Temperature [K]')
plt.xlabel('Delay [ps]')
plt.legend()
plt.title('Temperature Electrons vs. Phonons')
plt.show()
###Output
_____no_output_____
###Markdown
HeatIn this example the laser-excitation of a sample `Structure` is shown. It includes the actual absorption of the laser light as well as the transient temperature profile calculation. SetupDo all necessary imports and settings.
###Code
import udkm1Dsim as ud
u = ud.u # import the pint unit registry from udkm1Dsim
import scipy.constants as constants
import numpy as np
import matplotlib.pyplot as plt
%matplotlib inline
u.setup_matplotlib() # use matplotlib with pint units
###Output
_____no_output_____
###Markdown
StructureRefer to the [structure-example](structure.ipynb) for more details.
###Code
O = ud.Atom('O')
Ti = ud.Atom('Ti')
Sr = ud.Atom('Sr')
Ru = ud.Atom('Ru')
Pb = ud.Atom('Pb')
Zr = ud.Atom('Zr')
# c-axis lattice constants of the two layers
c_STO_sub = 3.905*u.angstrom
c_SRO = 3.94897*u.angstrom
# sound velocities [nm/ps] of the two layers
sv_SRO = 6.312*u.nm/u.ps
sv_STO = 7.800*u.nm/u.ps
# SRO layer
prop_SRO = {}
prop_SRO['a_axis'] = c_STO_sub # aAxis
prop_SRO['b_axis'] = c_STO_sub # bAxis
prop_SRO['deb_Wal_Fac'] = 0 # Debye-Waller factor
prop_SRO['sound_vel'] = sv_SRO # sound velocity
prop_SRO['opt_ref_index'] = 2.44+4.32j
prop_SRO['therm_cond'] = 5.72*u.W/(u.m*u.K) # heat conductivity
prop_SRO['lin_therm_exp'] = 1.03e-5 # linear thermal expansion
prop_SRO['heat_capacity'] = '455.2 + 0.112*T - 2.1935e6/T**2' # heat capacity [J/kg K]
SRO = ud.UnitCell('SRO', 'Strontium Ruthenate', c_SRO, **prop_SRO)
SRO.add_atom(O, 0)
SRO.add_atom(Sr, 0)
SRO.add_atom(O, 0.5)
SRO.add_atom(O, 0.5)
SRO.add_atom(Ru, 0.5)
# STO substrate
prop_STO_sub = {}
prop_STO_sub['a_axis'] = c_STO_sub # aAxis
prop_STO_sub['b_axis'] = c_STO_sub # bAxis
prop_STO_sub['deb_Wal_Fac'] = 0 # Debye-Waller factor
prop_STO_sub['sound_vel'] = sv_STO # sound velocity
prop_STO_sub['opt_ref_index'] = 2.1+0j
prop_STO_sub['therm_cond'] = 12*u.W/(u.m*u.K) # heat conductivity
prop_STO_sub['lin_therm_exp'] = 1e-5 # linear thermal expansion
prop_STO_sub['heat_capacity'] = '733.73 + 0.0248*T - 6.531e6/T**2' # heat capacity [J/kg K]
STO_sub = ud.UnitCell('STOsub', 'Strontium Titanate Substrate', c_STO_sub, **prop_STO_sub)
STO_sub.add_atom(O, 0)
STO_sub.add_atom(Sr, 0)
STO_sub.add_atom(O, 0.5)
STO_sub.add_atom(O, 0.5)
STO_sub.add_atom(Ti, 0.5)
S = ud.Structure('Single Layer')
S.add_sub_structure(SRO, 100) # add 100 layers of SRO to sample
S.add_sub_structure(STO_sub, 200) # add 200 layers of STO substrate
###Output
_____no_output_____
###Markdown
Initialize HeatThe `Heat` class requires a `Structure` object and a boolean `force_recalc` in order to overwrite previous simulation results. These results are saved in the `cache_dir` when `save_data` is enabled. Printing of simulation messages can be en-/disabled using `disp_messages`, and progress bars can be en-/disabled using the boolean switch `progress_bar`.
###Code
h = ud.Heat(S, True)
h.save_data = False
h.disp_messages = True
print(h)
###Output
Heat simulation properties:
This is the current structure for the simulations:
Structure properties:
Name : Single Layer
Thickness : 117.59 nanometer
Roughness : 0.00 nanometer
----
100 times Strontium Ruthenate: 39.49 nanometer
200 times Strontium Titanate Substrate: 78.10 nanometer
----
no substrate
Display properties:
================================ =======================================================
parameter value
================================ =======================================================
force recalc True
cache directory ./
display messages True
save data False
progress bar True
excitation fluence [] mJ/cm²
excitation delay [0.0] ps
excitation pulse length [0.0] ps
excitation wavelength 799.9999999999999 nm
excitation theta 90.0 deg
excitation multilayer absorption True
heat diffusion False
interpolate at interfaces 11
backend scipy
distances no distance mesh is set for heat diffusion calculations
top boundary type isolator
bottom boundary type isolator
================================ =======================================================
###Markdown
Simple ExcitationIn order to calculate the temperature of the sample after quasi-instantaneous (delta) photoexcitation the `excitation` must be set with the following parameters:* `fluence`* `delay_pump`* `pulse_width`* `multilayer_absorption`* `wavelength`* `theta`The angle of incidence `theta` changes the footprint of the excitation on the sample for any type of excitation. The `wavelength` and `theta` angle of the excitation are also relevant if `multilayer_absorption = True`. Otherwise the _Lambert-Beer_ law is used and its absorption profile is independent of `wavelength` and `theta`. __Note:__ the `fluence`, `delay_pump`, and `pulse_width` must be given as `array` or `list`. The simulation also requires a `delay` array as temporal grid as well as an initial temperature `init_temp`. The latter can be either a scalar, which is then the constant temperature of the whole sample structure, or an array of temperatures for each single layer in the structure.
###Code
h.excitation = {'fluence': [5]*u.mJ/u.cm**2,
'delay_pump': [0]*u.ps,
'pulse_width': [0]*u.ps,
'multilayer_absorption': True,
'wavelength': 800*u.nm,
'theta': 45*u.deg}
# when calculating the laser absorption profile using the Lambert-Beer law
# the opt_pen_depth must be set manually or calculated from the refractive index
SRO.set_opt_pen_depth_from_ref_index(800*u.nm)
STO_sub.set_opt_pen_depth_from_ref_index(800*u.nm)
# temporal and spatial grid
delays = np.r_[-10:200:0.1]*u.ps
_, _, distances = S.get_distances_of_layers()
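# The initial temperature could also be given per layer instead of as a scalar.
# This is only a sketch and an assumption on our side: one value per layer,
# in the same order as the `distances` vector returned above.
# init_temp = 300*np.ones(len(distances))*u.K
# temp_map, delta_temp = h.get_temp_map(delays, init_temp)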
###Output
_____no_output_____
###Markdown
Laser Absorption ProfileHere the difference in the spatial laser absorption profile is shown between the multilayer absorption algorithm and the Lambert-Beer law. Note that Lambert-Beer does not include reflection of the incident light from the surface of the sample structure:
###Code
plt.figure()
dAdz, _, _, _ = h.get_multilayers_absorption_profile()
plt.plot(distances.to('nm'), dAdz, label='multilayer')
dAdz = h.get_Lambert_Beer_absorption_profile()
plt.plot(distances.to('nm'), dAdz, label='Lambert-Beer')
plt.legend()
plt.xlabel('Distance [nm]')
plt.ylabel('Differential Absorption')
plt.title('Laser Absorption Profile')
plt.show()
###Output
Absorption profile is calculated by multilayer formalism.
Total reflectivity of 56.1 % and transmission of 5.7 %.
Absorption profile is calculated by Lambert-Beer's law.
###Markdown
Temperature Map
###Code
temp_map, delta_temp = h.get_temp_map(delays, 300*u.K)
plt.figure(figsize=[6, 8])
plt.subplot(2, 1, 1)
plt.plot(distances.to('nm').magnitude, temp_map[101, :])
plt.xlim([0, distances.to('nm').magnitude[-1]])
plt.xlabel('Distance [nm]')
plt.ylabel('Temperature [K]')
plt.title('Temperature Profile')
plt.subplot(2, 1, 2)
plt.pcolormesh(distances.to('nm').magnitude, delays.to('ps').magnitude, temp_map, shading='auto')
plt.colorbar()
plt.xlabel('Distance [nm]')
plt.ylabel('Delay [ps]')
plt.title('Temperature Map')
plt.tight_layout()
plt.show()
###Output
_____no_output_____
###Markdown
Heat DiffusionIn order to enable heat diffusion the boolean switch `heat_diffusion` must be `True`.
###Code
# enable heat diffusion
h.heat_diffusion = True
# set the boundary conditions
h.boundary_conditions = {'top_type': 'isolator', 'bottom_type': 'isolator'}
# The resulting temperature profile is calculated in one line:
temp_map, delta_temp = h.get_temp_map(delays, 300*u.K)
plt.figure(figsize=[6, 8])
plt.subplot(2, 1, 1)
plt.plot(distances.to('nm').magnitude, temp_map[101, :], label=np.round(delays[101]))
plt.plot(distances.to('nm').magnitude, temp_map[501, :], label=np.round(delays[501]))
plt.plot(distances.to('nm').magnitude, temp_map[-1, :], label=np.round(delays[-1]))
plt.xlim([0, distances.to('nm').magnitude[-1]])
plt.xlabel('Distance [nm]')
plt.ylabel('Temperature [K]')
plt.legend()
plt.title('Temperature Profile with Heat Diffusion')
plt.subplot(2, 1, 2)
plt.pcolormesh(distances.to('nm').magnitude, delays.to('ps').magnitude, temp_map, shading='auto')
plt.colorbar()
plt.xlabel('Distance [nm]')
plt.ylabel('Delay [ps]')
plt.title('Temperature Map with Heat Diffusion')
plt.tight_layout()
plt.show()
###Output
_____no_output_____
###Markdown
Heat Diffusion ParametersFor heat diffusion simulations various parameters of the underlying pdepe solver can be altered. By default, the `backend` is set to `scipy` but can be switched to `matlab`. Currently, there is no obvious reason to choose _MATLAB_ over _SciPy_. Depending on the `backend` either the `ode_options` or `ode_options_matlab` can be configured and are directly handed to the actual solver. Please refer to the documentation of the actual backend and solver and the __API documentation__ for more details. The speed, but also the result, of the heat diffusion simulation strongly depends on the spatial grid handed to the solver. By default, one spatial grid point is used for every `Layer` (`AmorphousLayer` or `UnitCell`) in the `Structure`. The resulting `temp_map` will also always be interpolated onto this spatial grid, which is equivalent to the distance vector returned by `S.get_distances_of_layers()`. As the solver for the heat diffusion usually suffers from large gradients, e.g. of thermal properties or initial temperatures, additional spatial grid points are added by default, but only for internal calculations. The number of additional points (should be an odd number, default is 11) is set by:
###Code
h.intp_at_interface = 11
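# The solver options can be adjusted via the `ode_options` dict mentioned above.
# This is only a sketch: the exact keys depend on the SciPy solver used by the
# backend (assumed here to accept standard tolerance arguments), so treat the
# values below as an illustration rather than the library's documented defaults.
# h.ode_options = {'rtol': 1e-4, 'atol': 1e-6}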
###Output
_____no_output_____
###Markdown
The internally used spatial grid can be returned by:
###Code
dist_interp, original_indices = S.interp_distance_at_interfaces(h.intp_at_interface)
###Output
_____no_output_____
###Markdown
The internal spatial grid can also be given by hand, e.g. to realize logarithmic steps for a rather large `Structure`:
###Code
h.distances = np.linspace(0, distances.magnitude[-1], 100)*u.m
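# The markdown above mentions logarithmic steps; a possible sketch for such a
# grid (assuming distances from roughly 0.1 nm up to the total sample thickness):
# h.distances = np.logspace(-10, np.log10(distances.magnitude[-1]), 100)*u.m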
###Output
_____no_output_____
###Markdown
As already shown above, the heat diffusion simulation also supports top and bottom boundary conditions. These can have the types:* `isolator`* `temperature`* `flux`For the latter two types a value must also be provided:
###Code
h.boundary_conditions = {'top_type': 'temperature', 'top_value': 500*u.K,
'bottom_type': 'flux', 'bottom_value': 5e11*u.W/u.m**2}
print(h)
# The resulting temperature profile is calculated in one line:
temp_map, delta_temp = h.get_temp_map(delays, 300*u.K)
plt.figure(figsize=[6, 8])
plt.subplot(2, 1, 1)
plt.plot(distances.to('nm').magnitude, temp_map[101, :], label=np.round(delays[101]))
plt.plot(distances.to('nm').magnitude, temp_map[501, :], label=np.round(delays[501]))
plt.plot(distances.to('nm').magnitude, temp_map[-1, :], label=np.round(delays[-1]))
plt.xlim([0, distances.to('nm').magnitude[-1]])
plt.xlabel('Distance [nm]')
plt.ylabel('Temperature [K]')
plt.legend()
plt.title('Temperature Profile with Heat Diffusion and BC')
plt.subplot(2, 1, 2)
plt.pcolormesh(distances.to('nm').magnitude, delays.to('ps').magnitude, temp_map, shading='auto')
plt.colorbar()
plt.xlabel('Distance [nm]')
plt.ylabel('Delay [ps]')
plt.title('Temperature Map with Heat Diffusion and BC')
plt.tight_layout()
plt.show()
###Output
_____no_output_____
###Markdown
Multipulse ExcitationAs already stated above, multiple pulses of variable fluence, pulse width, and delay are also possible. The heat diffusion simulation automatically splits the calculation into parts with and without excitation and adjusts the initial temporal step width according to the pulse width. Hence the solver does not miss any excitation pulses when adjusting its temporal step size. The temporal laser pulse profile is always assumed to be Gaussian and the pulse width must be given as FWHM:
###Code
h.excitation = {'fluence': [5, 5, 5, 5]*u.mJ/u.cm**2,
'delay_pump': [0, 10, 20, 20.5]*u.ps,
'pulse_width': [0.1, 0.1, 0.1, 0.5]*u.ps,
'multilayer_absorption': True,
'wavelength': 800*u.nm,
'theta': 45*u.deg}
h.boundary_conditions = {'top_type': 'isolator', 'bottom_type': 'isolator'}
# The resulting temperature profile is calculated in one line:
temp_map, delta_temp = h.get_temp_map(delays, 300*u.K)
plt.figure(figsize=[6, 8])
plt.subplot(2, 1, 1)
plt.plot(distances.to('nm').magnitude, temp_map[101, :], label=np.round(delays[101]))
plt.plot(distances.to('nm').magnitude, temp_map[201, :], label=np.round(delays[201]))
plt.plot(distances.to('nm').magnitude, temp_map[301, :], label=np.round(delays[301]))
plt.plot(distances.to('nm').magnitude, temp_map[-1, :], label=np.round(delays[-1]))
plt.xlim([0, distances.to('nm').magnitude[-1]])
plt.xlabel('Distance [nm]')
plt.ylabel('Temperature [K]')
plt.legend()
plt.title('Temperature Profile Multipulse')
plt.subplot(2, 1, 2)
plt.pcolormesh(distances.to('nm').magnitude, delays.to('ps').magnitude, temp_map, shading='auto')
plt.colorbar()
plt.xlabel('Distance [nm]')
plt.ylabel('Delay [ps]')
plt.title('Temperature Map Multipulse')
plt.tight_layout()
plt.show()
###Output
_____no_output_____
###Markdown
$N$-Temperature ModelThe heat diffusion is also capable of simulating an _N_-temperature model which is often applied to empirically simulate the energy flow between _electrons_, _phonons_, and _spins_. In order to run the _NTM_ all thermo-elastic properties must be given as a list of _N_ elements corresponding to the different sub-systems. The actual external laser-excitation is always set to happen within the __first__ sub-system, which is usually the electron system. In addition the `sub_system_coupling` must be provided in order to allow for energy flow between the sub-systems. `sub_system_coupling` is often set to a constant prefactor multiplied with the difference between the electronic and phononic temperatures, as in the example below. For sufficiently high temperatures, this prefactor also depends on temperature. See [here](https://faculty.virginia.edu/CompMat/electron-phonon-coupling/) for an overview. In case the thermo-elastic parameters are provided as functions of the temperature $T$, the `sub_system_coupling` requires the temperature `T` to be a vector of all sub-system temperatures which can be accessed in the function string via the underscore notation. The `heat_capacity` and `lin_therm_exp` instead require the temperature `T` to be a scalar of only the current sub-system temperature. For the `therm_cond` both options are available.
###Code
# update the relevant thermo-elastic properties of the layers in the sample structure
SRO.therm_cond = [0,
5.72*u.W/(u.m*u.K)]
SRO.lin_therm_exp = [1.03e-5,
1.03e-5]
SRO.heat_capacity = ['0.112*T',
'455.2 - 2.1935e6/T**2']
SRO.sub_system_coupling = ['5e17*(T_1-T_0)',
'5e17*(T_0-T_1)']
STO_sub.therm_cond = [0,
12*u.W/(u.m*u.K)]
STO_sub.lin_therm_exp = [1e-5,
1e-5]
STO_sub.heat_capacity = ['0.0248*T',
'733.73 - 6.531e6/T**2']
STO_sub.sub_system_coupling = ['5e17*(T_1-T_0)',
'5e17*(T_0-T_1)']
###Output
Number of subsystems changed from 1 to 2.
Number of subsystems changed from 1 to 2.
###Markdown
As no new `Structure` is built, the `num_sub_systems` must be updated by hand. Otherwise this happens automatically.
###Code
S.num_sub_systems = 2
###Output
_____no_output_____
###Markdown
Set the excitation conditions:
###Code
h.excitation = {'fluence': [5]*u.mJ/u.cm**2,
'delay_pump': [0]*u.ps,
'pulse_width': [0.25]*u.ps,
'multilayer_absorption': True,
'wavelength': 800*u.nm,
'theta': 45*u.deg}
h.boundary_conditions = {'top_type': 'isolator', 'bottom_type': 'isolator'}
delays = np.r_[-5:15:0.01]*u.ps
# The resulting temperature profile is calculated in one line:
temp_map, delta_temp = h.get_temp_map(delays, 300*u.K)
plt.figure(figsize=[6, 8])
plt.subplot(2, 1, 1)
plt.pcolormesh(distances.to('nm').magnitude, delays.to('ps').magnitude, temp_map[:, :, 0], shading='auto')
plt.colorbar()
plt.xlabel('Distance [nm]')
plt.ylabel('Delay [ps]')
plt.title('Temperature Map Electrons')
plt.subplot(2, 1, 2)
plt.pcolormesh(distances.to('nm').magnitude, delays.to('ps').magnitude, temp_map[:, :, 1], shading='auto')
plt.colorbar()
plt.xlabel('Distance [nm]')
plt.ylabel('Delay [ps]')
plt.title('Temperature Map Phonons')
plt.tight_layout()
plt.show()
plt.figure()
plt.plot(delays.to('ps'), np.mean(temp_map[:, S.get_all_positions_per_unique_layer()['SRO'], 0], 1), label='SRO electrons')
plt.plot(delays.to('ps'), np.mean(temp_map[:, S.get_all_positions_per_unique_layer()['SRO'], 1], 1), label='SRO phonons')
plt.ylabel('Temperature [K]')
plt.xlabel('Delay [ps]')
plt.legend()
plt.title('Temperature Electrons vs. Phonons')
plt.show()
###Output
_____no_output_____
|
HiSeqRuns_combined/04_assemblies/01_LLMGA/04_Ave/01_llmga-find-refs.ipynb
|
###Markdown
Goal* Running `LLMGA-find-refs` pipeline on all Aves samples Var
###Code
work_dir = '/ebio/abt3_projects/Georg_animal_feces/data/metagenome/HiSeqRuns126-133-0138/wOutVertebrata/MG_assembly_ave/LLMGA-find-refs/'
base_dir = '/ebio/abt3_projects/Georg_animal_feces/data/metagenome/HiSeqRuns126-133-0138/wOutVertebrata/'
bracken_taxIDs = file.path(base_dir, 'LLMGP_bracken_files.tsv')
metadata_file = '/ebio/abt3_projects/Georg_animal_feces/data/mapping/unified_metadata_complete_190529.tsv'
pipeline_dir = '/ebio/abt3_projects/Georg_animal_feces/bin/llmga-find-refs/'
###Output
_____no_output_____
###Markdown
Init
###Code
library(dplyr)
library(tidyr)
library(ggplot2)
source('/ebio/abt3_projects/Georg_animal_feces/code/misc_r_functions/init.R')
make_dir(dirname(work_dir))
make_dir(work_dir)
###Output
Created directory: /ebio/abt3_projects/Georg_animal_feces/data/metagenome/HiSeqRuns126-133-0138/wOutVertebrata/MG_assembly_ave
Created directory: /ebio/abt3_projects/Georg_animal_feces/data/metagenome/HiSeqRuns126-133-0138/wOutVertebrata/MG_assembly_ave/LLMGA-find-refs/
###Markdown
Just Aves
###Code
meta = read.delim(metadata_file, sep='\t') %>%
dplyr::select(SampleID, class, order, family, genus, scientific_name, diet, habitat)
meta %>% dfhead
taxIDs = read.delim(bracken_taxIDs, sep='\t') %>%
mutate(Sample = gsub('^XF', 'F', Sample))
taxIDs %>% dfhead
# joining
taxIDs = taxIDs %>%
inner_join(meta, c('Sample'='SampleID'))
taxIDs %>% dfhead
# all metadata
taxIDs %>%
group_by(class) %>%
summarize(n = n()) %>%
ungroup()
taxIDs_f = taxIDs %>%
filter(class == 'Aves')
taxIDs_f %>% dfhead
outF = file.path(work_dir, 'LLMGP_bracken_taxIDs_ave.tsv')
taxIDs_f %>%
arrange(class, order, family, genus) %>%
write.table(outF, sep='\t', quote=FALSE, row.names=FALSE)
cat('File written:', outF, '\n')
###Output
File written: /ebio/abt3_projects/Georg_animal_feces/data/metagenome/HiSeqRuns126-133-0138/wOutVertebrata/MG_assembly_ave/LLMGA-find-refs//LLMGP_bracken_taxIDs_ave.tsv
###Markdown
llmga-find-refs
###Code
F = file.path(work_dir, 'config.yaml')
cat_file(F)
###Output
#-- I/O --#
# table with sample --> read_file information
samples_file: /ebio/abt3_projects/Georg_animal_feces/data/metagenome/HiSeqRuns126-133-0138/wOutVertebrata/MG_assembly_ave/LLMGA-find-refs/LLMGP_bracken_taxIDs_ave.tsv
# output location
output_dir: /ebio/abt3_projects/Georg_animal_feces/data/metagenome/HiSeqRuns126-133-0138/wOutVertebrata/MG_assembly_ave/LLMGA-find-refs/
#-- database --#
centrifuge_db: /ebio/abt3_projects/databases_no-backup/centrifuge/p+h+v
taxonomy_db: /ebio/abt3_projects/databases_no-backup/NCBI_genome/NCBI_taxid_db/taxa.sqlite
#-- software parameters --#
params:
# If providing paired-end reads
seqtk: 1000000 # subsampled read pairs per sample; use "Skip" to skip
centrifuge: "" # for taxonomic profiling unless providing "TaxID" column
# If providing tables of TaxIDs instead of reads
taxID_column: taxonomy_id # column in the table(s) containing the taxIDs
abundance_column: fraction_total_reads # column in the table(s) containing the abundances
# Genome download & derep
abund_cutoff: 0.01 # taxon must have >= max(abundance) among samples
ncbi_genome_download: archaea,bacteria,viral -s refseq -l complete --retries 3
drep: --ignoreGenomeQuality --length 10000 --P_ani 0.9 --S_ani 0.99 # use "Skip" to skip
#-- snakemake pipeline --#
pipeline:
snakemake_folder: ./
script_folder: bin/scripts/
temp_folder: /tmp/global2/ # <-- your username will be added to this
###Markdown
Run ```(snakemake_dev) @ rick:/ebio/abt3_projects/Georg_animal_feces/bin/llmga-find-refs$ screen -L -S llmga-fr-ga-ave ./snakemake_sge.sh /ebio/abt3_projects/Georg_animal_feces/data/metagenome/HiSeqRuns126-133-0138/wOutVertebrata/MG_assembly_ave/LLMGA-find-refs/config.yaml cluster.json /ebio/abt3_projects/Georg_animal_feces/data/metagenome/HiSeqRuns126-133-0138/wOutVertebrata/MG_assembly_ave/LLMGA-find-refs/SGE_log 20``` Summary
###Code
# listing all reference genomes
ref_dir = file.path(work_dir, 'references', 'drep', 'dereplicated_genomes')
F = list.files(ref_dir, '*.fna', full.names=TRUE)
cat('Number of ref. genomes:', F %>% length, '\n')
# combining all refs
ref_file = file.path(work_dir, 'references', 'ref_genomes.fna')
if(file.exists(ref_file)){ file.remove(ref_file) }
file.create(ref_file)
for (f in F){
file.append(ref_file, f)
}
file.size(ref_file)
cat('File written:', ref_file, '\n')
###Output
_____no_output_____
###Markdown
sessionInfo
###Code
pipelineInfo(pipeline_dir)
sessionInfo()
###Output
_____no_output_____
|
HVAC/HVAC-Cooling-During-4CP.ipynb
|
###Markdown
HVAC vs ERCOT 4CP event alignment:This notebook explores how household cooling aligns with the State of Texas’s critical peak power events. We will be using one-minute interval data from 25 Texas homes and will determine what percentage of homes had air conditioning on during these CP events for each year from 2016-2019. To prepare the data we selected 25 Texas homes with PV and HVAC systems. We used one-minute energy data (from electricity.eg_realpower_1min) for the selected homes for the CP dates mentioned below.
###Code
# These are the ERCOT 4CP events (start date/time and end date/time) for 2016 - 2019 acquired from
# http://mis.ercot.com/misapp/GetReports.do?reportTypeId=13037&reportTitle=Planned%20Service%20Four%20Coincident%20Peak%20Calculations&showHTMLView=&mimicKey
event_start_dates = ['2019-06-19 17:00:00-05', '2019-07-30 16:30:00-05', '2019-08-12 17:00:00-05', '2019-09-06 16:45:00-05',
'2018-06-27 17:00:00-05', '2018-07-19 17:00:00-05', '2018-08-23 16:45:00-05', '2018-09-19 16:30:00-05',
'2017-06-23 16:45:00-05', '2017-07-28 17:00:00-05', '2017-08-16 17:00:00-05', '2017-09-20 16:45:00-05',
'2016-06-15 17:00:00-05', '2016-07-14 16:00:00-05', '2016-08-11 16:30:00-05', '2016-09-19 16:15:00-05'
]
event_end_dates = ['2019-06-19 17:15:00-05', '2019-07-30 16:45:00-05', '2019-08-12 17:15:00-05', '2019-09-06 17:00:00-05',
'2018-06-27 17:15:00-05', '2018-07-19 17:15:00-05', '2018-08-23 17:00:00-05', '2018-09-19 16:45:00-05',
'2017-06-23 17:00:00-05', '2017-07-28 17:15:00-05', '2017-08-16 17:15:00-05', '2017-09-20 17:00:00-05',
'2016-06-15 17:15:00-05', '2016-07-14 16:15:00-05', '2016-08-11 16:45:00-05', '2016-09-19 16:30:00-05']
#import packages
import pandas as pd
import matplotlib.pyplot as plt
import os
from datetime import datetime as dt
import numpy as np
import sys
%matplotlib inline
sys.executable # shows you your path to the python you're using
#Read pre-processed data
df3 = pd.read_csv('/shared/JupyterHub-Examples-Data/hvac/hvac_cooling_4cp.zip', compression='zip')
homes_list = df3.dataid.unique()
#replace null with 0's
df3 = df3.fillna(0)
df3.head(10)
#create new column hvac.(hvac is sum of air compressor and furnace circuits.)
df3['hvac'] = df3['air1'] + df3['air2'] + df3['furnace1'] + df3['furnace2']
df3['localminute'] = pd.to_datetime(df3['localminute'])
df3['localminute'] = df3['localminute'].dt.date
df3.head(10)
#create new dataframe with columns dataid,hvac and date.
hvac_cp_data = pd.DataFrame(df3, columns = ['dataid','hvac', 'localminute'])
hvac_cp_data.head(10)
#group data by dataid and date and take average of hvac.
#This gives us average hvac usage for each home for each CP event day.
hvac_cp_data_grouped = hvac_cp_data.groupby(['localminute','dataid']).mean()
reset_hvac_cp_data_grouped = hvac_cp_data_grouped.reset_index()
reset_hvac_cp_data_grouped.head(10)
#filtering data only for those intervals when the AC is actually cooling.
#We consider the AC to be ON and cooling only when it's drawing at least 500W.
hvac_on = reset_hvac_cp_data_grouped[reset_hvac_cp_data_grouped['hvac'] > 0.5]
homes_on = (hvac_on['localminute'].value_counts()/25)*100
homes_on_perc = pd.DataFrame(homes_on.reset_index().values, columns=["Date", "Percentage"])
homes_on_percindex = homes_on_perc.sort_index(axis = 0, ascending=True)
homes_on_percindex
#plot bar chart
homes_on_percindex.plot.bar(x='Date', y='Percentage',figsize=(20,10),color='orange',title="Home's cooling alignment with Texas CP events")
plt.show()
homes_on_percindex['Percentage'].mean()
###Output
_____no_output_____
|
content/labs/lab7/cs109b_lab7_student_working.ipynb
|
###Markdown
CS109B Data Science 2: Advanced Topics in Data Science Lab 7 - Clustering**Harvard University****Spring 2019****Instructors**: Mark Glickman and Pavlos Protopapas
###Code
## RUN THIS CELL TO PROPERLY HIGHLIGHT THE EXERCISES
import requests
from IPython.core.display import HTML
styles = requests.get("https://raw.githubusercontent.com/Harvard-IACS/2019-CS109B/master/content/styles/cs109.css").text
HTML(styles)
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
%matplotlib inline
import rpy2
# import os
# os.environ['R_HOME'] = "/usr/share/anaconda3/lib/R"
from rpy2.robjects.packages import importr
r_utils = importr('utils')
#If there are errors about missing R packages, run the relevant code below:
#r_utils.install_packages('aplpack')
# r_utils.install_packages('TeachingDemos')
# r_utils.install_packages('ggplot2')
# r_utils.install_packages('cluster')
# r_utils.install_packages('factoextra')
# r_utils.install_packages('dbscan')
#
# #If you need to install ggplot2 version 2.2.1, then run the following code:
# r_utils.install_packages("devtools")
# r_remotes = importr("remotes")
# r_remotes.install_version("ggplot2",version="2.2.1")
###Output
_____no_output_____
###Markdown
Import and plot the dataWe begin, as always, by importing and exploring the data. Here, we're exploring the crazy cluster data from the end of yesterday's lecture. As you know, by the end we'll have an essentially automated method for extracting the pattern in these data. Python
###Code
multishapes = pd.read_csv("data/multishapes.csv")
scaled_df = multishapes[['x','y']]
scaled_df.describe()
scaled_df.plot.scatter(x='x',y='y',c='Black',title="Multishapes data",figsize=(11,8.5));
###Output
_____no_output_____
###Markdown
RWe'll work in parallel in R, as some later functions work better in that language.
###Code
from rpy2.robjects import pandas2ri
pandas2ri.activate()
r_scaled_df = pandas2ri.py2ri(scaled_df) # change pandas df to R df
###Output
_____no_output_____
###Markdown
There is an 'automatic' function in rpy2 to convert a pandas data frame into an R object. However, this function isn't included in the package's documentation, so use it at your own risk. The old style of building data frames will always work.
###Code
r_scaled_df = rpy2.robjects.DataFrame({"x": rpy2.robjects.FloatVector(scaled_df['x']),
"y": rpy2.robjects.FloatVector(scaled_df['y'])
})
r_scaled_df
###Output
_____no_output_____
###Markdown
Discussion- Above, we named a data frame 'scaled', but didn't actually scale it! When we're clustering should we scale or not? KmeansWe kick things off with the old workhorse of clustering: KMeans. The "who cares, it runs" code is below, but first a small conceptual digression on how/why Kmeans does[n't] work: http://web.stanford.edu/class/ee103/visualizations/kmeans/kmeans.htmlLessons: - Initializations matter; run multiple times - Total squared distance should never get worse during an update - KMeans can struggle with clusters that are close together; they can get lumped into one - There's no notion of 'not part of any cluster' or 'part of two clusters'
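Formally, for clusters $C_1,\dots,C_k$ with centroids $\mu_1,\dots,\mu_k$, KMeans tries to minimize the within-cluster sum of squares $\sum_{j=1}^{k}\sum_{i\in C_j}\lVert x_i-\mu_j\rVert^2$; each assignment step and each centroid update can only keep this objective the same or lower it, which is why the total squared distance never gets worse during an update.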
###Code
from sklearn.cluster import KMeans
fitted_km = KMeans(n_clusters=5, init='random', n_init=5, random_state=109).fit(scaled_df)
# n_init means we cluster 5 times and pick the best cluster result (even with the same n_clusters = number of clutsters)
display(fitted_km.cluster_centers_)
display(fitted_km.labels_[0:10])
###Output
_____no_output_____
###Markdown
Above, we see two useful components of the kmeans fit: the coordinates of the 5 cluster centers, and the clusters assigned to (the first few) points of data R In R, Kmeans is part of the 'stats' library, so we first `importr` 'stats', then call the `.kmeans` function within that grouping. As always, we refer to R's documentation to figure out the name of the function and its arguments. [Link to the documentation](https://www.rdocumentation.org/packages/stats/versions/3.5.2/topics/kmeans)We also set R's random number generator, which allows us to get exactly reproducible results.
###Code
r_base = importr('base')
r_base.set_seed(109) #set seed for random number generation
r_stats = importr('stats')
r_km_out = r_stats.kmeans(scaled_df, 5, nstart=5)
display(r_km_out)
display(list(r_km_out.names))
display(r_km_out.rx2("size"))
###Output
_____no_output_____
###Markdown
Recall that R functions typically return something between a named list and a dictionary. You can display it directly, and access particular segments via `.rx2()` Plotting PythonAs of 2019, python doesn't have many specialized plotting methods, you mostly have to do it yourself. Take note of matplotlib's `c=` argument to color items in a plot, and stacking two different plotting functions in the same cell.
###Code
plt.scatter(scaled_df['x'],scaled_df['y'], c=fitted_km.labels_);
plt.scatter(fitted_km.cluster_centers_[:,0],fitted_km.cluster_centers_[:,1], c='r', marker='h', s=100);
###Output
_____no_output_____
###Markdown
RAs before, R is loaded with custom plotting. We're upgrading how we call R for plotting-- once you `%load_ext rpy2.ipython` (once per notebook, like `%matplotlib inline`) use %R to run a single line of R code, or %%R to run a whole cell of R code. Transfer R objects from python to R by adding `-i` and the object name on the %%R line. Summary: - `%load_ext rpy2.ipython` once - `%%R` when you want to run multiple lines of R code - Won't send outputs back into python - Will display plots in the notebook - Move robject variables to R via `-i`
###Code
%load_ext rpy2.ipython
%%R -i r_km_out -i r_scaled_df
library(factoextra)
fviz_cluster(r_km_out, r_scaled_df, geom = "point")
###Output
C:\Users\Will\Anaconda3\envs\109b\lib\site-packages\rpy2-2.9.4-py3.6-win-amd64.egg\rpy2\robjects\pandas2ri.py:191: FutureWarning: from_items is deprecated. Please use DataFrame.from_dict(dict(items), ...) instead. DataFrame.from_dict(OrderedDict(items)) may be used to preserve the key order.
res = PandasDataFrame.from_items(items)
###Markdown
Selecting size: ElbowThis method measures the total (squared) distance from points to their cluster's centroid. Within a given cluster, this is *equivalent*, up to a factor of $2n$, to the summed (squared) distance from each point to the other points in the cluster (the phrasing we use in laer methods). Look for the place(s) where distance stops decreasing as much. Python
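(The equivalence comes from the identity $\sum_{i\in C}\lVert x_i-\mu_C\rVert^2=\frac{1}{2\lvert C\rvert}\sum_{i,j\in C}\lVert x_i-x_j\rVert^2$, where $\mu_C$ is the centroid of cluster $C$ and the double sum runs over all ordered pairs of points in the cluster.)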
###Code
wss = []
for i in range(1,11):
fitx = KMeans(n_clusters=i, init='random', n_init=5, random_state=109).fit(scaled_df) # fit K-means 11 different times
wss.append(fitx.inertia_) # a measurement of how well a point is assigned to a cluster
plt.figure(figsize=(11,8.5))
plt.plot(range(1,11), wss, 'bx-')
plt.xlabel('Number of clusters $k$')
plt.ylabel('Inertia')
plt.title('The Elbow Method showing the optimal $k$')
plt.show()
###Output
_____no_output_____
###Markdown
RNearly all of our plots in R today come from the handy library `factoextra` (and nearly all of our plots will be from the `fviz_clust` function). Because we ran `library(factoextra)` in R earlier, the library is still loaded and we can invoke fviz_clust directly. Also, the `-i` is optional: R remembers what r_scaled_df is from before, but it can be wise to always send over the latest version. Summary: - R remembers things you've loaded, saved, or passed it before, just like variables persist across cells in jupyter - `library(factoextra)` and then `fviz_nbclust` is your main plotting tool - Remember to set `nstart=` above its default of 1 whenever you use Kmeans in R - Documentation [here](https://www.rdocumentation.org/packages/factoextra/versions/1.0.5/topics/fviz_nbclust)
###Code
%%R -i r_scaled_df
fviz_nbclust(r_scaled_df, kmeans, method="wss", nstart = 5)
###Output
C:\Users\Will\Anaconda3\envs\109b\lib\site-packages\rpy2-2.9.4-py3.6-win-amd64.egg\rpy2\robjects\pandas2ri.py:191: FutureWarning: from_items is deprecated. Please use DataFrame.from_dict(dict(items), ...) instead. DataFrame.from_dict(OrderedDict(items)) may be used to preserve the key order.
res = PandasDataFrame.from_items(items)
###Markdown
Selecting size: SilhouetteSilhouette scores measure how close an observation is (on average) to points in its cluster, compared to the next-closest cluster's points. The range is [-1,1]; 0 indicates a point on the decision boundary (equal average closeness to points in both clusters), and negative values mean that datum might be better in a different cluster. The silhouette score plotted below is the average of the above score across all points. PythonThe silhouette score is a metric available in sklearn. We have to manually loop over values of K, calculate, and plot.
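Concretely, for a point $i$ with mean intra-cluster distance $a(i)$ and mean distance $b(i)$ to the points of the nearest other cluster, the silhouette is $s(i)=\frac{b(i)-a(i)}{\max(a(i),\,b(i))}$; the curve below plots the average of $s(i)$ over all points for each $k$.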
###Code
from sklearn.metrics import silhouette_score
# -1 not good, 0 = boundary
# pick the highest silhouette score
scores = [0]
for i in range(2,11):
fitx = KMeans(n_clusters=i, init='random', n_init=5, random_state=109).fit(scaled_df)
score = silhouette_score(scaled_df, fitx.labels_)
scores.append(score)
plt.figure(figsize=(11,8.5))
plt.plot(range(1,11), np.array(scores), 'bx-')
plt.xlabel('Number of clusters $k$')
plt.ylabel('Average Silhouette')
plt.title('The Silhouette Method showing the optimal $k$')
plt.show()
###Output
_____no_output_____
###Markdown
RAgain, `fviz_clust` will do the work, we just need to pass it a different `method=`.
###Code
%%R -i r_scaled_df
fviz_nbclust(r_scaled_df, kmeans, method="silhouette", nstart = 5)
###Output
C:\Users\Will\Anaconda3\envs\109b\lib\site-packages\rpy2-2.9.4-py3.6-win-amd64.egg\rpy2\robjects\pandas2ri.py:191: FutureWarning: from_items is deprecated. Please use DataFrame.from_dict(dict(items), ...) instead. DataFrame.from_dict(OrderedDict(items)) may be used to preserve the key order.
res = PandasDataFrame.from_items(items)
###Markdown
Selecting size: Gap StatisticThe gap statistic compares within-cluster distances (like in silhouette), but instead of comparing against the second-best existing cluster for that point, it compares our clustering's overall average to the average we'd see when the data don't have clusters at all. In essence, the within-cluster distances (in the elbow plot) will go down just because we have more clusters. We additionally calculate how much they'd go down on non-clustered data with the same spread as our data and subtract that trend out to produce the plot below. PythonAgain, we'd have to code it up ourselves. Though there are some implementations online, they're not ready for immediate use.
###Code
# you'd have to implement it yourself
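# A minimal do-it-yourself sketch of the gap statistic (Tibshirani et al., 2001),
# assuming uniform reference data drawn over the bounding box of the features.
# Larger gap values indicate a better k; this is an illustration, not a tuned
# implementation.
def gap_statistic(X, k_max=10, n_refs=10, random_state=109):
    rng = np.random.RandomState(random_state)
    X = np.asarray(X)
    gaps = []
    for k in range(1, k_max + 1):
        # log within-cluster dispersion on the real data
        km = KMeans(n_clusters=k, init='random', n_init=5, random_state=random_state).fit(X)
        log_wk = np.log(km.inertia_)
        # same quantity averaged over uniform reference datasets
        ref_log_wks = []
        for _ in range(n_refs):
            ref = rng.uniform(X.min(axis=0), X.max(axis=0), size=X.shape)
            ref_km = KMeans(n_clusters=k, init='random', n_init=5, random_state=random_state).fit(ref)
            ref_log_wks.append(np.log(ref_km.inertia_))
        gaps.append(np.mean(ref_log_wks) - log_wk)
    return gaps

# gaps = gap_statistic(scaled_df.values, k_max=10)
# plt.plot(range(1, 11), gaps, 'bx-'); plt.xlabel('Number of clusters $k$'); plt.ylabel('Gap'); plt.show()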
###Output
_____no_output_____
###Markdown
RAnd again, `fviz_clust` will do the work, we just need to pass it a different `method=`.
###Code
%%R -i r_scaled_df
fviz_nbclust(r_scaled_df, kmeans, method="gap", nstart = 5)
###Output
_____no_output_____
###Markdown
Exercise 1- Determine the optimal number of clusters- Re-fit a KMeans model with that number of clusters
###Code
num_cluster = 2
###Output
_____no_output_____
###Markdown
Assessing Fit: Silhouette PythonBelow, we borrow from an SKlearn example. The second plot may be overkill. - The second plot is JUST the first two dimensions in the data. It is *not* a PCA plot - If you only need the raw silhouette scores, use the `silhouette_samples` function
###Code
from sklearn.metrics import silhouette_samples, silhouette_score
import matplotlib.cm as cm
#modified code from http://scikit-learn.org/stable/auto_examples/cluster/plot_kmeans_silhouette_analysis.html
def silplot(X, clusterer, pointlabels=None):
cluster_labels = clusterer.labels_
n_clusters = clusterer.n_clusters
# Create a subplot with 1 row and 2 columns
fig, (ax1, ax2) = plt.subplots(1, 2)
fig.set_size_inches(11,8.5)
# The 1st subplot is the silhouette plot
# The silhouette coefficient can range from -1, 1 but in this example all
# lie within [-0.1, 1]
ax1.set_xlim([-0.1, 1])
# The (n_clusters+1)*10 is for inserting blank space between silhouette
# plots of individual clusters, to demarcate them clearly.
ax1.set_ylim([0, len(X) + (n_clusters + 1) * 10])
# The silhouette_score gives the average value for all the samples.
# This gives a perspective into the density and separation of the formed
# clusters
silhouette_avg = silhouette_score(X, cluster_labels)
print("For n_clusters = ", n_clusters,
", the average silhouette_score is ", silhouette_avg,".",sep="")
# Compute the silhouette scores for each sample
sample_silhouette_values = silhouette_samples(X, cluster_labels)
y_lower = 10
for i in range(0,n_clusters+1):
# Aggregate the silhouette scores for samples belonging to
# cluster i, and sort them
ith_cluster_silhouette_values = \
sample_silhouette_values[cluster_labels == i]
ith_cluster_silhouette_values.sort()
size_cluster_i = ith_cluster_silhouette_values.shape[0]
y_upper = y_lower + size_cluster_i
color = cm.nipy_spectral(float(i) / n_clusters)
ax1.fill_betweenx(np.arange(y_lower, y_upper),
0, ith_cluster_silhouette_values,
facecolor=color, edgecolor=color, alpha=0.7)
# Label the silhouette plots with their cluster numbers at the middle
ax1.text(-0.05, y_lower + 0.5 * size_cluster_i, str(i))
# Compute the new y_lower for next plot
y_lower = y_upper + 10 # 10 for the 0 samples
ax1.set_title("The silhouette plot for the various clusters.")
ax1.set_xlabel("The silhouette coefficient values")
ax1.set_ylabel("Cluster label")
# The vertical line for average silhouette score of all the values
ax1.axvline(x=silhouette_avg, color="red", linestyle="--")
ax1.set_yticks([]) # Clear the yaxis labels / ticks
ax1.set_xticks([-0.1, 0, 0.2, 0.4, 0.6, 0.8, 1])
# 2nd Plot showing the actual clusters formed
colors = cm.nipy_spectral(cluster_labels.astype(float) / n_clusters)
ax2.scatter(X[:, 0], X[:, 1], marker='.', s=200, lw=0, alpha=0.7,
c=colors, edgecolor='k')
xs = X[:, 0]
ys = X[:, 1]
if pointlabels is not None:
for i in range(len(xs)):
plt.text(xs[i],ys[i],pointlabels[i])
# Labeling the clusters
centers = clusterer.cluster_centers_
# Draw white circles at cluster centers
ax2.scatter(centers[:, 0], centers[:, 1], marker='o',
c="white", alpha=1, s=200, edgecolor='k')
for i, c in enumerate(centers):
ax2.scatter(c[0], c[1], marker='$%d$' % int(i), alpha=1,
s=50, edgecolor='k')
ax2.set_title("The visualization of the clustered data.")
ax2.set_xlabel("Feature space for the 1st feature")
ax2.set_ylabel("Feature space for the 2nd feature")
plt.suptitle(("Silhouette analysis for KMeans clustering on sample data "
"with n_clusters = %d" % n_clusters),
fontsize=14, fontweight='bold')
silplot(scaled_df.values, fitted_km)
###Output
For n_clusters = 5, the average silhouette_score is 0.41118597712860394.
###Markdown
RTo get the plot in R, we need to jump through a few hoops - import the `cluster` library (once) - call the `silhouette` function on cluster assignments and the inter-point distances - call `fviz_silhouette` on the result Luckily, we can run multiple lines of R at a time
###Code
%%R -i r_km_out -i r_scaled_df
library(cluster)
sil = silhouette(r_km_out$cluster, dist(r_scaled_df))
fviz_silhouette(sil)
###Output
C:\Users\Will\Anaconda3\envs\109b\lib\site-packages\rpy2-2.9.4-py3.6-win-amd64.egg\rpy2\robjects\pandas2ri.py:191: FutureWarning: from_items is deprecated. Please use DataFrame.from_dict(dict(items), ...) instead. DataFrame.from_dict(OrderedDict(items)) may be used to preserve the key order.
res = PandasDataFrame.from_items(items)
###Markdown
Exercise 2 - Display the silhouette plots for your own modelDiscussion- How do we know which of two silhouette plots is better?
###Code
#your code here
###Output
_____no_output_____
###Markdown
AgglomerativeAgglomerative clustering merges clusters together from the bottom up. There are many possible rules about which cluster(s) to combine next. Ward's rule wants to have the lowest possible total within-cluster distance, so it merges the two clusters that will harm this objective least. PythonScipy has Ward's method implemented, though the call sequence is a little convoluted, and the code is slow.
###Code
import scipy.cluster.hierarchy as hac
from scipy.spatial.distance import pdist
plt.figure(figsize=(11,8.5))
dist_mat = pdist(scaled_df, metric="euclidean")
ward_data = hac.ward(dist_mat)
hac.dendrogram(ward_data);
###Output
_____no_output_____
###Markdown
RR's code runs much faster, and the code is cleaner. Note that `hclust` is part of R's base `stats` package, so it is available without any extra import.
###Code
%%R -i r_scaled_df
stacked_cluster = hclust(dist(r_scaled_df), method = "ward.D")
plot(stacked_cluster)
###Output
_____no_output_____
###Markdown
Discussion- How do you read a plot like the above? What are valid options for number of clusters, and how can you tell? Are some more valid than others? DBscanDBscan is one of many alternative clustering algorithms that use an intuitive notion of density to define clusters, rather than defining them by a central point as in Kmeans. PythonDBscan is implemented in good ol' sklearn, but there aren't great tools for working out the epsilon parameter.
###Code
# I couldn't find any easy code for epsilon-tuning plot
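# One common way to tune epsilon (analogous to R's kNNdistplot used below):
# sort every point's distance to its k-th nearest neighbor and look for the
# "knee" in the curve. This is a sketch, assuming k = minPts = 5.
from sklearn.neighbors import NearestNeighbors
k = 5
knn_dists, _ = NearestNeighbors(n_neighbors=k + 1).fit(scaled_df).kneighbors(scaled_df)
plt.plot(np.sort(knn_dists[:, -1]))  # column 0 is the point itself (distance 0)
plt.xlabel('Points sorted by k-NN distance')
plt.ylabel('Distance to {}th nearest neighbor'.format(k))
plt.show()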
from sklearn.cluster import DBSCAN
fitted_dbscan = DBSCAN(eps=0.15).fit(scaled_df)
plt.scatter(scaled_df['x'],scaled_df['y'], c=fitted_dbscan.labels_);
###Output
_____no_output_____
###Markdown
RR's dbscan is in the `dbscan` library. It comes with `kNNdistplot` for tuning epsilon, and `fviz_cluster` will make a nice plot.
###Code
%%R -i r_scaled_df
library(dbscan)
kNNdistplot(r_scaled_df,k=5)
###Output
C:\Users\Will\Anaconda3\envs\109b\lib\site-packages\rpy2-2.9.4-py3.6-win-amd64.egg\rpy2\robjects\pandas2ri.py:191: FutureWarning: from_items is deprecated. Please use DataFrame.from_dict(dict(items), ...) instead. DataFrame.from_dict(OrderedDict(items)) may be used to preserve the key order.
res = PandasDataFrame.from_items(items)
###Markdown
Remember, we can import and run functions like dbscan within Python
###Code
r_dbscan = importr("dbscan")
r_db_out = r_dbscan.dbscan(r_scaled_df, eps=0.15, minPts = 5)
###Output
_____no_output_____
###Markdown
Or run directly in R
###Code
%%R -i r_scaled_df
r_db_out = dbscan(r_scaled_df, eps=0.15, minPts=5)
fviz_cluster(r_db_out, r_scaled_df, ellipse = FALSE, geom = "point")
###Output
_____no_output_____
###Markdown
Exercise 3Discussion- Use cross validation to select the optimal values of N and epsilon
###Code
#your code here?
###Output
_____no_output_____
###Markdown
Stretching: The arrest data and guided practiceIn this section you get to transfer the skills and code we learned above to new data. Because of how much this resembles your individual HW, we'll go over the solutions in lab, but not post them in writing. As always, we start by loading and exploring the data
###Code
arrest_data = pd.read_csv("data/USArrests.csv")
arrest_data['A'] = arrest_data['A'].astype('float64')
arrest_data['UrbanPop'] = arrest_data['UrbanPop'].astype('float64')
arrest_data.head()
###Output
_____no_output_____
###Markdown
Exercise - Scale the data Discussion - Or don't
###Code
#your code here
###Output
_____no_output_____
###Markdown
Exercise - Convert the pandas dataframe to R
###Code
#your code here
###Output
_____no_output_____
###Markdown
Exercise- How many [KMeans] clusters do the 50 states fall into?- Remember: we've seen three different methods for determining number of clusters
###Code
#your code here
#your code here
#your code here
###Output
_____no_output_____
###Markdown
Exercise- Fit a k-means clustering with the number of clusters you think is best
###Code
#your code here
###Output
_____no_output_____
###Markdown
Exercise - How good is your clustering?
###Code
#your code here
###Output
_____no_output_____
###Markdown
Exercise - Run an agglomerative clustering. - What's the benefit of this method? How many clusters does it suggest?
###Code
#your code here
###Output
_____no_output_____
###Markdown
Exercise - Run dbscan on this data. Remember to tune epsilon. - How well does it perform?
###Code
#your code here
###Output
_____no_output_____
###Markdown
Discussion- How did you synthesize the different suggestions for number of clusters?- What method, and what clustering seems correct?- Why is clustering useful? Bonus: A Hint About Why Clustering The States Is Hard
###Code
numeric_cols = ['M','A','UrbanPop','R']
pca = PCA()
scaled_df = pd.DataFrame(StandardScaler().fit_transform(arrest_data[numeric_cols]),
columns = arrest_data[numeric_cols].columns,
index = arrest_data.index)
pca_df = pca.fit_transform(scaled_df)
def biplot(scaled_data, fitted_pca, axis_labels, point_labels):
pca_results = fitted_pca.transform(scaled_data)
pca1_scores = pca_results[:,0]
pca2_scores = pca_results[:,1]
# plot each point in 2D post-PCA space
plt.scatter(pca1_scores,pca2_scores)
# label each point
for i in range(len(pca1_scores)):
plt.text(pca1_scores[i],pca2_scores[i], point_labels[i])
#for each original dimension, plot what an increase of 1 in that dimension means in this space
for i in range(fitted_pca.components_.shape[1]):
raw_dims_delta_on_pca1 = fitted_pca.components_[0,i]
raw_dims_delta_on_pca2 = fitted_pca.components_[1,i]
plt.arrow(0, 0, raw_dims_delta_on_pca1, raw_dims_delta_on_pca2 ,color = 'r',alpha = 1)
plt.text(raw_dims_delta_on_pca1*1.1, raw_dims_delta_on_pca2*1.1, axis_labels[i], color = 'g', ha = 'center', va = 'center')
plt.figure(figsize=(11,8.5))
plt.xlim(-3.5,3.5)
plt.ylim(-3.5,3.5)
plt.xlabel("PC{}".format(1))
plt.ylabel("PC{}".format(2))
plt.grid()
biplot(scaled_df, pca, axis_labels=scaled_df.columns, point_labels=arrest_data['State'])
###Output
_____no_output_____
|
notebooks/Search Engine For Candidate Sentences.ipynb
|
###Markdown
Search Engine For Candidate Sentences Demonstration of how to use the simple search engine for fetching relevant sentences Let's import our search engine from the `src` directory. First, one needs to set the Python source file environment variables for Jupyter Notebook. If you haven't done this, please run these two commands BEFORE running Jupyter Notebook:1. `export PYTHONPATH=/path/to/covid19/src`2. `export JUPYTER_PATH=/path/to/covid19/src`
###Code
import pandas as pd
from gensim.models.phrases import Phraser
from nlp.cleaning import clean_tokenized_sentence
from w2v.synonyms import Synonyms
from pprint import pprint
import operator
from typing import List
from datetime import datetime
import os
data_dir = "../../../workspace/kaggle/covid19/data"
###Output
_____no_output_____
###Markdown
Initialize our SearchEngine object with:1. Sentences metadata2. bi-gram model3. tri-gram model4. Trained FastText vectors
###Code
sentences_df = pd.read_csv(os.path.join(data_dir, "sentences_with_metadata.csv"))
def create_articles_metadata_mapping(sentences_df: pd.DataFrame) -> dict:
sentence_id_to_metadata = {}
for row_count, row in sentences_df.iterrows():
sentence_id_to_metadata[row_count] = dict(
paper_id=row['paper_id'],
cord_uid=row['cord_uid'],
source=row['source'],
url=row['url'],
publish_time=row['publish_time'],
authors=row['authors'],
section=row['section'],
sentence=row['sentence'],
)
return sentence_id_to_metadata
sentence_id_to_metadata = create_articles_metadata_mapping(sentences_df)
synonyms_model = Synonyms(os.path.join(data_dir, "fasttext_no_subwords_trigrams/word-vectors-100d.txt"))
class SearchEngine:
def __init__(self,
sentence_id_to_metadata: dict,
sentences_df: pd.DataFrame,
bigram_model_path: str,
trigram_model_path: str,
synonyms_model):
self.sentence_id_to_metadata = sentence_id_to_metadata
self.cleaned_sentences = sentences_df['cleaned_sentence'].tolist()
print(f'Loaded {len(self.cleaned_sentences)} sentences')
print(f'Loading bi-gram model: {bigram_model_path}')
self.bigram_model = Phraser.load(bigram_model_path)
print(f'Finished loading bi-gram model: {bigram_model_path}')
print(f'Loading tri-gram model: {trigram_model_path}')
self.trigram_model = Phraser.load(trigram_model_path)
print(f'Finished loading tri-gram model: {trigram_model_path}')
self.synonyms_model = synonyms_model
def _get_search_terms(self, keywords, synonyms_threshold):
# clean tokens
cleaned_terms = [clean_tokenized_sentence(keyword.split(' ')) for keyword in keywords]
# remove empty terms
cleaned_terms = [term for term in cleaned_terms if term]
# create bi-grams
terms_with_bigrams = self.bigram_model[' '.join(cleaned_terms).split(' ')]
# create tri-grams
terms_with_trigrams = self.trigram_model[terms_with_bigrams]
# expand query with synonyms
search_terms = [self.synonyms_model.get_synonyms(token) for token in terms_with_trigrams]
# filter synonyms above threshold (and flatten the list of lists)
search_terms = [synonym[0] for synonyms in search_terms for synonym in synonyms
if synonym[1] >= synonyms_threshold]
# expand keywords with synonyms
search_terms = list(terms_with_trigrams) + search_terms
return search_terms
def search(self,
keywords: List[str],
optional_keywords=None,
top_n: int = 10,
synonyms_threshold=0.7,
keyword_weight: float = 3.0,
optional_keyword_weight: float = 0.5) -> List[dict]:
if optional_keywords is None:
optional_keywords = []
search_terms = self._get_search_terms(keywords, synonyms_threshold)
optional_search_terms = self._get_search_terms(optional_keywords, synonyms_threshold) \
if optional_keywords else []
print(f'Search terms after cleaning, bigrams, trigrams and synonym expansion: {search_terms}')
print(f'Optional search terms after cleaning, bigrams, trigrams and synonym expansion: {optional_search_terms}')
date_today = datetime.today()
# calculate score for each sentence. Take only sentence with at least one match from the must-have keywords
indexes = []
match_counts = []
days_diffs = []
for sentence_index, sentence in enumerate(self.cleaned_sentences):
sentence_tokens = sentence.split(' ')
sentence_tokens_set = set(sentence_tokens)
match_count = sum([keyword_weight if keyword in sentence_tokens_set else 0
for keyword in search_terms])
if match_count > 0:
indexes.append(sentence_index)
if optional_search_terms:
match_count += sum([optional_keyword_weight if keyword in sentence_tokens_set else 0
for keyword in optional_search_terms])
match_counts.append(match_count)
article_date = self.sentence_id_to_metadata[sentence_index]["publish_time"]
if article_date == "2020":
article_date = "2020-01-01"
article_date = datetime.strptime(article_date, "%Y-%m-%d")
days_diff = (date_today - article_date).days
days_diffs.append(days_diff)
# the bigger the better
match_counts = [float(match_count)/sum(match_counts) for match_count in match_counts]
# the lesser the better
days_diffs = [(max(days_diffs) - days_diff) for days_diff in days_diffs]
days_diffs = [float(days_diff)/sum(days_diffs) for days_diff in days_diffs]
index_to_score = {}
for index, match_count, days_diff in zip(indexes, match_counts, days_diffs):
index_to_score[index] = 0.7 * match_count + 0.3 * days_diff
# sort by score descending
sorted_indexes = sorted(index_to_score.items(), key=operator.itemgetter(1), reverse=True)
# take only the sentence IDs
sorted_indexes = [item[0] for item in sorted_indexes]
# limit results
sorted_indexes = sorted_indexes[0: min(top_n, len(sorted_indexes))]
# get metadata for each sentence
results = []
for index in sorted_indexes:
results.append(self.sentence_id_to_metadata[index])
return results
search_engine = SearchEngine(
sentence_id_to_metadata,
sentences_df,
os.path.join(data_dir, "covid_bigram_model_v0.pkl"),
os.path.join(data_dir, "covid_trigram_model_v0.pkl"),
synonyms_model
)
###Output
Loaded 249343 sentences
Loading bi-gram model: ../../../workspace/kaggle/covid19/data/covid_bigram_model_v0.pkl
Finished loading bi-gram model: ../../../workspace/kaggle/covid19/data/covid_bigram_model_v0.pkl
Loading tri-gram model: ../../../workspace/kaggle/covid19/data/covid_trigram_model_v0.pkl
Finished loading tri-gram model: ../../../workspace/kaggle/covid19/data/covid_trigram_model_v0.pkl
###Markdown
Simple search function that gets a list of keywords to search:
###Code
def search(keywords, optional_keywords=None, top_n=10, synonyms_threshold=0.8, only_sentences=False):
print(f"\nSearch for terms {keywords}\n\n")
results = search_engine.search(
keywords, optional_keywords=optional_keywords, top_n=top_n, synonyms_threshold=synonyms_threshold
)
print("\nResults:\n")
if only_sentences:
for result in results:
print(result['sentence'] + "\n")
else:
pprint(results)
###Output
_____no_output_____
###Markdown
Let's see some examples:
###Code
search(keywords=["animals", "zoonotic", "spillover", "animal to human",
"bats", "snakes", "exotic animals", "seafood"],
optional_keywords=["new_coronavirus", "coronavirus", "covid19"])
search(keywords=["seasonality", "transmission", "humidity", "heat", "summer"],
optional_keywords=["new_coronavirus", "coronavirus", "covid19"])
search(["incubation_time", "incubation", "age"],
optional_keywords=["new_coronavirus", "coronavirus", "covid19"],
top_n=20, only_sentences=True)
search(["Prevalence", "asymptomatic", "shedding", "transmission", "children"],
optional_keywords=["new_coronavirus", "coronavirus", "covid19"],
top_n=20, only_sentences=True)
search(["seasonality", "transmission", "humidity", "heat", "summer"],
optional_keywords=["new_coronavirus", "coronavirus", "covid19"],
top_n=10, only_sentences=True)
search(["adhesion", "hydrophilic", "hydrophobic", "surfaces", "decontamination"],
optional_keywords=["new_coronavirus", "coronavirus", "covid19"],
top_n=10, only_sentences=True)
search(["Persistence", "stability","nasal discharge", "sputum", "urine", "fecal matter"],
optional_keywords=["new_coronavirus", "coronavirus", "covid19"],
top_n=10, only_sentences=True)
search(["Persistence", "materials", "copper", "stainless steel", "plastic"],
optional_keywords=["new_coronavirus", "coronavirus", "covid19"],
top_n=10, only_sentences=True)
search(["natural", "history", "virus", "infected"],
optional_keywords=["new_coronavirus", "coronavirus", "covid19"],
top_n=10, only_sentences=True)
search(["implementation", "diagnostics", "product", "clinical", "process"],
optional_keywords=["new_coronavirus", "coronavirus", "covid19"],
top_n=10, only_sentences=True)
search(["disease models", "animals", "infection", "transmission"],
optional_keywords=["new_coronavirus", "coronavirus", "covid19"],
top_n=10, only_sentences=True)
search(["Tools", "studies", "monitor", "phenotypic change", "potential adaptation", "virus", "mutation"],
optional_keywords=["new_coronavirus", "coronavirus", "covid19"],
top_n=10, only_sentences=True)
search(["Immune", "Immunity", "Immune response"],
optional_keywords=["new_coronavirus", "coronavirus", "covid19"],
top_n=10, only_sentences=True)
search(["Effectiveness", "movement control", "restrictions", "strategy", "prevent secondary transmission"],
optional_keywords=["new_coronavirus", "coronavirus", "covid19"],
top_n=10, only_sentences=True)
search(["Effectiveness", "personal protective equipment", "PPE", "strategy", "prevent transmission"],
optional_keywords=["new_coronavirus", "coronavirus", "covid19"],
top_n=10, only_sentences=True)
search(["seasonality", "transmission", "humidity", "heat", "summer"],
optional_keywords=["new_coronavirus", "coronavirus", "covid19"],
top_n=10, only_sentences=True)
###Output
Search for terms ['seasonality', 'transmission', 'humidity', 'heat', 'summer']
Search terms after cleaning, bigrams, trigrams and synonym expansion: ['seasonality', 'transmission', 'humidity_heat', 'summer', 'seasonal_patterns', 'seasonal_variation', 'seasonal_pattern', 'seasonal_trends', 'seasonality_influenza', 'seasonal_variations', 'trans_mission', 'transmissions', 'disease_transmission', 'contact_transmission', 'overall_discomfort', 'microclimate_temperature_humidity', 'emotional_benefits', 'perceived_comfort', 'n95mask_combination', 'perceived_exertion', 'thermophysiological', 'nanofunctional', 'physical_discomfort', 'subjective_ratings', 'winter', 'autumn', 'during_winter', 'during_summer', 'rainy', 'winter_summer', 'summer_autumn', 'fall_winter', 'cold_winter', 'during_winter_spring']
Optional search terms after cleaning, bigrams, trigrams and synonym expansion: ['newcoronavirus', 'coronavirus_covid19', '2019ncov_covid19', 'outbreak_2019_novel', 'sarscov2_2019ncov', 'coronavirus_2019ncov', 'ongoing_outbreak_novel_coronavirus', 'since_late_december', 'ongoing_outbreak_covid19', 'originating_wuhan_china', 'novel_coronavirus_outbreak', 'wuhan_coronavirus']
Results:
This variation in transmission strength may be driven by a variety of factors, including increased indoor crowding in the winter, the onset of the school term in the autumn, and climate factors (5) .
Even though there are many uncertainties relating to seasonality and this novel coronavirus [33] , it seems prudent to assume some seasonal fluctuation so we increased the average by 25% in winter and reduced it by 25% in summer (with a sinusoidal variation throughout the simulated year), using a mid-winter peak for Europe of 15 January (i.e., day 334 of the simulation).
21 They estimated that a 50% reduction in transmission during summer months would result in a smaller epidemic before the summer, followed by a resurgence in cases in the following winter.
As the southern hemisphere moves from summer to autumn and swiftly into winter, the transmission of all respiratory viruses will increase, including SARS-CoV-2 if introduced into that country.
If SARS-CoV-2 demonstrates seasonality like influenza and other respiratory tract viruses, a decline in cases during summer would provide time to prepare for the following transmission season.
A 50% reduction in transmission results in a smaller epidemic before the summer, followed by a resurgence in cases in the following winter.
If SARS-Cov-2 transmission is similarly subject to seasonal forcing, summer outbreaks would naturally have lower peaks than winter outbreaks.
Due to seasonal variation in transmission strength, it may be more difficult to flatten epidemic curves in the winter than in the summer.
same magnitude as the winter vs summer mortality excess) then the number of excess deaths would be 0 when 1 in 100 000, 14861 when 1 in 10 and 118885 when 8 in 10 are infected.
Variation in R₀ of 25% Winter conditions are known to accelerate transmission of influenza and also the other coronaviruses which cause common cold like symptoms [31] .
|
notebooks/xgboost/test_xgboost_classifier.ipynb
|
###Markdown
XGBOOST - TEST
###Code
import numpy as np
from sklearn import datasets
import xgboost as xgb
from xgboost.sklearn import XGBClassifier
import pandas as pd
from datetime import datetime
from time import time
from sklearn.metrics import balanced_accuracy_score, roc_auc_score, roc_curve  # roc metrics are used by the plotting helpers below
import seaborn as sns  # used by plot_score_dist
%matplotlib inline
import matplotlib.pyplot as plt
###Output
_____no_output_____
###Markdown
GLOBAL VARIABLES
###Code
DATAPATH = 'data/train_test/'
SEED = 47
NITER = 100
CV = 3
SCORE = 'balanced_accuracy'
handlingnull = False
NJOBS = 7
USEGPU = False
NCLASS = 3 # number of classes to predict (set to 0 for a binary problem)
###Output
_____no_output_____
###Markdown
FUNCTIONS
###Code
def plot_roc_curve(y_true, y_score):
logit_roc_auc = roc_auc_score(y_true, y_score)
fpr, tpr, thresholds = roc_curve(y_true, y_score)
plt.figure()
plt.plot(fpr, tpr, label='Xgboost (area = %0.2f)' % logit_roc_auc)
plt.plot([0, 1], [0, 1],'r--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic')
plt.legend(loc="lower right")
plt.show()
pass
def plot_importance(booster, figsize=(10,5), feature_names=None, **kwargs):
fig, ax = plt.subplots(1,1,figsize=figsize)
if feature_names:
mapper = {'f{0}'.format(i): v for i, v in enumerate(feature_names)}
mapped = {mapper[k]: v for k, v in booster.get_fscore().items()}
return xgb.plot_importance(mapped, ax=ax, **kwargs)
else:
return xgb.plot_importance(booster=booster, ax=ax, **kwargs)
pass
def plot_score_dist(y_true, y_score):
df = pd.DataFrame(data=np.column_stack((y_true, y_score)), columns=['target','score'])
plt.figure(figsize=(7, 4))
ax = sns.boxplot(x="target", y="score", data=df)
ax.set_title("Score distribution")
pass
###Output
_____no_output_____
###Markdown
LOAD DATASET
###Code
train_features = np.load(DATAPATH+'X_wavelet_discrete_train.npy')
train_labels = np.load(DATAPATH+'y_wavelet_discrete_train.npy')
test_features = np.load(DATAPATH+'X_wavelet_discrete_test.npy')
test_labels = np.load(DATAPATH+'y_wavelet_discrete_test.npy')
feature_names = np.load('data/features/X_wavelet_discrete_names.npy').tolist()
###Output
_____no_output_____
###Markdown
TRAIN MODEL Set hyperparameters
###Code
# ======== General Parameters ======= #
# Select the type of model to run at each iteration. gbtree or gblinear.
booster = 'gbtree'
# ======== Booster Parameters ======== #
# Analogous to learning rate in GBM.
# Typical final values to be used: 0.01-0.2
eta = [0.01]
# Control the balance of positive and negative weights, useful for unbalanced classes.
# A typical value to consider: sum(negative instances) / sum(positive instances)
# Default: scale_pos_weight = 1
scale_pos_weight = [5,1,2]
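# Illustration only (not from the original script): for a binary objective this value is
# usually derived from the label counts rather than hand-picked, e.g.
#   neg, pos = (train_labels == 0).sum(), (train_labels == 1).sum()
#   scale_pos_weight = neg / pos
# With the multi-class 'multi:softprob' objective used below the parameter has little effect.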
# Learning Task Parameters
# This defines the loss function to be minimized.
# - binary:logistic –logistic regression for binary classification, returns predicted probability (not class)
# - multi:softmax –multiclass classification using the softmax objective, returns predicted class (not probabilities)
# you also need to set an additional num_class (number of classes) parameter defining the number of unique classes
# - multi:softprob –same as softmax, but returns predicted probability of each data point belonging to each class.
objective = 'multi:softprob'
# The metric to be used for validation data.
# - rmse – root mean square error
# - mae – mean absolute error
# - logloss – negative log-likelihood
# - error – Binary classification error rate (0.5 threshold)
# - merror – Multiclass classification error rate
# - mlogloss – Multiclass logloss
# - auc: Area under the curve
eval_metric = 'mlogloss'
xgb_params = np.load('output/hyperparameters/rseach_xgboost_classifier_bestparams_d2019-11-09.npy', allow_pickle=True).tolist()
xgb_params
xgb_params['n_estimators'] = 54
xgb_params['seed'] = SEED
xgb_params['booster'] = booster
xgb_params['objective'] = objective
xgb_params['eval_metric'] = eval_metric
xgb_params['nthread'] = NJOBS  # native XGBoost name for the thread count
xgb_params['num_class'] = NCLASS
xgb_params['verbosity'] = 0
xgb_params['scale_pos_weight'] = scale_pos_weight[0]  # a single value is expected here, not a list of candidates
if USEGPU:
xgb_params['tree_method'] = 'gpu_hist'
xgb_params['gpu_id'] = 0
### create a DMatrix and handling Null values
if handlingnull:
#train_features[np.isnan(train_features)] = -9999
xgtrain = xgb.DMatrix(train_features, train_labels, missing=-9999)
xgtest = xgb.DMatrix(test_features, missing=-9999)
else:
xgtrain = xgb.DMatrix(train_features, train_labels)
xgtest = xgb.DMatrix(test_features, missing=-9999)
# n_estimators is not read from the params dict by xgb.train; pass it as num_boost_round instead
model = xgb.train(xgb_params, xgtrain, num_boost_round=xgb_params.pop('n_estimators'), verbose_eval=False)
y_pred = model.predict(xgtest)
print(SCORE,' : ', balanced_accuracy_score(test_labels, np.argmax(y_pred,axis=1)))
plot_importance(model, figsize=(20,20), feature_names=feature_names, max_num_features=50, color='red')
pass
df = pd.DataFrame(data=np.column_stack((test_labels,np.argmax(y_pred,axis=1))), columns=['target','score'])
df['score'].hist()
pass
###Output
_____no_output_____
|
_notebooks/2021-07-28-joining-excels.ipynb
|
###Markdown
Joining Excel files> Automating the merging of historical files- featured: false- hide: false- toc: false - badges: true- comments: true- categories: [jupyter, rise]- image: images/preview/unir-excels.png ContextToday I was asked to merge several Excel files into a single file.I adopted the following conventions:* The Excel files are in the `in` folder and have the same columns in the same order.* The `out` folder will contain 2 files: an Excel file with the concatenation of all the files, and a version where only a single record per RUT was kept (the last one). SolutionPandas
###Code
import pandas as pd
from glob import glob
# Read all the Excel files in the "in" folder
all_files = glob("in/*.xlsx")
# Get columns
df = pd.read_excel(all_files[0])
columns = df.columns
# Collect the files in a list
df_list = []
cols_list = []
for in_file in all_files:
print(in_file)
df = pd.read_excel(in_file)
    df.columns = columns  # Rename the columns, keeping the first file's convention
df["Archivo"] = in_file
df["#Fila"] = df.index
df_list.append(df)
# Concatenate
df_master = pd.concat(df_list)
# Drop duplicates, keeping a single record per RUT (the last one)
df_last = df_master.drop_duplicates(subset="RUT COMPLETO", keep='last')
# Save to Excel, one result per sheet
writer = pd.ExcelWriter('out/Franquicia_joined.xlsx', engine='xlsxwriter')
df_master.to_excel(writer, sheet_name="todos", index=False)
df_last.to_excel(writer, sheet_name="ultimo_segun_Rut", index=False)
writer.save()
###Output
_____no_output_____
|
scripts/publication/Figure5.ipynb
|
###Markdown
Figure 5 Generation
###Code
from matplotlib import pyplot as plt
from venn import venn
%matplotlib inline
import matplotlib.pyplot as plt
plt.rcParams['pdf.fonttype'] = 42
plt.rcParams['ps.fonttype'] = 42
# saves text as editable text for illustrator
import pandas as pd
from venn import pseudovenn
cd /users/madelinepark/Downloads/differentTreesFacsTfsResults
facs50 = pd.read_csv('50_facs_tfs_age.csv')
facs100 = pd.read_csv('100_facs_tfs_age.csv')
facs500 = pd.read_csv('500_facs_tfs_age.csv')
facs1000 = pd.read_csv('1000_facs_tfs_age.csv')
facs2000 = pd.read_csv('2000_facs_tfs_age.csv')
# facs5000 = pd.read_csv('senis_wholefacs_tfs_age_5000 (1).csv')
facs5000 = pd.read_csv('senis_wholefacs_tfs_age_5000 (1).csv')
facsNest = {
# 'facs50' :set(facs50['18m']),
'nEstimators = 100' :set(facs100['18m']),
'nEstimators = 500' :set(facs500['18m']),
'nEstimators = 1000' :set(facs1000['18m']),
'nEstimators = 2000' :set(facs2000['18m']),
'nEstimators = 5000' :set(facs5000['18m'])
}
venn(facsNest)
plt.savefig('facs18mDifferentNest.pdf')
###Output
_____no_output_____
|
sam_reproduction.ipynb
|
###Markdown
Build datasets from Ogle et al. (2015)
###Code
import re
from pathlib import Path
import pandas as pd
import PyPDF2
datapath = Path("/Users/rwhitley/Repositories/Projects/stochastic_antecedant_modelling/")
appendix_path = list(datapath.glob("*.pdf")).pop()
with open(str(appendix_path), 'rb') as f:
fileReader = PyPDF2.PdfFileReader(f)
npages = fileReader.numPages
text_pages = []
start = 0
for n in range(npages):
text = fileReader.getPage(n).extractText()
if re.search("Data set", text) or (start == 1):
start = 1
text_pages.append(text)
text_raw1 = '\n'.join(text_pages).split('Data set')
# Split the extracted text into individual "Data set" blocks, then into rows of whitespace-separated fields
text_raw2 = [re.sub(r'.+?\.(?= Year)', '', tr).split("\n")
             for tr in text_raw1 if re.search("Year", tr)]
text_raw3 = {"Data set %d" % (i + 2):
             [[x for x in row.split(" ") if x != '']
              for row in ds_text if row != '']
             for (i, ds_text) in enumerate(text_raw2)}
# Data set 2: drop the leading field on over-long rows and clean the header labels
dataset_2 = [row if len(row) <= 7 else row[1:] for row in text_raw3['Data set 2'] if len(row) > 1]
headers = [re.sub(r"\[|\]|,", "", c) for c in dataset_2[0]]
df2 = pd.DataFrame(dataset_2[1:], columns=headers)
df2.head()
[[r if len(r) < 6 else r.rsplit(".", 1) for r in row] \
for row in text_raw3['Data set 3'][1:] if len(row) > 1]
###Output
_____no_output_____
|
data/insert_records_into_mongo.ipynb
|
###Markdown
Connect to MongoDB
###Code
# set vars
auth = pd.read_csv('auth.txt', header = None) # this auth.txt file contains the password for the sketchloop user
pswd = auth.values[0][0]
user = 'sketchloop'
host = 'cogtoolslab.org' ## cocolab ip address
conn = pm.MongoClient('mongodb://sketchloop:' + pswd + '@127.0.0.1')
db = conn['stimuli']
if 'semantic_parts_graphical_conventions' in db.list_collection_names():
coll = db['semantic_parts_graphical_conventions']
coll.drop()
coll = db['semantic_parts_graphical_conventions']
#DB = stimuli
#COLL = semantic_parts_graphical_conventions
###Output
_____no_output_____
###Markdown
Read in csv file and insert into database
###Code
file_path = 'graphical_conventions_group_data_run5_submitButton_with_TargetURL.csv'
x = pd.read_csv(file_path)
## convert to json
y = x.to_json(orient='records')
z = json.loads(y)
## loop through list of records and insert each into collection
reallyRun = 1
if reallyRun:
for i,_z in enumerate(z):
coll.insert_one(_z)
print('Inserted {} records.'.format(i))
clear_output(wait=True)
else:
print('Did not insert any new data.')
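# Aside (not from the original notebook): for a bulk load like this, pymongo's insert_many
# is usually much faster than inserting documents one at a time, e.g.
#   coll.insert_many(z)
#   print('Inserted {} records.'.format(len(z)))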
###Output
Inserted 2599 records.
###Markdown
inspect & validate collection (to make sure records look right)
###Code
print('There are {} records in this collection.'.format(coll.count()))
z[0]
###Output
_____no_output_____
###Markdown
Visualize the structure
###Code
import networkx as nx
import matplotlib.pyplot as plt
from networkx.drawing.nx_agraph import write_dot, graphviz_layout
f = plt.figure(figsize = ((24,24)))
G = nx.Graph()
f = open("Chair.txt", "r")
edges = set()
mapping = {}
for line in f:
path = line.split(' ')[1]
edge = path.split('/')
for i in range (0 ,len (edge)-1):
one = '/'.join(edge[:i+1])
two = '/'.join(edge[:i+2])
edges.add((one,two))
mapping[one] = one.split('/')[-1]
mapping[two] = two.split('/')[-1]
for i, j in edges:
G.add_edge(i,j)
nx.draw(G,labels = mapping)
plt.show()
###Output
_____no_output_____
|
house-prices/notebooks/pipeline.ipynb
|
###Markdown
Submit
###Code
X_test = pd.read_parquet(f"{data_dir}/interim/test.parq", engine='pyarrow')
display(X_test.shape)
X_test.head(2)
X_test[categorical_cols] = X_test[categorical_cols].astype('category')
preds_test = grid_search.best_estimator_.predict(X_test)
output = pd.DataFrame(
{'Id': X_test.index, 'SalePrice': preds_test})
output.to_csv(f"{data_dir}/processed/submission.csv", index=False)
###Output
_____no_output_____
|
JupyterNotebook/Abruptly Goblins Planner.ipynb
|
###Markdown
IntroductionOpening your comic book store, the Sorcery Society, has been a lifelong dream come true. You quickly diversified your shop offerings to include miniatures, plush toys, collectible card games, and board games. Eventually, the store became more of a games store with a selection of this week's newest comic books and a small offering of graphic novel paperbacks. Completing your transformation means offering space for local tabletop gamers. They love to play their favorite RPG, "Abruptly Goblins!" and will happily pay you per chair to secure the space to do it. Unfortunately, planning the game night has fallen to you. If you pick the wrong night, not enough people will come and the game night will be cancelled. You decide it's best that you automate the game night selector to get the most people through the door. First you need to create a list of people who will be attending the game night.**Instructions**Create an empty list called `gamers`. This will be your list of people who are attending game night.
###Code
gamers = []
###Output
_____no_output_____
###Markdown
Now we want to create a function that will update this list and add a new gamer to this `gamers` list. Each `gamer` should be a dictionary with the following keys: - `"name"`: a string that contains the gamer's full or presumed name. E.g., "Vicky Very" - `"availability"`: a list of strings containing the names of the days of the week that the gamer is available. E.g., ["Monday", "Thursday", "Sunday"] **Instructions**Create a function called `add_gamer` that takes two parameters: `gamer` and `gamers_list`. The function should check that the argument passed to the `gamer` parameter has both `"name"` and `"availability"` as keys and, if so, add `gamer` to `gamers_list`.
###Code
def add_gamer(gamer, gamers_list):
if gamer.get("name") and gamer.get("availability"):
gamers_list.append(gamer)
else:
print("Gamer missing critical information")
###Output
_____no_output_____
###Markdown
Next we want to add our first gamer! Her name is Kimberly Warner and she's available on Mondays, Tuesdays, and Fridays.**Instructions**1. Create a dictionary called `kimberly` with the name and availability given above.2. Call `add_gamer` with `kimberly` as the first argument and `gamers` as the second.
###Code
kimberly = {
'name': "Kimberly Warner",
'availability': ["Monday", "Tuesday", "Friday"]
}
add_gamer(kimberly, gamers)
print(gamers)
###Output
[{'name': 'Kimberly Warner', 'availability': ['Monday', 'Tuesday', 'Friday']}]
###Markdown
Great! Let's add a couple more gamers to the list!
###Code
add_gamer({'name':'Thomas Nelson','availability': ["Tuesday", "Thursday", "Saturday"]}, gamers)
add_gamer({'name':'Joyce Sellers','availability': ["Monday", "Wednesday", "Friday", "Saturday"]}, gamers)
add_gamer({'name':'Michelle Reyes','availability': ["Wednesday", "Thursday", "Sunday"]}, gamers)
add_gamer({'name':'Stephen Adams','availability': ["Thursday", "Saturday"]}, gamers)
add_gamer({'name': 'Joanne Lynn', 'availability': ["Monday", "Thursday"]}, gamers)
add_gamer({'name':'Latasha Bryan','availability': ["Monday", "Sunday"]}, gamers)
add_gamer({'name':'Crystal Brewer','availability': ["Thursday", "Friday", "Saturday"]}, gamers)
add_gamer({'name':'James Barnes Jr.','availability': ["Tuesday", "Wednesday", "Thursday", "Sunday"]}, gamers)
add_gamer({'name':'Michel Trujillo','availability': ["Monday", "Tuesday", "Wednesday"]}, gamers)
###Output
_____no_output_____
###Markdown
Finding the perfect availabilityNow that we have a list of all of the people interested in game night, we want to be able to calculate which nights would have the most participation. First we need to create a frequency table which correlates each day of the week with gamer availability.**Instructions**Create a function called `build_daily_frequency_table` that takes no argument returns a dictionary with the days of the week as keys and `0`s for values. We'll be using this to count the availability per night. Call `build_daily_frequency_table` and save the results to a variable called `count_availability`.
###Code
def build_daily_frequency_table():
return {
"Monday": 0,
"Tuesday": 0,
"Wednesday": 0,
"Thursday": 0,
"Friday": 0,
"Saturday": 0,
"Sunday": 0,
}
count_availability = build_daily_frequency_table()
###Output
_____no_output_____
###Markdown
Next we need to count the number of people every night.**Instructions**Write a function called `calculate_availability` that takes a list of gamers as an argument `gamers_list` and a frequency table `available_frequency`. The function should iterate through each gamer in `gamers_list` and iterate through each day in the gamer's availability. For each day in the gamer's availability, add one to that date on the frequency table.
###Code
def calculate_availability(gamers_list, available_frequency):
for gamer in gamers_list:
for day in gamer['availability']:
available_frequency[day] += 1
###Output
_____no_output_____
###Markdown
Now let's use these tools to find the best night to run Abruptly Goblins!**Instructions**Call `calculate_availability` with `gamers` and `count_availability`. Print out `count_availability` afterwards.
###Code
calculate_availability(gamers, count_availability)
print(count_availability)
###Output
{'Monday': 5, 'Tuesday': 4, 'Wednesday': 4, 'Thursday': 6, 'Friday': 3, 'Saturday': 4, 'Sunday': 3}
###Markdown
Lastly we need a way to pick the day with the most available people to attend so that we can schedule game night on that night.**Instructions**Write a function `find_best_night` that takes a dictionary `availability_table` and returns the key with the highest number.
###Code
def find_best_night(availability_table):
best_availability = 0
for day, availability in availability_table.items():
if availability > best_availability:
best_night = day
best_availability = availability
return best_night
###Output
_____no_output_____
###Markdown
Now let's find the best day to host game night.**Instructions**Call `find_best_night` with `count_availability`, store the result in a variable called `game_night`.Print out `game_night` to find out which day it is.
###Code
game_night = find_best_night(count_availability)
print(game_night)
###Output
Thursday
###Markdown
And let's make a list of all of the people who are available that night.**Instructions*** Create a function `available_on_night` that takes two parameters: `gamers_list` and `day` and returns a list of people who are available on that particular day.* Call `available_on_night` with `gamers` and `game_night` and save the result into the variable `attending_game_night`.* Print `attending_game_night`.
###Code
def available_on_night(gamers_list, day):
return [gamer for gamer in gamers_list if day in gamer['availability']]
attending_game_night = available_on_night(gamers, game_night)
print(attending_game_night)
###Output
[{'name': 'Thomas Nelson', 'availability': ['Tuesday', 'Thursday', 'Saturday']}, {'name': 'Michelle Reyes', 'availability': ['Wednesday', 'Thursday', 'Sunday']}, {'name': 'Stephen Adams', 'availability': ['Thursday', 'Saturday']}, {'name': 'Joanne Lynn', 'availability': ['Monday', 'Thursday']}, {'name': 'Crystal Brewer', 'availability': ['Thursday', 'Friday', 'Saturday']}, {'name': 'James Barnes Jr.', 'availability': ['Tuesday', 'Wednesday', 'Thursday', 'Sunday']}]
###Markdown
Generating an E-mail for the ParticipantsWith the best day for Abruptly Goblins! determined with computer precision, we need to let the attendees know that the game night is on a night they can attend. Let's start by creating a form email to send to each of the participants that we'll fill out with data later.**Instructions**Define a string, called `form_email` with interpolation variables `{name}`, `{day_of_week}`, and `{game}` (in case we decide we want to use this featureset to host a different game night). Use it to tell your gaming attendees the night their Abruptly Goblins! game can be played.
###Code
form_email = """
Dear {name},
The Sorcery Society is happy to host "{game}" night and wishes you will attend. Come by on {day_of_week} and have a blast!
Magically Yours,
the Sorcery Society
"""
###Output
_____no_output_____
###Markdown
**Instructions**Create a function `send_email` with three parameters: `gamers_who_can_attend`, `day`, and `game`. Print `form_email` for each gamer in `gamers_who_can_attend` with the appropriate `day` and `game`.Call `send_email` with `attending_game_night`, `game_night`, and `"Abruptly Goblins!"`.
###Code
def send_email(gamers_who_can_attend, day, game):
for gamer in gamers_who_can_attend:
print(form_email.format(name=gamer['name'], day_of_week=day, game=game))
send_email(attending_game_night, game_night, "Abruptly Goblins!")
###Output
Dear Thomas Nelson,
The Sorcery Society is happy to host "Abruptly Goblins!" night and wishes you will attend. Come by on Thursday and have a blast!
Magically Yours,
the Sorcery Society
Dear Michelle Reyes,
The Sorcery Society is happy to host "Abruptly Goblins!" night and wishes you will attend. Come by on Thursday and have a blast!
Magically Yours,
the Sorcery Society
Dear Stephen Adams,
The Sorcery Society is happy to host "Abruptly Goblins!" night and wishes you will attend. Come by on Thursday and have a blast!
Magically Yours,
the Sorcery Society
Dear Joanne Lynn,
The Sorcery Society is happy to host "Abruptly Goblins!" night and wishes you will attend. Come by on Thursday and have a blast!
Magically Yours,
the Sorcery Society
Dear Crystal Brewer,
The Sorcery Society is happy to host "Abruptly Goblins!" night and wishes you will attend. Come by on Thursday and have a blast!
Magically Yours,
the Sorcery Society
Dear James Barnes Jr.,
The Sorcery Society is happy to host "Abruptly Goblins!" night and wishes you will attend. Come by on Thursday and have a blast!
Magically Yours,
the Sorcery Society
###Markdown
AfterwardYou feel bad for the folks who weren't able to attend on the chosen game night, so you try to reuse the functions you have already written to schedule a second game night that week.**Instructions*** Create a list `unable_to_attend_best_night` of everyone in `gamers` who wasn't able to attend game night on `game_night`.* Create a `second_night_availability` frequency table by calling `build_daily_frequency_table`.* Call `calculate_availability` with `unable_to_attend_best_night` and `second_night_availability`.* Call `find_best_night` with the now filled-in `second_night_availability`, and save the result in `second_night`.
###Code
unable_to_attend_best_night = [gamer for gamer in gamers if game_night not in gamer['availability']]
second_night_availability = build_daily_frequency_table()
calculate_availability(unable_to_attend_best_night, second_night_availability)
second_night = find_best_night(second_night_availability)
###Output
_____no_output_____
###Markdown
Let's send out an email to everyone (whether they can attend the first night or not) who marked themselves as available on our second game night.**Instructions*** Create the list `available_second_game_night` by calling `available_on_night` with `gamers` and `second_night`* Let the gamers know by calling `send_email` with `available_second_game_night`, `second_night`, and "Abruptly Goblins!"
###Code
available_second_game_night = available_on_night(gamers, second_night)
send_email(available_second_game_night, second_night, "Abruptly Goblins!")
###Output
Dear Kimberly Warner,
The Sorcery Society is happy to host "Abruptly Goblins!" night and wishes you will attend. Come by on Monday and have a blast!
Magically Yours,
the Sorcery Society
Dear Joyce Sellers,
The Sorcery Society is happy to host "Abruptly Goblins!" night and wishes you will attend. Come by on Monday and have a blast!
Magically Yours,
the Sorcery Society
Dear Joanne Lynn,
The Sorcery Society is happy to host "Abruptly Goblins!" night and wishes you will attend. Come by on Monday and have a blast!
Magically Yours,
the Sorcery Society
Dear Latasha Bryan,
The Sorcery Society is happy to host "Abruptly Goblins!" night and wishes you will attend. Come by on Monday and have a blast!
Magically Yours,
the Sorcery Society
Dear Michel Trujillo,
The Sorcery Society is happy to host "Abruptly Goblins!" night and wishes you will attend. Come by on Monday and have a blast!
Magically Yours,
the Sorcery Society
|
2018/09/solution.ipynb
|
###Markdown
Advent of Code 2018 - Day 9 Input
###Code
# data - part 1 - 410 players; last marble is worth 72059 points
# player_count, final_marble_value, high_score, winner_id = 410, 72059, 0, 370
# data - part 2 - 410 players; last marble is worth 7205900 points
# brute force approach (let it run for hours)
player_count, final_marble_value, high_score, winner_id = 410, 7205900, 0, 370
#
# data examples:
#
# player_count, final_marble_value, high_score, winner_id = 9, 25, 32, 5
# player_count, final_marble_value, high_score, winner_id = 10, 1618, 8317, 10
# player_count, final_marble_value, high_score, winner_id = 13, 7999, 146373, 12
# player_count, final_marble_value, high_score, winner_id = 17, 1104, 2764, 16
# player_count, final_marble_value, high_score, winner_id = 21, 6111, 54718, 5
# player_count, final_marble_value, high_score, winner_id = 30, 5807, 37305, 20
###Output
_____no_output_____
###Markdown
Part 1
###Code
marble_list = [0]
curr_marble_idx = 0
player_scores = {}
for curr_marble_val in range(1, final_marble_value + 1):
# adjust for marble value 0 belonging to no player
# (this could be cleaner)
player_nbr = ((curr_marble_val - 1) % player_count) + 1
if curr_marble_val == 1:
marble_list.append(1)
curr_marble_idx = 1
elif curr_marble_val % 23 == 0:
marble_to_remove_idx = (curr_marble_idx - 7) % len(marble_list)
marble_to_remove_val = marble_list.pop(marble_to_remove_idx)
curr_marble_idx = marble_to_remove_idx
player_scores[player_nbr] = (player_scores.get(player_nbr, 0)
+ curr_marble_val
+ marble_to_remove_val)
else:
# next pos calc is a little weird to append when pos = list len
next_marble_idx = ((curr_marble_idx + 1) % len(marble_list)) + 1
marble_list.insert(next_marble_idx, curr_marble_val)
curr_marble_idx = next_marble_idx
def print_high_score(player_scores):
player_id = max(player_scores, key=player_scores.get)
    print('{}: {}'.format(player_id, player_scores[player_id]))
print_high_score(player_scores)
###Output
215: 3624387659
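For reference, here is a sketch (not part of the original solution) of the usual fast approach to part 2: keeping the circle in a `collections.deque` and rotating it avoids the costly list inserts and pops, and finishes in seconds even for the 7,205,900-marble game.
```python
from collections import deque

def high_score(player_count, final_marble_value):
    """Play the marble game with a rotating deque; the current marble is the rightmost element."""
    scores = [0] * (player_count + 1)
    circle = deque([0])
    for marble in range(1, final_marble_value + 1):
        if marble % 23 == 0:
            circle.rotate(7)                      # step back seven marbles
            player = ((marble - 1) % player_count) + 1
            scores[player] += marble + circle.pop()
            circle.rotate(-1)                     # the marble clockwise of the removed one becomes current
        else:
            circle.rotate(-1)
            circle.append(marble)                 # the appended marble becomes the current marble
    return max(scores)

# high_score(9, 25) == 32, matching the example above;
# high_score(410, 7205900) reproduces the brute-force result.
```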
|
geoLEARN/Submission_3_RF_FE.ipynb
|
###Markdown
Facies classification using Random Forest Contest entry by geoLEARN: Martin Blouin, Lorenzo Perozzi and Antoine Caté in collaboration with Erwan Gloaguen [Original contest notebook](../Facies_classification.ipynb) by Brendon Hall, [Enthought](https://www.enthought.com/)In this notebook we will train a machine learning algorithm to predict facies from well log data. The dataset comes from a class exercise from The University of Kansas on [Neural Networks and Fuzzy Systems](http://www.people.ku.edu/~gbohling/EECS833/). This exercise is based on a consortium project to use machine learning techniques to create a reservoir model of the largest gas fields in North America, the Hugoton and Panoma Fields. For more info on the origin of the data, see [Bohling and Dubois (2003)](http://www.kgs.ku.edu/PRS/publication/2003/ofr2003-50.pdf) and [Dubois et al. (2007)](http://dx.doi.org/10.1016/j.cageo.2006.08.011).The dataset consists of log data from nine wells that have been labeled with a facies type based on observation of core. We will use this log data to train a Random Forest model to classify facies types. Exploring the datasetFirst, we import and examine the dataset used to train the classifier.
###Code
###### Importing all used packages
%matplotlib inline
import warnings
warnings.filterwarnings('ignore')
import pandas as pd
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.colors as colors
from mpl_toolkits.axes_grid1 import make_axes_locatable
import seaborn as sns
from pandas import set_option
# set_option("display.max_rows", 10)
pd.options.mode.chained_assignment = None
###### Import packages needed for the make_vars functions
import Feature_Engineering as FE
##### import stuff from scikit learn
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import KFold, cross_val_score,LeavePGroupsOut, LeaveOneGroupOut, cross_val_predict
from sklearn.metrics import confusion_matrix, make_scorer, f1_score, accuracy_score, recall_score, precision_score
filename = '../facies_vectors.csv'
training_data = pd.read_csv(filename)
training_data.head()
training_data.describe()
###Output
_____no_output_____
###Markdown
A complete description of the dataset is given in the [Original contest notebook](../Facies_classification.ipynb) by Brendon Hall, [Enthought](https://www.enthought.com/). A total of four measured rock properties and two interpreted geological properties are given as raw predictor variables for the prediction of the "Facies" class. Feature engineeringAs stated in our [previous submission](Submission_increased_variance.ipynb), we believe that feature engineering has a high potential for increasing classification success. A strategy for building new variables is explained below.The dataset is distributed along a series of drillholes intersecting a stratigraphic sequence. Sedimentary facies tend to be deposited in sequences that reflect the evolution of the paleo-environment (variations in water depth, water temperature, biological activity, current strength, detrital input, ...). Each facies represents a specific depositional environment and is in contact with facies that represent a progressive transition to another environment.Thus, there is a relationship between neighbouring samples, and the distribution of the data along drillholes can be as important as data values for predicting facies.A series of new variables (features) is calculated and tested below to help represent the relationship of neighbouring samples and the overall texture of the data along drillholes. These variables are:- detail and approximation coefficients at various levels of two [wavelet transforms](https://en.wikipedia.org/wiki/Discrete_wavelet_transform) (using two types of [Daubechies](https://en.wikipedia.org/wiki/Daubechies_wavelet) wavelets);- measures of the local entropy with variable observation windows;- measures of the local gradient with variable observation windows;- rolling statistical calculations (i.e., mean, standard deviation, min and max) with variable observation windows;- ratios between marine and non-marine lithofacies with different observation windows;- distances from the nearest marine or non-marine occurrence uphole and downhole.Functions used to build these variables are located in the [Feature Engineering](Feature_Engineering.py) Python script.All the data exploration work related to the conception and study of these variables is not presented here.
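To illustrate the pattern these helpers follow (a sketch only, with an assumed signature; the actual implementations are in Feature_Engineering.py and may differ), a rolling-average builder can be written so that each window is computed per well and never straddles two drillholes:
```python
import pandas as pd

def make_moving_av_vars_sketch(wells_df, logs, windows):
    """Rolling mean of each log for several window sizes, computed well by well."""
    out = pd.DataFrame(index=wells_df.index)
    for log in logs:
        for window in windows:
            out['%s_moving_av_%dft' % (log, window)] = (
                wells_df.groupby('Well Name')[log]
                        .transform(lambda s: s.rolling(window, center=True, min_periods=1).mean())
            )
    return out

# e.g. make_moving_av_vars_sketch(training_data, ['GR', 'PE'], windows=[5, 10])
```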
###Code
##### cD From wavelet db1
dwt_db1_cD_df = FE.make_dwt_vars_cD(wells_df=training_data, logs=['GR', 'ILD_log10', 'DeltaPHI', 'PE', 'PHIND'],
levels=[1, 2, 3, 4], wavelet='db1')
##### cA From wavelet db1
dwt_db1_cA_df = FE.make_dwt_vars_cA(wells_df=training_data, logs=['GR', 'ILD_log10', 'DeltaPHI', 'PE', 'PHIND'],
levels=[1, 2, 3, 4], wavelet='db1')
##### cD From wavelet db3
dwt_db3_cD_df = FE.make_dwt_vars_cD(wells_df=training_data, logs=['GR', 'ILD_log10', 'DeltaPHI', 'PE', 'PHIND'],
levels=[1, 2, 3, 4], wavelet='db3')
##### cA From wavelet db3
dwt_db3_cA_df = FE.make_dwt_vars_cA(wells_df=training_data, logs=['GR', 'ILD_log10', 'DeltaPHI', 'PE', 'PHIND'],
levels=[1, 2, 3, 4], wavelet='db3')
##### From entropy
entropy_df = FE.make_entropy_vars(wells_df=training_data, logs=['GR', 'ILD_log10', 'DeltaPHI', 'PE', 'PHIND'],
l_foots=[2, 3, 4, 5, 7, 10])
###### From gradient
gradient_df = FE.make_gradient_vars(wells_df=training_data, logs=['GR', 'ILD_log10', 'DeltaPHI', 'PE', 'PHIND'],
dx_list=[2, 3, 4, 5, 6, 10, 20])
##### From rolling average
moving_av_df = FE.make_moving_av_vars(wells_df=training_data, logs=['GR', 'ILD_log10', 'DeltaPHI', 'PE', 'PHIND'],
windows=[1, 2, 5, 10, 20])
##### From rolling standard deviation
moving_std_df = FE.make_moving_std_vars(wells_df=training_data, logs=['GR', 'ILD_log10', 'DeltaPHI', 'PE', 'PHIND'],
windows=[3 , 4, 5, 7, 10, 15, 20])
##### From rolling max
moving_max_df = FE.make_moving_max_vars(wells_df=training_data, logs=['GR', 'ILD_log10', 'DeltaPHI', 'PE', 'PHIND'],
windows=[3, 4, 5, 7, 10, 15, 20])
##### From rolling min
moving_min_df = FE.make_moving_min_vars(wells_df=training_data, logs=['GR', 'ILD_log10', 'DeltaPHI', 'PE', 'PHIND'],
windows=[3 , 4, 5, 7, 10, 15, 20])
###### From rolling NM/M ratio
rolling_marine_ratio_df = FE.make_rolling_marine_ratio_vars(wells_df=training_data, windows=[5, 10, 15, 20, 30, 50, 75, 100, 200])
###### From distance to NM and M, up and down
dist_M_up_df = FE.make_distance_to_M_up_vars(wells_df=training_data)
dist_M_down_df = FE.make_distance_to_M_down_vars(wells_df=training_data)
dist_NM_up_df = FE.make_distance_to_NM_up_vars(wells_df=training_data)
dist_NM_down_df = FE.make_distance_to_NM_down_vars(wells_df=training_data)
list_df_var = [dwt_db1_cD_df, dwt_db1_cA_df, dwt_db3_cD_df, dwt_db3_cA_df,
entropy_df, gradient_df, moving_av_df, moving_std_df, moving_max_df, moving_min_df,
rolling_marine_ratio_df, dist_M_up_df, dist_M_down_df, dist_NM_up_df, dist_NM_down_df]
combined_df = training_data
for var_df in list_df_var:
temp_df = var_df
combined_df = pd.concat([combined_df,temp_df],axis=1)
combined_df.replace(to_replace=np.nan, value='-1', inplace=True)
print (combined_df.shape)
combined_df.head(5)
###Output
(4149, 299)
###Markdown
Building a prediction model from these variablesA Random Forest model is built here to test the effect of these new variables on the predictive power. Algorithm parameters have been tuned to take into account the non-stationarity of the training and testing sets, using the LeaveOneGroupOut cross-validation strategy. The size of individual tree leaves and nodes has been increased to the maximum possible without significantly increasing the variance, so as to reduce the bias of the prediction.Box plots of the scores obtained through cross-validation are presented below. Create predictor and target arrays
###Code
X = combined_df.iloc[:, 4:]
y = combined_df['Facies']
groups = combined_df['Well Name']
###Output
_____no_output_____
###Markdown
Estimation of validation scores from this tuning
###Code
scoring_param = ['accuracy', 'recall_weighted', 'precision_weighted','f1_weighted']
scores = []
Cl = RandomForestClassifier(n_estimators=100, max_features=0.1, min_samples_leaf=25,
min_samples_split=50, class_weight='balanced', random_state=42, n_jobs=-1)
lpgo = LeavePGroupsOut(n_groups=2)
for scoring in scoring_param:
cv=lpgo.split(X, y, groups)
validated = cross_val_score(Cl, X, y, scoring=scoring, cv=cv, n_jobs=-1)
scores.append(validated)
scores = np.array(scores)
scores = np.swapaxes(scores, 0, 1)
scores = pd.DataFrame(data=scores, columns=scoring_param)
sns.set_style('white')
fig,ax = plt.subplots(figsize=(8,6))
sns.boxplot(data=scores)
plt.xlabel('scoring parameters')
plt.ylabel('score')
plt.title('Classification scores for tuned parameters');
###Output
_____no_output_____
###Markdown
Evaluating feature importancesThe individual contribution to the classification for each feature (i.e., feature importances) can be obtained from a Random Forest classifier. This gives a good idea of the classification power of individual features and helps in understanding which type of feature engineering is the most promising.Caution should be taken when interpreting feature importances, as highly correlated variables tend to dilute their classification power among themselves and will rank lower than uncorrelated variables.
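As a complementary check (a sketch, not part of the original workflow; it assumes the `X` and `y` arrays defined above and the classifier `Cl` fitted in the next cell), scikit-learn's permutation importance re-scores the model after shuffling each feature and gives a second, model-agnostic view of the ranking:
```python
from sklearn.inspection import permutation_importance

# Shuffle each feature in turn and measure the drop in score.
# Note: evaluated on the training wells here, so the scores are optimistic.
perm = permutation_importance(Cl, X, y, scoring='accuracy', n_repeats=5,
                              random_state=42, n_jobs=-1)
perm_ranking = sorted(zip(X.columns, perm.importances_mean),
                      key=lambda item: item[1], reverse=True)
print(perm_ranking[:10])
```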
###Code
####### Evaluation of feature importances
Cl = RandomForestClassifier(n_estimators=75, max_features=0.1, min_samples_leaf=25,
min_samples_split=50, class_weight='balanced', random_state=42,oob_score=True, n_jobs=-1)
Cl.fit(X, y)
print ('OOB estimate of accuracy for prospectivity classification using all features: %s' % str(Cl.oob_score_))
importances = Cl.feature_importances_
std = np.std([tree.feature_importances_ for tree in Cl.estimators_], axis=0)
indices = np.argsort(importances)[::-1]
print("Feature ranking:")
Vars = list(X.columns.values)
for f in range(X.shape[1]):
print("%d. feature %d %s (%f)" % (f + 1, indices[f], Vars[indices[f]], importances[indices[f]]))
###Output
OOB estimate of accuracy for prospectivity classification using all features: 0.723065798988
Feature ranking:
1. feature 282 Marine_ratio_5_centered (0.031530)
2. feature 293 dist_NM_up (0.030115)
3. feature 288 Marine_ratio_75_centered (0.029691)
4. feature 292 dist_M_down (0.027875)
5. feature 284 Marine_ratio_15_centered (0.027651)
6. feature 285 Marine_ratio_20_centered (0.026680)
7. feature 290 Marine_ratio_200_centered (0.026423)
8. feature 286 Marine_ratio_30_centered (0.022103)
9. feature 5 NM_M (0.021090)
10. feature 289 Marine_ratio_100_centered (0.018909)
11. feature 287 Marine_ratio_50_centered (0.017506)
12. feature 283 Marine_ratio_10_centered (0.016424)
13. feature 171 PE_moving_av_20ft (0.014572)
14. feature 249 GR_moving_min_5ft (0.012678)
15. feature 291 dist_M_up (0.011630)
16. feature 250 GR_moving_min_7ft (0.011605)
17. feature 81 PE_cA_level_3 (0.010347)
18. feature 254 ILD_log10_moving_min_3ft (0.009445)
19. feature 272 PE_moving_min_10ft (0.009424)
20. feature 294 dist_NM_down (0.008873)
21. feature 219 ILD_log10_moving_max_3ft (0.008808)
22. feature 241 PHIND_moving_max_4ft (0.008783)
23. feature 224 ILD_log10_moving_max_15ft (0.008776)
24. feature 248 GR_moving_min_4ft (0.008697)
25. feature 0 GR (0.008360)
26. feature 223 ILD_log10_moving_max_10ft (0.008254)
27. feature 271 PE_moving_min_7ft (0.008216)
28. feature 170 PE_moving_av_10ft (0.007934)
29. feature 251 GR_moving_min_10ft (0.007851)
30. feature 277 PHIND_moving_min_5ft (0.007727)
31. feature 275 PHIND_moving_min_3ft (0.007675)
32. feature 257 ILD_log10_moving_min_7ft (0.007360)
33. feature 240 PHIND_moving_max_3ft (0.007212)
34. feature 247 GR_moving_min_3ft (0.007172)
35. feature 159 ILD_log10_moving_av_5ft (0.007125)
36. feature 221 ILD_log10_moving_max_5ft (0.007063)
37. feature 174 PHIND_moving_av_5ft (0.006821)
38. feature 37 DeltaPHI_cA_level_3 (0.006339)
39. feature 160 ILD_log10_moving_av_10ft (0.006312)
40. feature 255 ILD_log10_moving_min_4ft (0.006159)
41. feature 259 ILD_log10_moving_min_15ft (0.005927)
42. feature 269 PE_moving_min_4ft (0.005768)
43. feature 31 ILD_log10_cA_level_1 (0.005768)
44. feature 239 PE_moving_max_20ft (0.005693)
45. feature 152 GR_moving_av_1ft (0.005620)
46. feature 1 ILD_log10 (0.005587)
47. feature 3 PHIND (0.005504)
48. feature 6 RELPOS (0.005443)
49. feature 153 GR_moving_av_2ft (0.005402)
50. feature 34 ILD_log10_cA_level_4 (0.005058)
51. feature 238 PE_moving_max_15ft (0.005040)
52. feature 157 ILD_log10_moving_av_1ft (0.004991)
53. feature 43 PHIND_cA_level_1 (0.004968)
54. feature 246 PHIND_moving_max_20ft (0.004958)
55. feature 276 PHIND_moving_min_4ft (0.004852)
56. feature 230 DeltaPHI_moving_max_10ft (0.004707)
57. feature 173 PHIND_moving_av_2ft (0.004687)
58. feature 39 PE_cA_level_1 (0.004664)
59. feature 32 ILD_log10_cA_level_2 (0.004654)
60. feature 242 PHIND_moving_max_5ft (0.004548)
61. feature 172 PHIND_moving_av_1ft (0.004467)
62. feature 41 PE_cA_level_3 (0.004452)
63. feature 158 ILD_log10_moving_av_2ft (0.004395)
64. feature 256 ILD_log10_moving_min_5ft (0.004373)
65. feature 264 DeltaPHI_moving_min_7ft (0.004358)
66. feature 222 ILD_log10_moving_max_7ft (0.004249)
67. feature 165 DeltaPHI_moving_av_10ft (0.004245)
68. feature 270 PE_moving_min_5ft (0.004242)
69. feature 161 ILD_log10_moving_av_20ft (0.004107)
70. feature 38 DeltaPHI_cA_level_4 (0.004102)
71. feature 155 GR_moving_av_10ft (0.004062)
72. feature 236 PE_moving_max_7ft (0.003970)
73. feature 204 PE_moving_std_20ft (0.003910)
74. feature 79 PE_cA_level_1 (0.003905)
75. feature 83 PHIND_cA_level_1 (0.003900)
76. feature 278 PHIND_moving_min_7ft (0.003852)
77. feature 33 ILD_log10_cA_level_3 (0.003812)
78. feature 220 ILD_log10_moving_max_4ft (0.003765)
79. feature 169 PE_moving_av_5ft (0.003751)
80. feature 86 PHIND_cA_level_4 (0.003729)
81. feature 231 DeltaPHI_moving_max_15ft (0.003673)
82. feature 176 PHIND_moving_av_20ft (0.003595)
83. feature 243 PHIND_moving_max_7ft (0.003566)
84. feature 70 GR_cA_level_4 (0.003537)
85. feature 263 DeltaPHI_moving_min_5ft (0.003492)
86. feature 212 GR_moving_max_3ft (0.003473)
87. feature 229 DeltaPHI_moving_max_7ft (0.003400)
88. feature 260 ILD_log10_moving_min_20ft (0.003384)
89. feature 168 PE_moving_av_2ft (0.003371)
90. feature 22 PE_cD_level_4 (0.003364)
91. feature 245 PHIND_moving_max_15ft (0.003225)
92. feature 274 PE_moving_min_20ft (0.003152)
93. feature 82 PE_cA_level_4 (0.003132)
94. feature 87 GR_entropy_foot2 (0.003090)
95. feature 27 GR_cA_level_1 (0.003087)
96. feature 10 GR_cD_level_4 (0.003044)
97. feature 54 ILD_log10_cD_level_4 (0.003015)
98. feature 40 PE_cA_level_2 (0.002986)
99. feature 62 PE_cD_level_4 (0.002894)
100. feature 2 DeltaPHI (0.002862)
101. feature 98 ILD_log10_entropy_foot10 (0.002792)
102. feature 78 DeltaPHI_cA_level_4 (0.002736)
103. feature 266 DeltaPHI_moving_min_15ft (0.002735)
104. feature 45 PHIND_cA_level_3 (0.002653)
105. feature 261 DeltaPHI_moving_min_3ft (0.002625)
106. feature 166 DeltaPHI_moving_av_20ft (0.002619)
107. feature 46 PHIND_cA_level_4 (0.002609)
108. feature 154 GR_moving_av_5ft (0.002592)
109. feature 77 DeltaPHI_cA_level_3 (0.002551)
110. feature 232 DeltaPHI_moving_max_20ft (0.002545)
111. feature 252 GR_moving_min_15ft (0.002541)
112. feature 268 PE_moving_min_3ft (0.002493)
113. feature 9 GR_cD_level_3 (0.002458)
114. feature 175 PHIND_moving_av_10ft (0.002419)
115. feature 227 DeltaPHI_moving_max_4ft (0.002415)
116. feature 30 GR_cA_level_4 (0.002377)
117. feature 211 PHIND_moving_std_20ft (0.002376)
118. feature 244 PHIND_moving_max_10ft (0.002362)
119. feature 267 DeltaPHI_moving_min_20ft (0.002321)
120. feature 163 DeltaPHI_moving_av_2ft (0.002276)
121. feature 253 GR_moving_min_20ft (0.002248)
122. feature 210 PHIND_moving_std_15ft (0.002224)
123. feature 228 DeltaPHI_moving_max_5ft (0.002209)
124. feature 92 GR_entropy_foot10 (0.002205)
125. feature 279 PHIND_moving_min_10ft (0.002189)
126. feature 68 GR_cA_level_2 (0.002179)
127. feature 225 ILD_log10_moving_max_20ft (0.002164)
128. feature 35 DeltaPHI_cA_level_1 (0.002157)
129. feature 196 DeltaPHI_moving_std_15ft (0.002150)
130. feature 235 PE_moving_max_5ft (0.002136)
131. feature 237 PE_moving_max_10ft (0.002133)
132. feature 233 PE_moving_max_3ft (0.002054)
133. feature 17 DeltaPHI_cD_level_3 (0.002046)
134. feature 44 PHIND_cA_level_2 (0.002030)
135. feature 197 DeltaPHI_moving_std_20ft (0.002011)
136. feature 74 ILD_log10_cA_level_4 (0.002005)
137. feature 69 GR_cA_level_3 (0.001974)
138. feature 84 PHIND_cA_level_2 (0.001971)
139. feature 234 PE_moving_max_4ft (0.001941)
140. feature 36 DeltaPHI_cA_level_2 (0.001940)
141. feature 265 DeltaPHI_moving_min_10ft (0.001915)
142. feature 25 PHIND_cD_level_3 (0.001901)
143. feature 280 PHIND_moving_min_15ft (0.001895)
144. feature 156 GR_moving_av_20ft (0.001888)
145. feature 42 PE_cA_level_4 (0.001869)
146. feature 226 DeltaPHI_moving_max_3ft (0.001856)
147. feature 113 PHIND_entropy_foot4 (0.001846)
148. feature 262 DeltaPHI_moving_min_4ft (0.001806)
149. feature 58 DeltaPHI_cD_level_4 (0.001804)
150. feature 66 PHIND_cD_level_4 (0.001798)
151. feature 203 PE_moving_std_15ft (0.001797)
152. feature 162 DeltaPHI_moving_av_1ft (0.001776)
153. feature 164 DeltaPHI_moving_av_5ft (0.001759)
154. feature 88 GR_entropy_foot3 (0.001689)
155. feature 14 ILD_log10_cD_level_4 (0.001689)
156. feature 18 DeltaPHI_cD_level_4 (0.001684)
157. feature 90 GR_entropy_foot5 (0.001682)
158. feature 89 GR_entropy_foot4 (0.001646)
159. feature 215 GR_moving_max_7ft (0.001638)
160. feature 217 GR_moving_max_15ft (0.001597)
161. feature 73 ILD_log10_cA_level_3 (0.001554)
162. feature 28 GR_cA_level_2 (0.001541)
163. feature 216 GR_moving_max_10ft (0.001515)
164. feature 218 GR_moving_max_20ft (0.001498)
165. feature 273 PE_moving_min_15ft (0.001492)
166. feature 190 ILD_log10_moving_std_20ft (0.001424)
167. feature 189 ILD_log10_moving_std_15ft (0.001422)
168. feature 50 GR_cD_level_4 (0.001421)
169. feature 258 ILD_log10_moving_min_10ft (0.001419)
170. feature 94 ILD_log10_entropy_foot3 (0.001407)
171. feature 182 GR_moving_std_15ft (0.001398)
172. feature 202 PE_moving_std_10ft (0.001389)
173. feature 97 ILD_log10_entropy_foot7 (0.001343)
174. feature 67 GR_cA_level_1 (0.001277)
175. feature 65 PHIND_cD_level_3 (0.001261)
176. feature 183 GR_moving_std_20ft (0.001236)
177. feature 61 PE_cD_level_3 (0.001223)
178. feature 96 ILD_log10_entropy_foot5 (0.001221)
179. feature 12 ILD_log10_cD_level_2 (0.001213)
180. feature 29 GR_cA_level_3 (0.001206)
181. feature 209 PHIND_moving_std_10ft (0.001194)
182. feature 115 PHIND_entropy_foot7 (0.001187)
183. feature 213 GR_moving_max_4ft (0.001178)
184. feature 57 DeltaPHI_cD_level_3 (0.001171)
185. feature 95 ILD_log10_entropy_foot4 (0.001152)
186. feature 281 PHIND_moving_min_20ft (0.001129)
187. feature 181 GR_moving_std_10ft (0.001116)
188. feature 8 GR_cD_level_2 (0.001071)
189. feature 80 PE_cA_level_2 (0.001048)
190. feature 26 PHIND_cD_level_4 (0.001042)
191. feature 111 PHIND_entropy_foot2 (0.001025)
192. feature 179 GR_moving_std_5ft (0.001022)
193. feature 75 DeltaPHI_cA_level_1 (0.001017)
194. feature 214 GR_moving_max_5ft (0.001001)
195. feature 180 GR_moving_std_7ft (0.000997)
196. feature 85 PHIND_cA_level_3 (0.000971)
197. feature 114 PHIND_entropy_foot5 (0.000971)
198. feature 201 PE_moving_std_7ft (0.000905)
199. feature 72 ILD_log10_cA_level_2 (0.000866)
200. feature 13 ILD_log10_cD_level_3 (0.000861)
201. feature 186 ILD_log10_moving_std_5ft (0.000842)
202. feature 194 DeltaPHI_moving_std_7ft (0.000805)
203. feature 21 PE_cD_level_3 (0.000796)
204. feature 93 ILD_log10_entropy_foot2 (0.000760)
205. feature 60 PE_cD_level_2 (0.000742)
206. feature 64 PHIND_cD_level_2 (0.000741)
207. feature 200 PE_moving_std_5ft (0.000726)
208. feature 139 PEgradient_dx3 (0.000719)
209. feature 187 ILD_log10_moving_std_7ft (0.000715)
210. feature 7 GR_cD_level_1 (0.000714)
211. feature 24 PHIND_cD_level_2 (0.000688)
212. feature 53 ILD_log10_cD_level_3 (0.000687)
213. feature 206 PHIND_moving_std_4ft (0.000684)
214. feature 112 PHIND_entropy_foot3 (0.000655)
215. feature 4 PE (0.000629)
216. feature 76 DeltaPHI_cA_level_2 (0.000592)
217. feature 71 ILD_log10_cA_level_1 (0.000589)
218. feature 55 DeltaPHI_cD_level_1 (0.000575)
219. feature 100 DeltaPHI_entropy_foot3 (0.000571)
220. feature 208 PHIND_moving_std_7ft (0.000568)
221. feature 207 PHIND_moving_std_5ft (0.000560)
222. feature 104 DeltaPHI_entropy_foot10 (0.000551)
223. feature 195 DeltaPHI_moving_std_10ft (0.000547)
224. feature 205 PHIND_moving_std_3ft (0.000535)
225. feature 116 PHIND_entropy_foot10 (0.000525)
226. feature 188 ILD_log10_moving_std_10ft (0.000519)
227. feature 101 DeltaPHI_entropy_foot4 (0.000516)
228. feature 192 DeltaPHI_moving_std_4ft (0.000513)
229. feature 49 GR_cD_level_3 (0.000511)
230. feature 128 ILD_log10gradient_dx6 (0.000503)
231. feature 140 PEgradient_dx4 (0.000470)
232. feature 56 DeltaPHI_cD_level_2 (0.000447)
233. feature 19 PE_cD_level_1 (0.000443)
234. feature 119 GRgradient_dx4 (0.000442)
235. feature 122 GRgradient_dx10 (0.000438)
236. feature 191 DeltaPHI_moving_std_3ft (0.000429)
237. feature 198 PE_moving_std_3ft (0.000415)
238. feature 138 PEgradient_dx2 (0.000411)
239. feature 129 ILD_log10gradient_dx10 (0.000406)
240. feature 193 DeltaPHI_moving_std_5ft (0.000394)
241. feature 145 PHINDgradient_dx2 (0.000389)
242. feature 125 ILD_log10gradient_dx3 (0.000387)
243. feature 52 ILD_log10_cD_level_2 (0.000387)
244. feature 151 PHINDgradient_dx20 (0.000368)
245. feature 127 ILD_log10gradient_dx5 (0.000368)
246. feature 121 GRgradient_dx6 (0.000347)
247. feature 132 DeltaPHIgradient_dx3 (0.000344)
248. feature 99 DeltaPHI_entropy_foot2 (0.000341)
249. feature 51 ILD_log10_cD_level_1 (0.000336)
250. feature 63 PHIND_cD_level_1 (0.000321)
251. feature 146 PHINDgradient_dx3 (0.000315)
252. feature 147 PHINDgradient_dx4 (0.000314)
253. feature 141 PEgradient_dx5 (0.000312)
254. feature 136 DeltaPHIgradient_dx10 (0.000311)
255. feature 103 DeltaPHI_entropy_foot7 (0.000310)
256. feature 167 PE_moving_av_1ft (0.000301)
257. feature 126 ILD_log10gradient_dx4 (0.000293)
258. feature 149 PHINDgradient_dx6 (0.000293)
259. feature 48 GR_cD_level_2 (0.000289)
260. feature 102 DeltaPHI_entropy_foot5 (0.000280)
261. feature 118 GRgradient_dx3 (0.000279)
262. feature 15 DeltaPHI_cD_level_1 (0.000274)
263. feature 11 ILD_log10_cD_level_1 (0.000258)
264. feature 148 PHINDgradient_dx5 (0.000247)
265. feature 123 GRgradient_dx20 (0.000244)
266. feature 144 PEgradient_dx20 (0.000236)
267. feature 150 PHINDgradient_dx10 (0.000231)
268. feature 124 ILD_log10gradient_dx2 (0.000230)
269. feature 108 PE_entropy_foot5 (0.000223)
270. feature 142 PEgradient_dx6 (0.000223)
271. feature 185 ILD_log10_moving_std_4ft (0.000197)
272. feature 16 DeltaPHI_cD_level_2 (0.000194)
273. feature 135 DeltaPHIgradient_dx6 (0.000187)
274. feature 178 GR_moving_std_4ft (0.000178)
275. feature 20 PE_cD_level_2 (0.000170)
276. feature 59 PE_cD_level_1 (0.000169)
277. feature 130 ILD_log10gradient_dx20 (0.000166)
278. feature 23 PHIND_cD_level_1 (0.000166)
279. feature 137 DeltaPHIgradient_dx20 (0.000162)
280. feature 134 DeltaPHIgradient_dx5 (0.000160)
281. feature 133 DeltaPHIgradient_dx4 (0.000151)
282. feature 143 PEgradient_dx10 (0.000150)
283. feature 120 GRgradient_dx5 (0.000132)
284. feature 110 PE_entropy_foot10 (0.000127)
285. feature 106 PE_entropy_foot3 (0.000118)
286. feature 47 GR_cD_level_1 (0.000105)
287. feature 109 PE_entropy_foot7 (0.000104)
288. feature 184 ILD_log10_moving_std_3ft (0.000100)
289. feature 107 PE_entropy_foot4 (0.000093)
290. feature 177 GR_moving_std_3ft (0.000077)
291. feature 131 DeltaPHIgradient_dx2 (0.000072)
292. feature 199 PE_moving_std_4ft (0.000071)
293. feature 117 GRgradient_dx2 (0.000065)
294. feature 91 GR_entropy_foot7 (0.000041)
295. feature 105 PE_entropy_foot2 (0.000000)
###Markdown
Plot the feature importances of the forest
###Code
sns.set_style('white')
fig,ax = plt.subplots(figsize=(15,5))
ax.bar(range(X.shape[1]), importances[indices],color="r", align="center")
plt.ylabel("Feature importance")
plt.xlabel('Ranked features')
plt.xticks([], indices)
plt.xlim([-1, X.shape[1]]);
###Output
_____no_output_____
###Markdown
Features derived from raw geological variables tend to have the highest classification power. Rolling min, max and mean tend to have better classification power than raw data. Wavelet approximation coefficients tend to have similar or lower classification power compared to raw data. Features expressing the local texture of the data (entropy, gradient, standard deviation and wavelet detail coefficients) have a low classification power but still participate in the prediction. Confusion matrixThe confusion matrix from the validation test is presented below.
###Code
######## Confusion matrix from this tuning
cv=LeaveOneGroupOut().split(X, y, groups)
y_pred = cross_val_predict(Cl, X, y, cv=cv, n_jobs=-1)
conf_mat = confusion_matrix(y, y_pred)
list_facies = ['SS', 'CSiS', 'FSiS', 'SiSh', 'MS', 'WS', 'D', 'PS', 'BS']
conf_mat = pd.DataFrame(conf_mat, columns=list_facies, index=list_facies)
conf_mat.head(10)
###Output
_____no_output_____
###Markdown
Applying the classification model to test data
###Code
filename = '../validation_data_nofacies.csv'
test_data = pd.read_csv(filename)
test_data.head(5)
##### cD From wavelet db1
dwt_db1_cD_df = FE.make_dwt_vars_cD(wells_df=test_data, logs=['GR', 'ILD_log10', 'DeltaPHI', 'PE', 'PHIND'],
levels=[1, 2, 3, 4], wavelet='db1')
##### cA From wavelet db1
dwt_db1_cA_df = FE.make_dwt_vars_cA(wells_df=test_data, logs=['GR', 'ILD_log10', 'DeltaPHI', 'PE', 'PHIND'],
levels=[1, 2, 3, 4], wavelet='db1')
##### cD From wavelet db3
dwt_db3_cD_df = FE.make_dwt_vars_cD(wells_df=test_data, logs=['GR', 'ILD_log10', 'DeltaPHI', 'PE', 'PHIND'],
levels=[1, 2, 3, 4], wavelet='db3')
##### cA From wavelet db3
dwt_db3_cA_df = FE.make_dwt_vars_cA(wells_df=test_data, logs=['GR', 'ILD_log10', 'DeltaPHI', 'PE', 'PHIND'],
levels=[1, 2, 3, 4], wavelet='db3')
##### From entropy
entropy_df = FE.make_entropy_vars(wells_df=test_data, logs=['GR', 'ILD_log10', 'DeltaPHI', 'PE', 'PHIND'],
l_foots=[2, 3, 4, 5, 7, 10])
###### From gradient
gradient_df = FE.make_gradient_vars(wells_df=test_data, logs=['GR', 'ILD_log10', 'DeltaPHI', 'PE', 'PHIND'],
dx_list=[2, 3, 4, 5, 6, 10, 20])
##### From rolling average
moving_av_df = FE.make_moving_av_vars(wells_df=test_data, logs=['GR', 'ILD_log10', 'DeltaPHI', 'PE', 'PHIND'],
windows=[1, 2, 5, 10, 20])
##### From rolling standard deviation
moving_std_df = FE.make_moving_std_vars(wells_df=test_data, logs=['GR', 'ILD_log10', 'DeltaPHI', 'PE', 'PHIND'],
windows=[3 , 4, 5, 7, 10, 15, 20])
##### From rolling max
moving_max_df = FE.make_moving_max_vars(wells_df=test_data, logs=['GR', 'ILD_log10', 'DeltaPHI', 'PE', 'PHIND'],
windows=[3, 4, 5, 7, 10, 15, 20])
##### From rolling min
moving_min_df = FE.make_moving_min_vars(wells_df=test_data, logs=['GR', 'ILD_log10', 'DeltaPHI', 'PE', 'PHIND'],
windows=[3 , 4, 5, 7, 10, 15, 20])
###### From rolling NM/M ratio
rolling_marine_ratio_df = FE.make_rolling_marine_ratio_vars(wells_df=test_data, windows=[5, 10, 15, 20, 30, 50, 75, 100, 200])
###### From distance to NM and M, up and down
dist_M_up_df = FE.make_distance_to_M_up_vars(wells_df=test_data)
dist_M_down_df = FE.make_distance_to_M_down_vars(wells_df=test_data)
dist_NM_up_df = FE.make_distance_to_NM_up_vars(wells_df=test_data)
dist_NM_down_df = FE.make_distance_to_NM_down_vars(wells_df=test_data)
combined_test_df = test_data
list_df_var = [dwt_db1_cD_df, dwt_db1_cA_df, dwt_db3_cD_df, dwt_db3_cA_df,
entropy_df, gradient_df, moving_av_df, moving_std_df, moving_max_df, moving_min_df,
rolling_marine_ratio_df, dist_M_up_df, dist_M_down_df, dist_NM_up_df, dist_NM_down_df]
for var_df in list_df_var:
temp_df = var_df
combined_test_df = pd.concat([combined_test_df,temp_df],axis=1)
combined_test_df.replace(to_replace=np.nan, value='-99999', inplace=True)
X_test = combined_test_df.iloc[:, 3:]
print (combined_test_df.shape)
combined_test_df.head(5)
Cl = RandomForestClassifier(n_estimators=100, max_features=0.1, min_samples_leaf=25,
min_samples_split=50, class_weight='balanced', random_state=42, n_jobs=-1)
Cl.fit(X, y)
y_test = Cl.predict(X_test)
y_test = pd.DataFrame(y_test, columns=['Predicted Facies'])
test_pred_df = pd.concat([combined_test_df[['Well Name', 'Depth']], y_test], axis=1)
test_pred_df.head()
###Output
_____no_output_____
###Markdown
Exporting results
###Code
test_pred_df.to_pickle('Prediction_blind_wells_RF_c.pkl')
###Output
_____no_output_____
###Markdown
Facies classification using Random Forest
Contest entry by [geoLEARN](http://geolearn.ca/): [Martin Blouin](https://github.com/mablou), [Lorenzo Perozzi](https://github.com/lperozzi), [Antoine Caté](https://github.com/Antoine-Cate), in collaboration with [Erwan Gloaguen](http://ete.inrs.ca/erwan-gloaguen)
[Original contest notebook](../Facies_classification.ipynb) by Brendon Hall, [Enthought](https://www.enthought.com/)
In this notebook we will train a machine learning algorithm to predict facies from well log data. The dataset comes from a class exercise from The University of Kansas on [Neural Networks and Fuzzy Systems](http://www.people.ku.edu/~gbohling/EECS833/). This exercise is based on a consortium project to use machine learning techniques to create a reservoir model of the largest gas fields in North America, the Hugoton and Panoma Fields. For more info on the origin of the data, see [Bohling and Dubois (2003)](http://www.kgs.ku.edu/PRS/publication/2003/ofr2003-50.pdf) and [Dubois et al. (2007)](http://dx.doi.org/10.1016/j.cageo.2006.08.011).
The dataset consists of log data from nine wells that have been labeled with a facies type based on observation of core. We will use this log data to train a Random Forest model to classify facies types.
Exploring the dataset
First, we import and examine the dataset used to train the classifier.
###Code
###### Importing all used packages
%matplotlib inline
import warnings
warnings.filterwarnings('ignore')
import pandas as pd
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.colors as colors
from mpl_toolkits.axes_grid1 import make_axes_locatable
import seaborn as sns
from pandas import set_option
# set_option("display.max_rows", 10)
pd.options.mode.chained_assignment = None
###### Import packages needed for the make_vars functions
import Feature_Engineering as FE
##### import stuff from scikit learn
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import KFold, cross_val_score,LeavePGroupsOut, LeaveOneGroupOut, cross_val_predict
from sklearn.metrics import confusion_matrix, make_scorer, f1_score, accuracy_score, recall_score, precision_score
filename = '../facies_vectors.csv'
training_data = pd.read_csv(filename)
training_data.head()
training_data.describe()
###Output
_____no_output_____
###Markdown
A complete description of the dataset is given in the [Original contest notebook](../Facies_classification.ipynb) by Brendon Hall, [Enthought](https://www.enthought.com/). A total of four measured rock properties and two interpreted geological properties are given as raw predictor variables for the prediction of the "Facies" class.
Feature engineering
As stated in our [previous submission](Submission_increased_variance.ipynb), we believe that feature engineering has a high potential for increasing classification success. A strategy for building new variables is explained below.
The dataset is distributed along a series of drillholes intersecting a stratigraphic sequence. Sedimentary facies tend to be deposited in sequences that reflect the evolution of the paleo-environment (variations in water depth, water temperature, biological activity, current strength, detrital input, ...). Each facies represents a specific depositional environment and is in contact with facies that represent a progressive transition to another environment. Thus, there is a relationship between neighbouring samples, and the distribution of the data along drillholes can be as important as the data values themselves for predicting facies.
A series of new variables (features) is calculated and tested below to help represent the relationship between neighbouring samples and the overall texture of the data along drillholes. These variables are:
- detail and approximation coefficients at various levels of two [wavelet transforms](https://en.wikipedia.org/wiki/Discrete_wavelet_transform) (using two types of [Daubechies](https://en.wikipedia.org/wiki/Daubechies_wavelet) wavelets);
- measures of the local entropy with variable observation windows;
- measures of the local gradient with variable observation windows;
- rolling statistical calculations (i.e., mean, standard deviation, min and max) with variable observation windows;
- ratios between marine and non-marine lithofacies with different observation windows;
- distances from the nearest marine or non-marine occurrence, uphole and downhole.
Functions used to build these variables are located in the [Feature Engineering](Feature_Engineering.py) Python script. The data exploration work behind the conception and study of these variables is not presented here.
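The FE.make_* helper functions themselves live in Feature_Engineering.py and are not shown in this notebook. As a rough illustration only (hypothetical function and column names, windows treated as a number of samples, and window centring guessed; the real implementation may differ), a rolling-statistics builder could look like this:

```python
import pandas as pd

def make_rolling_stat_vars(wells_df, logs, windows):
    """Per-well rolling mean/min/max of each log, one new column per (log, window, stat)."""
    out = pd.DataFrame(index=wells_df.index)
    grouped = wells_df.groupby('Well Name')
    for log in logs:
        for w in windows:
            roll = grouped[log].rolling(window=w, center=True, min_periods=1)
            stats = {'av': roll.mean(), 'min': roll.min(), 'max': roll.max()}
            for stat_name, values in stats.items():
                # rolling over a groupby yields a (well, row) MultiIndex; drop the well level
                # so the new column aligns with the original rows
                col = '{}_moving_{}_{}ft'.format(log, stat_name, w)
                out[col] = values.reset_index(level=0, drop=True)
    return out
```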
###Code
##### cD From wavelet db1
dwt_db1_cD_df = FE.make_dwt_vars_cD(wells_df=training_data, logs=['GR', 'ILD_log10', 'DeltaPHI', 'PE', 'PHIND'],
levels=[1, 2, 3, 4], wavelet='db1')
##### cA From wavelet db1
dwt_db1_cA_df = FE.make_dwt_vars_cA(wells_df=training_data, logs=['GR', 'ILD_log10', 'DeltaPHI', 'PE', 'PHIND'],
levels=[1, 2, 3, 4], wavelet='db1')
##### cD From wavelet db3
dwt_db3_cD_df = FE.make_dwt_vars_cD(wells_df=training_data, logs=['GR', 'ILD_log10', 'DeltaPHI', 'PE', 'PHIND'],
levels=[1, 2, 3, 4], wavelet='db3')
##### cA From wavelet db3
dwt_db3_cA_df = FE.make_dwt_vars_cA(wells_df=training_data, logs=['GR', 'ILD_log10', 'DeltaPHI', 'PE', 'PHIND'],
levels=[1, 2, 3, 4], wavelet='db3')
##### From entropy
entropy_df = FE.make_entropy_vars(wells_df=training_data, logs=['GR', 'ILD_log10', 'DeltaPHI', 'PE', 'PHIND'],
l_foots=[2, 3, 4, 5, 7, 10])
###### From gradient
gradient_df = FE.make_gradient_vars(wells_df=training_data, logs=['GR', 'ILD_log10', 'DeltaPHI', 'PE', 'PHIND'],
dx_list=[2, 3, 4, 5, 6, 10, 20])
##### From rolling average
moving_av_df = FE.make_moving_av_vars(wells_df=training_data, logs=['GR', 'ILD_log10', 'DeltaPHI', 'PE', 'PHIND'],
windows=[1, 2, 5, 10, 20])
##### From rolling standard deviation
moving_std_df = FE.make_moving_std_vars(wells_df=training_data, logs=['GR', 'ILD_log10', 'DeltaPHI', 'PE', 'PHIND'],
windows=[3 , 4, 5, 7, 10, 15, 20])
##### From rolling max
moving_max_df = FE.make_moving_max_vars(wells_df=training_data, logs=['GR', 'ILD_log10', 'DeltaPHI', 'PE', 'PHIND'],
windows=[3, 4, 5, 7, 10, 15, 20])
##### From rolling min
moving_min_df = FE.make_moving_min_vars(wells_df=training_data, logs=['GR', 'ILD_log10', 'DeltaPHI', 'PE', 'PHIND'],
windows=[3 , 4, 5, 7, 10, 15, 20])
###### From rolling NM/M ratio
rolling_marine_ratio_df = FE.make_rolling_marine_ratio_vars(wells_df=training_data, windows=[5, 10, 15, 20, 30, 50, 75, 100, 200])
###### From distance to NM and M, up and down
dist_M_up_df = FE.make_distance_to_M_up_vars(wells_df=training_data)
dist_M_down_df = FE.make_distance_to_M_down_vars(wells_df=training_data)
dist_NM_up_df = FE.make_distance_to_NM_up_vars(wells_df=training_data)
dist_NM_down_df = FE.make_distance_to_NM_down_vars(wells_df=training_data)
list_df_var = [dwt_db1_cD_df, dwt_db1_cA_df, dwt_db3_cD_df, dwt_db3_cA_df,
entropy_df, gradient_df, moving_av_df, moving_std_df, moving_max_df, moving_min_df,
rolling_marine_ratio_df, dist_M_up_df, dist_M_down_df, dist_NM_up_df, dist_NM_down_df]
combined_df = training_data
for var_df in list_df_var:
temp_df = var_df
combined_df = pd.concat([combined_df,temp_df],axis=1)
combined_df.replace(to_replace=np.nan, value='-1', inplace=True)
print (combined_df.shape)
combined_df.head(5)
###Output
(4149, 299)
###Markdown
Building a prediction model from these variables
A Random Forest model is built here to test the effect of these new variables on the prediction power. Algorithm parameters have been tuned with a LeaveOneGroupOut cross-validation strategy so as to take into account the non-stationarity between the training and testing sets. The minimum size of individual tree leaves and split nodes has been increased as much as possible without significantly increasing the bias, so as to reduce the variance of the prediction.
Box plots for a series of scores obtained through cross-validation are presented below.
Create predictor and target arrays
###Code
X = combined_df.iloc[:, 4:]
y = combined_df['Facies']
groups = combined_df['Well Name']
###Output
_____no_output_____
###Markdown
Estimation of validation scores from this tuning
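The parameter search itself is not included in the notebook. A minimal sketch of how such a LeaveOneGroupOut-based tuning could be set up (a rough illustration, not the authors' actual procedure; the parameter grid is hypothetical and it uses the X, y and groups arrays defined above):

```python
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import GridSearchCV, LeaveOneGroupOut

param_grid = {'min_samples_leaf': [10, 25, 50],
              'min_samples_split': [20, 50, 100],
              'max_features': [0.05, 0.1, 0.2]}

search = GridSearchCV(RandomForestClassifier(n_estimators=100, class_weight='balanced',
                                             random_state=42, n_jobs=-1),
                      param_grid, scoring='f1_weighted', cv=LeaveOneGroupOut(), n_jobs=1)
# groups is forwarded to the LeaveOneGroupOut splitter, so each fold holds out one well
search.fit(X, y, groups=groups)
print(search.best_params_, search.best_score_)
```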
###Code
scoring_param = ['accuracy', 'recall_weighted', 'precision_weighted','f1_weighted']
scores = []
Cl = RandomForestClassifier(n_estimators=100, max_features=0.1, min_samples_leaf=25,
min_samples_split=50, class_weight='balanced', random_state=42, n_jobs=-1)
lpgo = LeavePGroupsOut(n_groups=2)
for scoring in scoring_param:
cv=lpgo.split(X, y, groups)
validated = cross_val_score(Cl, X, y, scoring=scoring, cv=cv, n_jobs=-1)
scores.append(validated)
scores = np.array(scores)
scores = np.swapaxes(scores, 0, 1)
scores = pd.DataFrame(data=scores, columns=scoring_param)
sns.set_style('white')
fig,ax = plt.subplots(figsize=(8,6))
sns.boxplot(data=scores)
plt.xlabel('scoring parameters')
plt.ylabel('score')
plt.title('Classification scores for tuned parameters');
###Output
_____no_output_____
###Markdown
Evaluating feature importances
The individual contribution of each feature to the classification (i.e., the feature importances) can be obtained from a Random Forest classifier. This gives a good idea of the classification power of individual features and helps in understanding which type of feature engineering is the most promising.
Caution should be taken when interpreting feature importances, as highly correlated variables tend to dilute their classification power between themselves and will rank lower than uncorrelated variables.
###Code
####### Evaluation of feature importances
Cl = RandomForestClassifier(n_estimators=75, max_features=0.1, min_samples_leaf=25,
min_samples_split=50, class_weight='balanced', random_state=42,oob_score=True, n_jobs=-1)
Cl.fit(X, y)
print ('OOB estimate of accuracy for prospectivity classification using all features: %s' % str(Cl.oob_score_))
importances = Cl.feature_importances_
std = np.std([tree.feature_importances_ for tree in Cl.estimators_], axis=0)
indices = np.argsort(importances)[::-1]
print("Feature ranking:")
Vars = list(X.columns.values)
for f in range(X.shape[1]):
print("%d. feature %d %s (%f)" % (f + 1, indices[f], Vars[indices[f]], importances[indices[f]]))
###Output
OOB estimate of accuracy for prospectivity classification using all features: 0.723065798988
Feature ranking:
1. feature 282 Marine_ratio_5_centered (0.031530)
2. feature 293 dist_NM_up (0.030115)
3. feature 288 Marine_ratio_75_centered (0.029691)
4. feature 292 dist_M_down (0.027875)
5. feature 284 Marine_ratio_15_centered (0.027651)
6. feature 285 Marine_ratio_20_centered (0.026680)
7. feature 290 Marine_ratio_200_centered (0.026423)
8. feature 286 Marine_ratio_30_centered (0.022103)
9. feature 5 NM_M (0.021090)
10. feature 289 Marine_ratio_100_centered (0.018909)
11. feature 287 Marine_ratio_50_centered (0.017506)
12. feature 283 Marine_ratio_10_centered (0.016424)
13. feature 171 PE_moving_av_20ft (0.014572)
14. feature 249 GR_moving_min_5ft (0.012678)
15. feature 291 dist_M_up (0.011630)
16. feature 250 GR_moving_min_7ft (0.011605)
17. feature 81 PE_cA_level_3 (0.010347)
18. feature 254 ILD_log10_moving_min_3ft (0.009445)
19. feature 272 PE_moving_min_10ft (0.009424)
20. feature 294 dist_NM_down (0.008873)
21. feature 219 ILD_log10_moving_max_3ft (0.008808)
22. feature 241 PHIND_moving_max_4ft (0.008783)
23. feature 224 ILD_log10_moving_max_15ft (0.008776)
24. feature 248 GR_moving_min_4ft (0.008697)
25. feature 0 GR (0.008360)
26. feature 223 ILD_log10_moving_max_10ft (0.008254)
27. feature 271 PE_moving_min_7ft (0.008216)
28. feature 170 PE_moving_av_10ft (0.007934)
29. feature 251 GR_moving_min_10ft (0.007851)
30. feature 277 PHIND_moving_min_5ft (0.007727)
31. feature 275 PHIND_moving_min_3ft (0.007675)
32. feature 257 ILD_log10_moving_min_7ft (0.007360)
33. feature 240 PHIND_moving_max_3ft (0.007212)
34. feature 247 GR_moving_min_3ft (0.007172)
35. feature 159 ILD_log10_moving_av_5ft (0.007125)
36. feature 221 ILD_log10_moving_max_5ft (0.007063)
37. feature 174 PHIND_moving_av_5ft (0.006821)
38. feature 37 DeltaPHI_cA_level_3 (0.006339)
39. feature 160 ILD_log10_moving_av_10ft (0.006312)
40. feature 255 ILD_log10_moving_min_4ft (0.006159)
41. feature 259 ILD_log10_moving_min_15ft (0.005927)
42. feature 269 PE_moving_min_4ft (0.005768)
43. feature 31 ILD_log10_cA_level_1 (0.005768)
44. feature 239 PE_moving_max_20ft (0.005693)
45. feature 152 GR_moving_av_1ft (0.005620)
46. feature 1 ILD_log10 (0.005587)
47. feature 3 PHIND (0.005504)
48. feature 6 RELPOS (0.005443)
49. feature 153 GR_moving_av_2ft (0.005402)
50. feature 34 ILD_log10_cA_level_4 (0.005058)
51. feature 238 PE_moving_max_15ft (0.005040)
52. feature 157 ILD_log10_moving_av_1ft (0.004991)
53. feature 43 PHIND_cA_level_1 (0.004968)
54. feature 246 PHIND_moving_max_20ft (0.004958)
55. feature 276 PHIND_moving_min_4ft (0.004852)
56. feature 230 DeltaPHI_moving_max_10ft (0.004707)
57. feature 173 PHIND_moving_av_2ft (0.004687)
58. feature 39 PE_cA_level_1 (0.004664)
59. feature 32 ILD_log10_cA_level_2 (0.004654)
60. feature 242 PHIND_moving_max_5ft (0.004548)
61. feature 172 PHIND_moving_av_1ft (0.004467)
62. feature 41 PE_cA_level_3 (0.004452)
63. feature 158 ILD_log10_moving_av_2ft (0.004395)
64. feature 256 ILD_log10_moving_min_5ft (0.004373)
65. feature 264 DeltaPHI_moving_min_7ft (0.004358)
66. feature 222 ILD_log10_moving_max_7ft (0.004249)
67. feature 165 DeltaPHI_moving_av_10ft (0.004245)
68. feature 270 PE_moving_min_5ft (0.004242)
69. feature 161 ILD_log10_moving_av_20ft (0.004107)
70. feature 38 DeltaPHI_cA_level_4 (0.004102)
71. feature 155 GR_moving_av_10ft (0.004062)
72. feature 236 PE_moving_max_7ft (0.003970)
73. feature 204 PE_moving_std_20ft (0.003910)
74. feature 79 PE_cA_level_1 (0.003905)
75. feature 83 PHIND_cA_level_1 (0.003900)
76. feature 278 PHIND_moving_min_7ft (0.003852)
77. feature 33 ILD_log10_cA_level_3 (0.003812)
78. feature 220 ILD_log10_moving_max_4ft (0.003765)
79. feature 169 PE_moving_av_5ft (0.003751)
80. feature 86 PHIND_cA_level_4 (0.003729)
81. feature 231 DeltaPHI_moving_max_15ft (0.003673)
82. feature 176 PHIND_moving_av_20ft (0.003595)
83. feature 243 PHIND_moving_max_7ft (0.003566)
84. feature 70 GR_cA_level_4 (0.003537)
85. feature 263 DeltaPHI_moving_min_5ft (0.003492)
86. feature 212 GR_moving_max_3ft (0.003473)
87. feature 229 DeltaPHI_moving_max_7ft (0.003400)
88. feature 260 ILD_log10_moving_min_20ft (0.003384)
89. feature 168 PE_moving_av_2ft (0.003371)
90. feature 22 PE_cD_level_4 (0.003364)
91. feature 245 PHIND_moving_max_15ft (0.003225)
92. feature 274 PE_moving_min_20ft (0.003152)
93. feature 82 PE_cA_level_4 (0.003132)
94. feature 87 GR_entropy_foot2 (0.003090)
95. feature 27 GR_cA_level_1 (0.003087)
96. feature 10 GR_cD_level_4 (0.003044)
97. feature 54 ILD_log10_cD_level_4 (0.003015)
98. feature 40 PE_cA_level_2 (0.002986)
99. feature 62 PE_cD_level_4 (0.002894)
100. feature 2 DeltaPHI (0.002862)
101. feature 98 ILD_log10_entropy_foot10 (0.002792)
102. feature 78 DeltaPHI_cA_level_4 (0.002736)
103. feature 266 DeltaPHI_moving_min_15ft (0.002735)
104. feature 45 PHIND_cA_level_3 (0.002653)
105. feature 261 DeltaPHI_moving_min_3ft (0.002625)
106. feature 166 DeltaPHI_moving_av_20ft (0.002619)
107. feature 46 PHIND_cA_level_4 (0.002609)
108. feature 154 GR_moving_av_5ft (0.002592)
109. feature 77 DeltaPHI_cA_level_3 (0.002551)
110. feature 232 DeltaPHI_moving_max_20ft (0.002545)
111. feature 252 GR_moving_min_15ft (0.002541)
112. feature 268 PE_moving_min_3ft (0.002493)
113. feature 9 GR_cD_level_3 (0.002458)
114. feature 175 PHIND_moving_av_10ft (0.002419)
115. feature 227 DeltaPHI_moving_max_4ft (0.002415)
116. feature 30 GR_cA_level_4 (0.002377)
117. feature 211 PHIND_moving_std_20ft (0.002376)
118. feature 244 PHIND_moving_max_10ft (0.002362)
119. feature 267 DeltaPHI_moving_min_20ft (0.002321)
120. feature 163 DeltaPHI_moving_av_2ft (0.002276)
121. feature 253 GR_moving_min_20ft (0.002248)
122. feature 210 PHIND_moving_std_15ft (0.002224)
123. feature 228 DeltaPHI_moving_max_5ft (0.002209)
124. feature 92 GR_entropy_foot10 (0.002205)
125. feature 279 PHIND_moving_min_10ft (0.002189)
126. feature 68 GR_cA_level_2 (0.002179)
127. feature 225 ILD_log10_moving_max_20ft (0.002164)
128. feature 35 DeltaPHI_cA_level_1 (0.002157)
129. feature 196 DeltaPHI_moving_std_15ft (0.002150)
130. feature 235 PE_moving_max_5ft (0.002136)
131. feature 237 PE_moving_max_10ft (0.002133)
132. feature 233 PE_moving_max_3ft (0.002054)
133. feature 17 DeltaPHI_cD_level_3 (0.002046)
134. feature 44 PHIND_cA_level_2 (0.002030)
135. feature 197 DeltaPHI_moving_std_20ft (0.002011)
136. feature 74 ILD_log10_cA_level_4 (0.002005)
137. feature 69 GR_cA_level_3 (0.001974)
138. feature 84 PHIND_cA_level_2 (0.001971)
139. feature 234 PE_moving_max_4ft (0.001941)
140. feature 36 DeltaPHI_cA_level_2 (0.001940)
141. feature 265 DeltaPHI_moving_min_10ft (0.001915)
142. feature 25 PHIND_cD_level_3 (0.001901)
143. feature 280 PHIND_moving_min_15ft (0.001895)
144. feature 156 GR_moving_av_20ft (0.001888)
145. feature 42 PE_cA_level_4 (0.001869)
146. feature 226 DeltaPHI_moving_max_3ft (0.001856)
147. feature 113 PHIND_entropy_foot4 (0.001846)
148. feature 262 DeltaPHI_moving_min_4ft (0.001806)
149. feature 58 DeltaPHI_cD_level_4 (0.001804)
150. feature 66 PHIND_cD_level_4 (0.001798)
151. feature 203 PE_moving_std_15ft (0.001797)
152. feature 162 DeltaPHI_moving_av_1ft (0.001776)
153. feature 164 DeltaPHI_moving_av_5ft (0.001759)
154. feature 88 GR_entropy_foot3 (0.001689)
155. feature 14 ILD_log10_cD_level_4 (0.001689)
156. feature 18 DeltaPHI_cD_level_4 (0.001684)
157. feature 90 GR_entropy_foot5 (0.001682)
158. feature 89 GR_entropy_foot4 (0.001646)
159. feature 215 GR_moving_max_7ft (0.001638)
160. feature 217 GR_moving_max_15ft (0.001597)
161. feature 73 ILD_log10_cA_level_3 (0.001554)
162. feature 28 GR_cA_level_2 (0.001541)
163. feature 216 GR_moving_max_10ft (0.001515)
164. feature 218 GR_moving_max_20ft (0.001498)
165. feature 273 PE_moving_min_15ft (0.001492)
166. feature 190 ILD_log10_moving_std_20ft (0.001424)
167. feature 189 ILD_log10_moving_std_15ft (0.001422)
168. feature 50 GR_cD_level_4 (0.001421)
169. feature 258 ILD_log10_moving_min_10ft (0.001419)
170. feature 94 ILD_log10_entropy_foot3 (0.001407)
171. feature 182 GR_moving_std_15ft (0.001398)
172. feature 202 PE_moving_std_10ft (0.001389)
173. feature 97 ILD_log10_entropy_foot7 (0.001343)
174. feature 67 GR_cA_level_1 (0.001277)
175. feature 65 PHIND_cD_level_3 (0.001261)
176. feature 183 GR_moving_std_20ft (0.001236)
177. feature 61 PE_cD_level_3 (0.001223)
178. feature 96 ILD_log10_entropy_foot5 (0.001221)
179. feature 12 ILD_log10_cD_level_2 (0.001213)
180. feature 29 GR_cA_level_3 (0.001206)
181. feature 209 PHIND_moving_std_10ft (0.001194)
182. feature 115 PHIND_entropy_foot7 (0.001187)
183. feature 213 GR_moving_max_4ft (0.001178)
184. feature 57 DeltaPHI_cD_level_3 (0.001171)
185. feature 95 ILD_log10_entropy_foot4 (0.001152)
186. feature 281 PHIND_moving_min_20ft (0.001129)
187. feature 181 GR_moving_std_10ft (0.001116)
188. feature 8 GR_cD_level_2 (0.001071)
189. feature 80 PE_cA_level_2 (0.001048)
190. feature 26 PHIND_cD_level_4 (0.001042)
191. feature 111 PHIND_entropy_foot2 (0.001025)
192. feature 179 GR_moving_std_5ft (0.001022)
193. feature 75 DeltaPHI_cA_level_1 (0.001017)
194. feature 214 GR_moving_max_5ft (0.001001)
195. feature 180 GR_moving_std_7ft (0.000997)
196. feature 85 PHIND_cA_level_3 (0.000971)
197. feature 114 PHIND_entropy_foot5 (0.000971)
198. feature 201 PE_moving_std_7ft (0.000905)
199. feature 72 ILD_log10_cA_level_2 (0.000866)
200. feature 13 ILD_log10_cD_level_3 (0.000861)
201. feature 186 ILD_log10_moving_std_5ft (0.000842)
202. feature 194 DeltaPHI_moving_std_7ft (0.000805)
203. feature 21 PE_cD_level_3 (0.000796)
204. feature 93 ILD_log10_entropy_foot2 (0.000760)
205. feature 60 PE_cD_level_2 (0.000742)
206. feature 64 PHIND_cD_level_2 (0.000741)
207. feature 200 PE_moving_std_5ft (0.000726)
208. feature 139 PEgradient_dx3 (0.000719)
209. feature 187 ILD_log10_moving_std_7ft (0.000715)
210. feature 7 GR_cD_level_1 (0.000714)
211. feature 24 PHIND_cD_level_2 (0.000688)
212. feature 53 ILD_log10_cD_level_3 (0.000687)
213. feature 206 PHIND_moving_std_4ft (0.000684)
214. feature 112 PHIND_entropy_foot3 (0.000655)
215. feature 4 PE (0.000629)
216. feature 76 DeltaPHI_cA_level_2 (0.000592)
217. feature 71 ILD_log10_cA_level_1 (0.000589)
218. feature 55 DeltaPHI_cD_level_1 (0.000575)
219. feature 100 DeltaPHI_entropy_foot3 (0.000571)
220. feature 208 PHIND_moving_std_7ft (0.000568)
221. feature 207 PHIND_moving_std_5ft (0.000560)
222. feature 104 DeltaPHI_entropy_foot10 (0.000551)
223. feature 195 DeltaPHI_moving_std_10ft (0.000547)
224. feature 205 PHIND_moving_std_3ft (0.000535)
225. feature 116 PHIND_entropy_foot10 (0.000525)
226. feature 188 ILD_log10_moving_std_10ft (0.000519)
227. feature 101 DeltaPHI_entropy_foot4 (0.000516)
228. feature 192 DeltaPHI_moving_std_4ft (0.000513)
229. feature 49 GR_cD_level_3 (0.000511)
230. feature 128 ILD_log10gradient_dx6 (0.000503)
231. feature 140 PEgradient_dx4 (0.000470)
232. feature 56 DeltaPHI_cD_level_2 (0.000447)
233. feature 19 PE_cD_level_1 (0.000443)
234. feature 119 GRgradient_dx4 (0.000442)
235. feature 122 GRgradient_dx10 (0.000438)
236. feature 191 DeltaPHI_moving_std_3ft (0.000429)
237. feature 198 PE_moving_std_3ft (0.000415)
238. feature 138 PEgradient_dx2 (0.000411)
239. feature 129 ILD_log10gradient_dx10 (0.000406)
240. feature 193 DeltaPHI_moving_std_5ft (0.000394)
241. feature 145 PHINDgradient_dx2 (0.000389)
242. feature 125 ILD_log10gradient_dx3 (0.000387)
243. feature 52 ILD_log10_cD_level_2 (0.000387)
244. feature 151 PHINDgradient_dx20 (0.000368)
245. feature 127 ILD_log10gradient_dx5 (0.000368)
246. feature 121 GRgradient_dx6 (0.000347)
247. feature 132 DeltaPHIgradient_dx3 (0.000344)
248. feature 99 DeltaPHI_entropy_foot2 (0.000341)
249. feature 51 ILD_log10_cD_level_1 (0.000336)
250. feature 63 PHIND_cD_level_1 (0.000321)
251. feature 146 PHINDgradient_dx3 (0.000315)
252. feature 147 PHINDgradient_dx4 (0.000314)
253. feature 141 PEgradient_dx5 (0.000312)
254. feature 136 DeltaPHIgradient_dx10 (0.000311)
255. feature 103 DeltaPHI_entropy_foot7 (0.000310)
256. feature 167 PE_moving_av_1ft (0.000301)
257. feature 126 ILD_log10gradient_dx4 (0.000293)
258. feature 149 PHINDgradient_dx6 (0.000293)
259. feature 48 GR_cD_level_2 (0.000289)
260. feature 102 DeltaPHI_entropy_foot5 (0.000280)
261. feature 118 GRgradient_dx3 (0.000279)
262. feature 15 DeltaPHI_cD_level_1 (0.000274)
263. feature 11 ILD_log10_cD_level_1 (0.000258)
264. feature 148 PHINDgradient_dx5 (0.000247)
265. feature 123 GRgradient_dx20 (0.000244)
266. feature 144 PEgradient_dx20 (0.000236)
267. feature 150 PHINDgradient_dx10 (0.000231)
268. feature 124 ILD_log10gradient_dx2 (0.000230)
269. feature 108 PE_entropy_foot5 (0.000223)
270. feature 142 PEgradient_dx6 (0.000223)
271. feature 185 ILD_log10_moving_std_4ft (0.000197)
272. feature 16 DeltaPHI_cD_level_2 (0.000194)
273. feature 135 DeltaPHIgradient_dx6 (0.000187)
274. feature 178 GR_moving_std_4ft (0.000178)
275. feature 20 PE_cD_level_2 (0.000170)
276. feature 59 PE_cD_level_1 (0.000169)
277. feature 130 ILD_log10gradient_dx20 (0.000166)
278. feature 23 PHIND_cD_level_1 (0.000166)
279. feature 137 DeltaPHIgradient_dx20 (0.000162)
280. feature 134 DeltaPHIgradient_dx5 (0.000160)
281. feature 133 DeltaPHIgradient_dx4 (0.000151)
282. feature 143 PEgradient_dx10 (0.000150)
283. feature 120 GRgradient_dx5 (0.000132)
284. feature 110 PE_entropy_foot10 (0.000127)
285. feature 106 PE_entropy_foot3 (0.000118)
286. feature 47 GR_cD_level_1 (0.000105)
287. feature 109 PE_entropy_foot7 (0.000104)
288. feature 184 ILD_log10_moving_std_3ft (0.000100)
289. feature 107 PE_entropy_foot4 (0.000093)
290. feature 177 GR_moving_std_3ft (0.000077)
291. feature 131 DeltaPHIgradient_dx2 (0.000072)
292. feature 199 PE_moving_std_4ft (0.000071)
293. feature 117 GRgradient_dx2 (0.000065)
294. feature 91 GR_entropy_foot7 (0.000041)
295. feature 105 PE_entropy_foot2 (0.000000)
###Markdown
Plot the feature importances of the forest
###Code
sns.set_style('white')
fig,ax = plt.subplots(figsize=(15,5))
ax.bar(range(X.shape[1]), importances[indices],color="r", align="center")
plt.ylabel("Feature importance")
plt.xlabel('Ranked features')
plt.xticks([], indices)
plt.xlim([-1, X.shape[1]]);
###Output
_____no_output_____
###Markdown
Features derived from raw geological variables tend to have the highest classification power. Rolling min, max and mean tend to have better classification power than the raw data. Wavelet approximation coefficients tend to have a similar or lower classification power than the raw data. Features expressing the local texture of the data (entropy, gradient, standard deviation and wavelet detail coefficients) have a low classification power but still contribute to the prediction.
Confusion matrix
The confusion matrix from the validation test is presented below.
###Code
######## Confusion matrix from this tuning
cv=LeaveOneGroupOut().split(X, y, groups)
y_pred = cross_val_predict(Cl, X, y, cv=cv, n_jobs=-1)
conf_mat = confusion_matrix(y, y_pred)
list_facies = ['SS', 'CSiS', 'FSiS', 'SiSh', 'MS', 'WS', 'D', 'PS', 'BS']
conf_mat = pd.DataFrame(conf_mat, columns=list_facies, index=list_facies)
conf_mat.head(10)
###Output
_____no_output_____
###Markdown
Applying the classification model to test data
###Code
filename = '../validation_data_nofacies.csv'
test_data = pd.read_csv(filename)
test_data.head(5)
##### cD From wavelet db1
dwt_db1_cD_df = FE.make_dwt_vars_cD(wells_df=test_data, logs=['GR', 'ILD_log10', 'DeltaPHI', 'PE', 'PHIND'],
levels=[1, 2, 3, 4], wavelet='db1')
##### cA From wavelet db1
dwt_db1_cA_df = FE.make_dwt_vars_cA(wells_df=test_data, logs=['GR', 'ILD_log10', 'DeltaPHI', 'PE', 'PHIND'],
levels=[1, 2, 3, 4], wavelet='db1')
##### cD From wavelet db3
dwt_db3_cD_df = FE.make_dwt_vars_cD(wells_df=test_data, logs=['GR', 'ILD_log10', 'DeltaPHI', 'PE', 'PHIND'],
levels=[1, 2, 3, 4], wavelet='db3')
##### cA From wavelet db3
dwt_db3_cA_df = FE.make_dwt_vars_cA(wells_df=test_data, logs=['GR', 'ILD_log10', 'DeltaPHI', 'PE', 'PHIND'],
levels=[1, 2, 3, 4], wavelet='db3')
##### From entropy
entropy_df = FE.make_entropy_vars(wells_df=test_data, logs=['GR', 'ILD_log10', 'DeltaPHI', 'PE', 'PHIND'],
l_foots=[2, 3, 4, 5, 7, 10])
###### From gradient
gradient_df = FE.make_gradient_vars(wells_df=test_data, logs=['GR', 'ILD_log10', 'DeltaPHI', 'PE', 'PHIND'],
dx_list=[2, 3, 4, 5, 6, 10, 20])
##### From rolling average
moving_av_df = FE.make_moving_av_vars(wells_df=test_data, logs=['GR', 'ILD_log10', 'DeltaPHI', 'PE', 'PHIND'],
windows=[1, 2, 5, 10, 20])
##### From rolling standard deviation
moving_std_df = FE.make_moving_std_vars(wells_df=test_data, logs=['GR', 'ILD_log10', 'DeltaPHI', 'PE', 'PHIND'],
windows=[3 , 4, 5, 7, 10, 15, 20])
##### From rolling max
moving_max_df = FE.make_moving_max_vars(wells_df=test_data, logs=['GR', 'ILD_log10', 'DeltaPHI', 'PE', 'PHIND'],
windows=[3, 4, 5, 7, 10, 15, 20])
##### From rolling min
moving_min_df = FE.make_moving_min_vars(wells_df=test_data, logs=['GR', 'ILD_log10', 'DeltaPHI', 'PE', 'PHIND'],
windows=[3 , 4, 5, 7, 10, 15, 20])
###### From rolling NM/M ratio
rolling_marine_ratio_df = FE.make_rolling_marine_ratio_vars(wells_df=test_data, windows=[5, 10, 15, 20, 30, 50, 75, 100, 200])
###### From distance to NM and M, up and down
dist_M_up_df = FE.make_distance_to_M_up_vars(wells_df=test_data)
dist_M_down_df = FE.make_distance_to_M_down_vars(wells_df=test_data)
dist_NM_up_df = FE.make_distance_to_NM_up_vars(wells_df=test_data)
dist_NM_down_df = FE.make_distance_to_NM_down_vars(wells_df=test_data)
combined_test_df = test_data
list_df_var = [dwt_db1_cD_df, dwt_db1_cA_df, dwt_db3_cD_df, dwt_db3_cA_df,
entropy_df, gradient_df, moving_av_df, moving_std_df, moving_max_df, moving_min_df,
rolling_marine_ratio_df, dist_M_up_df, dist_M_down_df, dist_NM_up_df, dist_NM_down_df]
for var_df in list_df_var:
temp_df = var_df
combined_test_df = pd.concat([combined_test_df,temp_df],axis=1)
combined_test_df.replace(to_replace=np.nan, value='-99999', inplace=True)
X_test = combined_test_df.iloc[:, 3:]
print (combined_test_df.shape)
combined_test_df.head(5)
Cl = RandomForestClassifier(n_estimators=100, max_features=0.1, min_samples_leaf=25,
min_samples_split=50, class_weight='balanced', random_state=42, n_jobs=-1)
Cl.fit(X, y)
y_test = Cl.predict(X_test)
y_test = pd.DataFrame(y_test, columns=['Predicted Facies'])
test_pred_df = pd.concat([combined_test_df[['Well Name', 'Depth']], y_test], axis=1)
test_pred_df.head()
###Output
_____no_output_____
###Markdown
Exporting results
###Code
test_pred_df.to_pickle('Prediction_blind_wells_RF_c.pkl')
###Output
_____no_output_____
###Markdown
Facies classification using Random Forest
Contest entry by geoLEARN: Martin Blouin, Lorenzo Perozzi and Antoine Caté, in collaboration with Erwan Gloaguen
[Original contest notebook](../Facies_classification.ipynb) by Brendon Hall, [Enthought](https://www.enthought.com/)
In this notebook we will train a machine learning algorithm to predict facies from well log data. The dataset comes from a class exercise from The University of Kansas on [Neural Networks and Fuzzy Systems](http://www.people.ku.edu/~gbohling/EECS833/). This exercise is based on a consortium project to use machine learning techniques to create a reservoir model of the largest gas fields in North America, the Hugoton and Panoma Fields. For more info on the origin of the data, see [Bohling and Dubois (2003)](http://www.kgs.ku.edu/PRS/publication/2003/ofr2003-50.pdf) and [Dubois et al. (2007)](http://dx.doi.org/10.1016/j.cageo.2006.08.011).
The dataset consists of log data from nine wells that have been labeled with a facies type based on observation of core. We will use this log data to train a Random Forest model to classify facies types.
Exploring the dataset
First, we import and examine the dataset used to train the classifier.
###Code
###### Importing all used packages
%matplotlib inline
import warnings
warnings.filterwarnings('ignore')
import pandas as pd
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.colors as colors
from mpl_toolkits.axes_grid1 import make_axes_locatable
import seaborn as sns
from pandas import set_option
# set_option("display.max_rows", 10)
pd.options.mode.chained_assignment = None
###### Import packages needed for the make_vars functions
import Feature_Engineering as FE
##### import stuff from scikit learn
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import KFold, cross_val_score,LeavePGroupsOut, LeaveOneGroupOut, cross_val_predict
from sklearn.metrics import confusion_matrix, make_scorer, f1_score, accuracy_score, recall_score, precision_score
filename = '../facies_vectors.csv'
training_data = pd.read_csv(filename)
training_data.head()
training_data.describe()
###Output
_____no_output_____
###Markdown
A complete description of the dataset is given in the [Original contest notebook](../Facies_classification.ipynb) by Brendon Hall, [Enthought](https://www.enthought.com/). A total of four measured rock properties and two interpreted geological properties are given as raw predictor variables for the prediction of the "Facies" class.
Feature engineering
As stated in our [previous submission](Submission_increased_variance.ipynb), we believe that feature engineering has a high potential for increasing classification success. A strategy for building new variables is explained below.
The dataset is distributed along a series of drillholes intersecting a stratigraphic sequence. Sedimentary facies tend to be deposited in sequences that reflect the evolution of the paleo-environment (variations in water depth, water temperature, biological activity, current strength, detrital input, ...). Each facies represents a specific depositional environment and is in contact with facies that represent a progressive transition to another environment. Thus, there is a relationship between neighbouring samples, and the distribution of the data along drillholes can be as important as the data values themselves for predicting facies.
A series of new variables (features) is calculated and tested below to help represent the relationship between neighbouring samples and the overall texture of the data along drillholes. These variables are:
- detail and approximation coefficients at various levels of two [wavelet transforms](https://en.wikipedia.org/wiki/Discrete_wavelet_transform) (using two types of [Daubechies](https://en.wikipedia.org/wiki/Daubechies_wavelet) wavelets);
- measures of the local entropy with variable observation windows;
- measures of the local gradient with variable observation windows;
- rolling statistical calculations (i.e., mean, standard deviation, min and max) with variable observation windows;
- ratios between marine and non-marine lithofacies with different observation windows;
- distances from the nearest marine or non-marine occurrence, uphole and downhole.
Functions used to build these variables are located in the [Feature Engineering](Feature_Engineering.py) Python script. The data exploration work behind the conception and study of these variables is not presented here.
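The distance-to-marine features are also built in Feature_Engineering.py and are not shown here. As an illustration only, a minimal sketch of how a "number of samples since the last marine interval uphole" column could be computed (hypothetical function name; it assumes rows are ordered from shallow to deep within each well and that NM_M uses the contest encoding in which 2 marks marine samples; the real implementation may differ):

```python
import numpy as np
import pandas as pd

def make_distance_to_up(wells_df, target=2, col='NM_M'):
    """Number of samples since the last uphole occurrence of `target` in `col`, per well."""
    def per_well(series):
        out, count = [], np.nan            # stays NaN until the first occurrence is seen
        for value in series:
            count = 0 if value == target else count + 1
            out.append(count)
        return pd.Series(out, index=series.index)
    # group_keys=False keeps the original row index so the result aligns with wells_df
    return wells_df.groupby('Well Name', group_keys=False)[col].apply(per_well)
```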
###Code
##### cD From wavelet db1
dwt_db1_cD_df = FE.make_dwt_vars_cD(wells_df=training_data, logs=['GR', 'ILD_log10', 'DeltaPHI', 'PE', 'PHIND'],
levels=[1, 2, 3, 4], wavelet='db1')
##### cA From wavelet db1
dwt_db1_cA_df = FE.make_dwt_vars_cA(wells_df=training_data, logs=['GR', 'ILD_log10', 'DeltaPHI', 'PE', 'PHIND'],
levels=[1, 2, 3, 4], wavelet='db1')
##### cD From wavelet db3
dwt_db3_cD_df = FE.make_dwt_vars_cD(wells_df=training_data, logs=['GR', 'ILD_log10', 'DeltaPHI', 'PE', 'PHIND'],
levels=[1, 2, 3, 4], wavelet='db3')
##### cA From wavelet db3
dwt_db3_cA_df = FE.make_dwt_vars_cA(wells_df=training_data, logs=['GR', 'ILD_log10', 'DeltaPHI', 'PE', 'PHIND'],
levels=[1, 2, 3, 4], wavelet='db3')
##### From entropy
entropy_df = FE.make_entropy_vars(wells_df=training_data, logs=['GR', 'ILD_log10', 'DeltaPHI', 'PE', 'PHIND'],
l_foots=[2, 3, 4, 5, 7, 10])
###### From gradient
gradient_df = FE.make_gradient_vars(wells_df=training_data, logs=['GR', 'ILD_log10', 'DeltaPHI', 'PE', 'PHIND'],
dx_list=[2, 3, 4, 5, 6, 10, 20])
##### From rolling average
moving_av_df = FE.make_moving_av_vars(wells_df=training_data, logs=['GR', 'ILD_log10', 'DeltaPHI', 'PE', 'PHIND'],
windows=[1, 2, 5, 10, 20])
##### From rolling standard deviation
moving_std_df = FE.make_moving_std_vars(wells_df=training_data, logs=['GR', 'ILD_log10', 'DeltaPHI', 'PE', 'PHIND'],
windows=[3 , 4, 5, 7, 10, 15, 20])
##### From rolling max
moving_max_df = FE.make_moving_max_vars(wells_df=training_data, logs=['GR', 'ILD_log10', 'DeltaPHI', 'PE', 'PHIND'],
windows=[3, 4, 5, 7, 10, 15, 20])
##### From rolling min
moving_min_df = FE.make_moving_min_vars(wells_df=training_data, logs=['GR', 'ILD_log10', 'DeltaPHI', 'PE', 'PHIND'],
windows=[3 , 4, 5, 7, 10, 15, 20])
###### From rolling NM/M ratio
rolling_marine_ratio_df = FE.make_rolling_marine_ratio_vars(wells_df=training_data, windows=[5, 10, 15, 20, 30, 50, 75, 100, 200])
###### From distance to NM and M, up and down
dist_M_up_df = FE.make_distance_to_M_up_vars(wells_df=training_data)
dist_M_down_df = FE.make_distance_to_M_down_vars(wells_df=training_data)
dist_NM_up_df = FE.make_distance_to_NM_up_vars(wells_df=training_data)
dist_NM_down_df = FE.make_distance_to_NM_down_vars(wells_df=training_data)
list_df_var = [dwt_db1_cD_df, dwt_db1_cA_df, dwt_db3_cD_df, dwt_db3_cA_df,
entropy_df, gradient_df, moving_av_df, moving_std_df, moving_max_df, moving_min_df,
rolling_marine_ratio_df, dist_M_up_df, dist_M_down_df, dist_NM_up_df, dist_NM_down_df]
combined_df = training_data
for var_df in list_df_var:
temp_df = var_df
combined_df = pd.concat([combined_df,temp_df],axis=1)
combined_df.replace(to_replace=np.nan, value='-1', inplace=True)
print (combined_df.shape)
combined_df.head(5)
###Output
(4149, 299)
###Markdown
Building a prediction model from these variables
A Random Forest model is built here to test the effect of these new variables on the prediction power. Algorithm parameters have been tuned with a LeaveOneGroupOut cross-validation strategy so as to take into account the non-stationarity between the training and testing sets. The minimum size of individual tree leaves and split nodes has been increased as much as possible without significantly increasing the bias, so as to reduce the variance of the prediction.
Box plots for a series of scores obtained through cross-validation are presented below.
Create predictor and target arrays
###Code
X = combined_df.iloc[:, 4:]
y = combined_df['Facies']
groups = combined_df['Well Name']
###Output
_____no_output_____
###Markdown
Estimation of validation scores from this tuning
###Code
scoring_param = ['accuracy', 'recall_weighted', 'precision_weighted','f1_weighted']
scores = []
Cl = RandomForestClassifier(n_estimators=100, max_features=0.1, min_samples_leaf=25,
min_samples_split=50, class_weight='balanced', random_state=42, n_jobs=-1)
lpgo = LeavePGroupsOut(n_groups=2)
for scoring in scoring_param:
cv=lpgo.split(X, y, groups)
validated = cross_val_score(Cl, X, y, scoring=scoring, cv=cv, n_jobs=-1)
scores.append(validated)
scores = np.array(scores)
scores = np.swapaxes(scores, 0, 1)
scores = pd.DataFrame(data=scores, columns=scoring_param)
sns.set_style('white')
fig,ax = plt.subplots(figsize=(8,6))
sns.boxplot(data=scores)
plt.xlabel('scoring parameters')
plt.ylabel('score')
plt.title('Classification scores for tuned parameters');
###Output
_____no_output_____
###Markdown
Evaluating feature importances
The individual contribution of each feature to the classification (i.e., the feature importances) can be obtained from a Random Forest classifier. This gives a good idea of the classification power of individual features and helps in understanding which type of feature engineering is the most promising.
Caution should be taken when interpreting feature importances, as highly correlated variables tend to dilute their classification power between themselves and will rank lower than uncorrelated variables.
###Code
####### Evaluation of feature importances
Cl = RandomForestClassifier(n_estimators=75, max_features=0.1, min_samples_leaf=25,
min_samples_split=50, class_weight='balanced', random_state=42,oob_score=True, n_jobs=-1)
Cl.fit(X, y)
print ('OOB estimate of accuracy for prospectivity classification using all features: %s' % str(Cl.oob_score_))
importances = Cl.feature_importances_
std = np.std([tree.feature_importances_ for tree in Cl.estimators_], axis=0)
indices = np.argsort(importances)[::-1]
print("Feature ranking:")
Vars = list(X.columns.values)
for f in range(X.shape[1]):
print("%d. feature %d %s (%f)" % (f + 1, indices[f], Vars[indices[f]], importances[indices[f]]))
###Output
OOB estimate of accuracy for prospectivity classification using all features: 0.723065798988
Feature ranking:
1. feature 282 Marine_ratio_5_centered (0.031530)
2. feature 293 dist_NM_up (0.030115)
3. feature 288 Marine_ratio_75_centered (0.029691)
4. feature 292 dist_M_down (0.027875)
5. feature 284 Marine_ratio_15_centered (0.027651)
6. feature 285 Marine_ratio_20_centered (0.026680)
7. feature 290 Marine_ratio_200_centered (0.026423)
8. feature 286 Marine_ratio_30_centered (0.022103)
9. feature 5 NM_M (0.021090)
10. feature 289 Marine_ratio_100_centered (0.018909)
11. feature 287 Marine_ratio_50_centered (0.017506)
12. feature 283 Marine_ratio_10_centered (0.016424)
13. feature 171 PE_moving_av_20ft (0.014572)
14. feature 249 GR_moving_min_5ft (0.012678)
15. feature 291 dist_M_up (0.011630)
16. feature 250 GR_moving_min_7ft (0.011605)
17. feature 81 PE_cA_level_3 (0.010347)
18. feature 254 ILD_log10_moving_min_3ft (0.009445)
19. feature 272 PE_moving_min_10ft (0.009424)
20. feature 294 dist_NM_down (0.008873)
21. feature 219 ILD_log10_moving_max_3ft (0.008808)
22. feature 241 PHIND_moving_max_4ft (0.008783)
23. feature 224 ILD_log10_moving_max_15ft (0.008776)
24. feature 248 GR_moving_min_4ft (0.008697)
25. feature 0 GR (0.008360)
26. feature 223 ILD_log10_moving_max_10ft (0.008254)
27. feature 271 PE_moving_min_7ft (0.008216)
28. feature 170 PE_moving_av_10ft (0.007934)
29. feature 251 GR_moving_min_10ft (0.007851)
30. feature 277 PHIND_moving_min_5ft (0.007727)
31. feature 275 PHIND_moving_min_3ft (0.007675)
32. feature 257 ILD_log10_moving_min_7ft (0.007360)
33. feature 240 PHIND_moving_max_3ft (0.007212)
34. feature 247 GR_moving_min_3ft (0.007172)
35. feature 159 ILD_log10_moving_av_5ft (0.007125)
36. feature 221 ILD_log10_moving_max_5ft (0.007063)
37. feature 174 PHIND_moving_av_5ft (0.006821)
38. feature 37 DeltaPHI_cA_level_3 (0.006339)
39. feature 160 ILD_log10_moving_av_10ft (0.006312)
40. feature 255 ILD_log10_moving_min_4ft (0.006159)
41. feature 259 ILD_log10_moving_min_15ft (0.005927)
42. feature 269 PE_moving_min_4ft (0.005768)
43. feature 31 ILD_log10_cA_level_1 (0.005768)
44. feature 239 PE_moving_max_20ft (0.005693)
45. feature 152 GR_moving_av_1ft (0.005620)
46. feature 1 ILD_log10 (0.005587)
47. feature 3 PHIND (0.005504)
48. feature 6 RELPOS (0.005443)
49. feature 153 GR_moving_av_2ft (0.005402)
50. feature 34 ILD_log10_cA_level_4 (0.005058)
51. feature 238 PE_moving_max_15ft (0.005040)
52. feature 157 ILD_log10_moving_av_1ft (0.004991)
53. feature 43 PHIND_cA_level_1 (0.004968)
54. feature 246 PHIND_moving_max_20ft (0.004958)
55. feature 276 PHIND_moving_min_4ft (0.004852)
56. feature 230 DeltaPHI_moving_max_10ft (0.004707)
57. feature 173 PHIND_moving_av_2ft (0.004687)
58. feature 39 PE_cA_level_1 (0.004664)
59. feature 32 ILD_log10_cA_level_2 (0.004654)
60. feature 242 PHIND_moving_max_5ft (0.004548)
61. feature 172 PHIND_moving_av_1ft (0.004467)
62. feature 41 PE_cA_level_3 (0.004452)
63. feature 158 ILD_log10_moving_av_2ft (0.004395)
64. feature 256 ILD_log10_moving_min_5ft (0.004373)
65. feature 264 DeltaPHI_moving_min_7ft (0.004358)
66. feature 222 ILD_log10_moving_max_7ft (0.004249)
67. feature 165 DeltaPHI_moving_av_10ft (0.004245)
68. feature 270 PE_moving_min_5ft (0.004242)
69. feature 161 ILD_log10_moving_av_20ft (0.004107)
70. feature 38 DeltaPHI_cA_level_4 (0.004102)
71. feature 155 GR_moving_av_10ft (0.004062)
72. feature 236 PE_moving_max_7ft (0.003970)
73. feature 204 PE_moving_std_20ft (0.003910)
74. feature 79 PE_cA_level_1 (0.003905)
75. feature 83 PHIND_cA_level_1 (0.003900)
76. feature 278 PHIND_moving_min_7ft (0.003852)
77. feature 33 ILD_log10_cA_level_3 (0.003812)
78. feature 220 ILD_log10_moving_max_4ft (0.003765)
79. feature 169 PE_moving_av_5ft (0.003751)
80. feature 86 PHIND_cA_level_4 (0.003729)
81. feature 231 DeltaPHI_moving_max_15ft (0.003673)
82. feature 176 PHIND_moving_av_20ft (0.003595)
83. feature 243 PHIND_moving_max_7ft (0.003566)
84. feature 70 GR_cA_level_4 (0.003537)
85. feature 263 DeltaPHI_moving_min_5ft (0.003492)
86. feature 212 GR_moving_max_3ft (0.003473)
87. feature 229 DeltaPHI_moving_max_7ft (0.003400)
88. feature 260 ILD_log10_moving_min_20ft (0.003384)
89. feature 168 PE_moving_av_2ft (0.003371)
90. feature 22 PE_cD_level_4 (0.003364)
91. feature 245 PHIND_moving_max_15ft (0.003225)
92. feature 274 PE_moving_min_20ft (0.003152)
93. feature 82 PE_cA_level_4 (0.003132)
94. feature 87 GR_entropy_foot2 (0.003090)
95. feature 27 GR_cA_level_1 (0.003087)
96. feature 10 GR_cD_level_4 (0.003044)
97. feature 54 ILD_log10_cD_level_4 (0.003015)
98. feature 40 PE_cA_level_2 (0.002986)
99. feature 62 PE_cD_level_4 (0.002894)
100. feature 2 DeltaPHI (0.002862)
101. feature 98 ILD_log10_entropy_foot10 (0.002792)
102. feature 78 DeltaPHI_cA_level_4 (0.002736)
103. feature 266 DeltaPHI_moving_min_15ft (0.002735)
104. feature 45 PHIND_cA_level_3 (0.002653)
105. feature 261 DeltaPHI_moving_min_3ft (0.002625)
106. feature 166 DeltaPHI_moving_av_20ft (0.002619)
107. feature 46 PHIND_cA_level_4 (0.002609)
108. feature 154 GR_moving_av_5ft (0.002592)
109. feature 77 DeltaPHI_cA_level_3 (0.002551)
110. feature 232 DeltaPHI_moving_max_20ft (0.002545)
111. feature 252 GR_moving_min_15ft (0.002541)
112. feature 268 PE_moving_min_3ft (0.002493)
113. feature 9 GR_cD_level_3 (0.002458)
114. feature 175 PHIND_moving_av_10ft (0.002419)
115. feature 227 DeltaPHI_moving_max_4ft (0.002415)
116. feature 30 GR_cA_level_4 (0.002377)
117. feature 211 PHIND_moving_std_20ft (0.002376)
118. feature 244 PHIND_moving_max_10ft (0.002362)
119. feature 267 DeltaPHI_moving_min_20ft (0.002321)
120. feature 163 DeltaPHI_moving_av_2ft (0.002276)
121. feature 253 GR_moving_min_20ft (0.002248)
122. feature 210 PHIND_moving_std_15ft (0.002224)
123. feature 228 DeltaPHI_moving_max_5ft (0.002209)
124. feature 92 GR_entropy_foot10 (0.002205)
125. feature 279 PHIND_moving_min_10ft (0.002189)
126. feature 68 GR_cA_level_2 (0.002179)
127. feature 225 ILD_log10_moving_max_20ft (0.002164)
128. feature 35 DeltaPHI_cA_level_1 (0.002157)
129. feature 196 DeltaPHI_moving_std_15ft (0.002150)
130. feature 235 PE_moving_max_5ft (0.002136)
131. feature 237 PE_moving_max_10ft (0.002133)
132. feature 233 PE_moving_max_3ft (0.002054)
133. feature 17 DeltaPHI_cD_level_3 (0.002046)
134. feature 44 PHIND_cA_level_2 (0.002030)
135. feature 197 DeltaPHI_moving_std_20ft (0.002011)
136. feature 74 ILD_log10_cA_level_4 (0.002005)
137. feature 69 GR_cA_level_3 (0.001974)
138. feature 84 PHIND_cA_level_2 (0.001971)
139. feature 234 PE_moving_max_4ft (0.001941)
140. feature 36 DeltaPHI_cA_level_2 (0.001940)
141. feature 265 DeltaPHI_moving_min_10ft (0.001915)
142. feature 25 PHIND_cD_level_3 (0.001901)
143. feature 280 PHIND_moving_min_15ft (0.001895)
144. feature 156 GR_moving_av_20ft (0.001888)
145. feature 42 PE_cA_level_4 (0.001869)
146. feature 226 DeltaPHI_moving_max_3ft (0.001856)
147. feature 113 PHIND_entropy_foot4 (0.001846)
148. feature 262 DeltaPHI_moving_min_4ft (0.001806)
149. feature 58 DeltaPHI_cD_level_4 (0.001804)
150. feature 66 PHIND_cD_level_4 (0.001798)
151. feature 203 PE_moving_std_15ft (0.001797)
152. feature 162 DeltaPHI_moving_av_1ft (0.001776)
153. feature 164 DeltaPHI_moving_av_5ft (0.001759)
154. feature 88 GR_entropy_foot3 (0.001689)
155. feature 14 ILD_log10_cD_level_4 (0.001689)
156. feature 18 DeltaPHI_cD_level_4 (0.001684)
157. feature 90 GR_entropy_foot5 (0.001682)
158. feature 89 GR_entropy_foot4 (0.001646)
159. feature 215 GR_moving_max_7ft (0.001638)
160. feature 217 GR_moving_max_15ft (0.001597)
161. feature 73 ILD_log10_cA_level_3 (0.001554)
162. feature 28 GR_cA_level_2 (0.001541)
163. feature 216 GR_moving_max_10ft (0.001515)
164. feature 218 GR_moving_max_20ft (0.001498)
165. feature 273 PE_moving_min_15ft (0.001492)
166. feature 190 ILD_log10_moving_std_20ft (0.001424)
167. feature 189 ILD_log10_moving_std_15ft (0.001422)
168. feature 50 GR_cD_level_4 (0.001421)
169. feature 258 ILD_log10_moving_min_10ft (0.001419)
170. feature 94 ILD_log10_entropy_foot3 (0.001407)
171. feature 182 GR_moving_std_15ft (0.001398)
172. feature 202 PE_moving_std_10ft (0.001389)
173. feature 97 ILD_log10_entropy_foot7 (0.001343)
174. feature 67 GR_cA_level_1 (0.001277)
175. feature 65 PHIND_cD_level_3 (0.001261)
176. feature 183 GR_moving_std_20ft (0.001236)
177. feature 61 PE_cD_level_3 (0.001223)
178. feature 96 ILD_log10_entropy_foot5 (0.001221)
179. feature 12 ILD_log10_cD_level_2 (0.001213)
180. feature 29 GR_cA_level_3 (0.001206)
181. feature 209 PHIND_moving_std_10ft (0.001194)
182. feature 115 PHIND_entropy_foot7 (0.001187)
183. feature 213 GR_moving_max_4ft (0.001178)
184. feature 57 DeltaPHI_cD_level_3 (0.001171)
185. feature 95 ILD_log10_entropy_foot4 (0.001152)
186. feature 281 PHIND_moving_min_20ft (0.001129)
187. feature 181 GR_moving_std_10ft (0.001116)
188. feature 8 GR_cD_level_2 (0.001071)
189. feature 80 PE_cA_level_2 (0.001048)
190. feature 26 PHIND_cD_level_4 (0.001042)
191. feature 111 PHIND_entropy_foot2 (0.001025)
192. feature 179 GR_moving_std_5ft (0.001022)
193. feature 75 DeltaPHI_cA_level_1 (0.001017)
194. feature 214 GR_moving_max_5ft (0.001001)
195. feature 180 GR_moving_std_7ft (0.000997)
196. feature 85 PHIND_cA_level_3 (0.000971)
197. feature 114 PHIND_entropy_foot5 (0.000971)
198. feature 201 PE_moving_std_7ft (0.000905)
199. feature 72 ILD_log10_cA_level_2 (0.000866)
200. feature 13 ILD_log10_cD_level_3 (0.000861)
201. feature 186 ILD_log10_moving_std_5ft (0.000842)
202. feature 194 DeltaPHI_moving_std_7ft (0.000805)
203. feature 21 PE_cD_level_3 (0.000796)
204. feature 93 ILD_log10_entropy_foot2 (0.000760)
205. feature 60 PE_cD_level_2 (0.000742)
206. feature 64 PHIND_cD_level_2 (0.000741)
207. feature 200 PE_moving_std_5ft (0.000726)
208. feature 139 PEgradient_dx3 (0.000719)
209. feature 187 ILD_log10_moving_std_7ft (0.000715)
210. feature 7 GR_cD_level_1 (0.000714)
211. feature 24 PHIND_cD_level_2 (0.000688)
212. feature 53 ILD_log10_cD_level_3 (0.000687)
213. feature 206 PHIND_moving_std_4ft (0.000684)
214. feature 112 PHIND_entropy_foot3 (0.000655)
215. feature 4 PE (0.000629)
216. feature 76 DeltaPHI_cA_level_2 (0.000592)
217. feature 71 ILD_log10_cA_level_1 (0.000589)
218. feature 55 DeltaPHI_cD_level_1 (0.000575)
219. feature 100 DeltaPHI_entropy_foot3 (0.000571)
220. feature 208 PHIND_moving_std_7ft (0.000568)
221. feature 207 PHIND_moving_std_5ft (0.000560)
222. feature 104 DeltaPHI_entropy_foot10 (0.000551)
223. feature 195 DeltaPHI_moving_std_10ft (0.000547)
224. feature 205 PHIND_moving_std_3ft (0.000535)
225. feature 116 PHIND_entropy_foot10 (0.000525)
226. feature 188 ILD_log10_moving_std_10ft (0.000519)
227. feature 101 DeltaPHI_entropy_foot4 (0.000516)
228. feature 192 DeltaPHI_moving_std_4ft (0.000513)
229. feature 49 GR_cD_level_3 (0.000511)
230. feature 128 ILD_log10gradient_dx6 (0.000503)
231. feature 140 PEgradient_dx4 (0.000470)
232. feature 56 DeltaPHI_cD_level_2 (0.000447)
233. feature 19 PE_cD_level_1 (0.000443)
234. feature 119 GRgradient_dx4 (0.000442)
235. feature 122 GRgradient_dx10 (0.000438)
236. feature 191 DeltaPHI_moving_std_3ft (0.000429)
237. feature 198 PE_moving_std_3ft (0.000415)
238. feature 138 PEgradient_dx2 (0.000411)
239. feature 129 ILD_log10gradient_dx10 (0.000406)
240. feature 193 DeltaPHI_moving_std_5ft (0.000394)
241. feature 145 PHINDgradient_dx2 (0.000389)
242. feature 125 ILD_log10gradient_dx3 (0.000387)
243. feature 52 ILD_log10_cD_level_2 (0.000387)
244. feature 151 PHINDgradient_dx20 (0.000368)
245. feature 127 ILD_log10gradient_dx5 (0.000368)
246. feature 121 GRgradient_dx6 (0.000347)
247. feature 132 DeltaPHIgradient_dx3 (0.000344)
248. feature 99 DeltaPHI_entropy_foot2 (0.000341)
249. feature 51 ILD_log10_cD_level_1 (0.000336)
250. feature 63 PHIND_cD_level_1 (0.000321)
251. feature 146 PHINDgradient_dx3 (0.000315)
252. feature 147 PHINDgradient_dx4 (0.000314)
253. feature 141 PEgradient_dx5 (0.000312)
254. feature 136 DeltaPHIgradient_dx10 (0.000311)
255. feature 103 DeltaPHI_entropy_foot7 (0.000310)
256. feature 167 PE_moving_av_1ft (0.000301)
257. feature 126 ILD_log10gradient_dx4 (0.000293)
258. feature 149 PHINDgradient_dx6 (0.000293)
259. feature 48 GR_cD_level_2 (0.000289)
260. feature 102 DeltaPHI_entropy_foot5 (0.000280)
261. feature 118 GRgradient_dx3 (0.000279)
262. feature 15 DeltaPHI_cD_level_1 (0.000274)
263. feature 11 ILD_log10_cD_level_1 (0.000258)
264. feature 148 PHINDgradient_dx5 (0.000247)
265. feature 123 GRgradient_dx20 (0.000244)
266. feature 144 PEgradient_dx20 (0.000236)
267. feature 150 PHINDgradient_dx10 (0.000231)
268. feature 124 ILD_log10gradient_dx2 (0.000230)
269. feature 108 PE_entropy_foot5 (0.000223)
270. feature 142 PEgradient_dx6 (0.000223)
271. feature 185 ILD_log10_moving_std_4ft (0.000197)
272. feature 16 DeltaPHI_cD_level_2 (0.000194)
273. feature 135 DeltaPHIgradient_dx6 (0.000187)
274. feature 178 GR_moving_std_4ft (0.000178)
275. feature 20 PE_cD_level_2 (0.000170)
276. feature 59 PE_cD_level_1 (0.000169)
277. feature 130 ILD_log10gradient_dx20 (0.000166)
278. feature 23 PHIND_cD_level_1 (0.000166)
279. feature 137 DeltaPHIgradient_dx20 (0.000162)
280. feature 134 DeltaPHIgradient_dx5 (0.000160)
281. feature 133 DeltaPHIgradient_dx4 (0.000151)
282. feature 143 PEgradient_dx10 (0.000150)
283. feature 120 GRgradient_dx5 (0.000132)
284. feature 110 PE_entropy_foot10 (0.000127)
285. feature 106 PE_entropy_foot3 (0.000118)
286. feature 47 GR_cD_level_1 (0.000105)
287. feature 109 PE_entropy_foot7 (0.000104)
288. feature 184 ILD_log10_moving_std_3ft (0.000100)
289. feature 107 PE_entropy_foot4 (0.000093)
290. feature 177 GR_moving_std_3ft (0.000077)
291. feature 131 DeltaPHIgradient_dx2 (0.000072)
292. feature 199 PE_moving_std_4ft (0.000071)
293. feature 117 GRgradient_dx2 (0.000065)
294. feature 91 GR_entropy_foot7 (0.000041)
295. feature 105 PE_entropy_foot2 (0.000000)
###Markdown
Plot the feature importances of the forest
###Code
sns.set_style('white')
fig,ax = plt.subplots(figsize=(15,5))
ax.bar(range(X.shape[1]), importances[indices],color="r", align="center")
plt.ylabel("Feature importance")
plt.xlabel('Ranked features')
plt.xticks([], indices)
plt.xlim([-1, X.shape[1]]);
###Output
_____no_output_____
###Markdown
Features derived from raw geological variables tend to have the highest classification power. Rolling min, max and mean tend to have better classification power than the raw data. Wavelet approximation coefficients tend to have a similar or lower classification power than the raw data. Features expressing the local texture of the data (entropy, gradient, standard deviation and wavelet detail coefficients) have a low classification power but still contribute to the prediction.
Confusion matrix
The confusion matrix from the validation test is presented below.
###Code
######## Confusion matrix from this tuning
cv=LeaveOneGroupOut().split(X, y, groups)
y_pred = cross_val_predict(Cl, X, y, cv=cv, n_jobs=-1)
conf_mat = confusion_matrix(y, y_pred)
list_facies = ['SS', 'CSiS', 'FSiS', 'SiSh', 'MS', 'WS', 'D', 'PS', 'BS']
conf_mat = pd.DataFrame(conf_mat, columns=list_facies, index=list_facies)
conf_mat.head(10)
###Output
_____no_output_____
###Markdown
Applying the classification model to test data
###Code
filename = '../validation_data_nofacies.csv'
test_data = pd.read_csv(filename)
test_data.head(5)
##### cD From wavelet db1
dwt_db1_cD_df = FE.make_dwt_vars_cD(wells_df=test_data, logs=['GR', 'ILD_log10', 'DeltaPHI', 'PE', 'PHIND'],
levels=[1, 2, 3, 4], wavelet='db1')
##### cA From wavelet db1
dwt_db1_cA_df = FE.make_dwt_vars_cA(wells_df=test_data, logs=['GR', 'ILD_log10', 'DeltaPHI', 'PE', 'PHIND'],
levels=[1, 2, 3, 4], wavelet='db1')
##### cD From wavelet db3
dwt_db3_cD_df = FE.make_dwt_vars_cD(wells_df=test_data, logs=['GR', 'ILD_log10', 'DeltaPHI', 'PE', 'PHIND'],
levels=[1, 2, 3, 4], wavelet='db3')
##### cA From wavelet db3
dwt_db3_cA_df = FE.make_dwt_vars_cA(wells_df=test_data, logs=['GR', 'ILD_log10', 'DeltaPHI', 'PE', 'PHIND'],
levels=[1, 2, 3, 4], wavelet='db3')
##### From entropy
entropy_df = FE.make_entropy_vars(wells_df=test_data, logs=['GR', 'ILD_log10', 'DeltaPHI', 'PE', 'PHIND'],
l_foots=[2, 3, 4, 5, 7, 10])
###### From gradient
gradient_df = FE.make_gradient_vars(wells_df=test_data, logs=['GR', 'ILD_log10', 'DeltaPHI', 'PE', 'PHIND'],
dx_list=[2, 3, 4, 5, 6, 10, 20])
##### From rolling average
moving_av_df = FE.make_moving_av_vars(wells_df=test_data, logs=['GR', 'ILD_log10', 'DeltaPHI', 'PE', 'PHIND'],
windows=[1, 2, 5, 10, 20])
##### From rolling standard deviation
moving_std_df = FE.make_moving_std_vars(wells_df=test_data, logs=['GR', 'ILD_log10', 'DeltaPHI', 'PE', 'PHIND'],
windows=[3 , 4, 5, 7, 10, 15, 20])
##### From rolling max
moving_max_df = FE.make_moving_max_vars(wells_df=test_data, logs=['GR', 'ILD_log10', 'DeltaPHI', 'PE', 'PHIND'],
windows=[3, 4, 5, 7, 10, 15, 20])
##### From rolling min
moving_min_df = FE.make_moving_min_vars(wells_df=test_data, logs=['GR', 'ILD_log10', 'DeltaPHI', 'PE', 'PHIND'],
windows=[3 , 4, 5, 7, 10, 15, 20])
###### From rolling NM/M ratio
rolling_marine_ratio_df = FE.make_rolling_marine_ratio_vars(wells_df=test_data, windows=[5, 10, 15, 20, 30, 50, 75, 100, 200])
###### From distance to NM and M, up and down
dist_M_up_df = FE.make_distance_to_M_up_vars(wells_df=test_data)
dist_M_down_df = FE.make_distance_to_M_down_vars(wells_df=test_data)
dist_NM_up_df = FE.make_distance_to_NM_up_vars(wells_df=test_data)
dist_NM_down_df = FE.make_distance_to_NM_down_vars(wells_df=test_data)
combined_test_df = test_data
list_df_var = [dwt_db1_cD_df, dwt_db1_cA_df, dwt_db3_cD_df, dwt_db3_cA_df,
entropy_df, gradient_df, moving_av_df, moving_std_df, moving_max_df, moving_min_df,
rolling_marine_ratio_df, dist_M_up_df, dist_M_down_df, dist_NM_up_df, dist_NM_down_df]
for var_df in list_df_var:
temp_df = var_df
combined_test_df = pd.concat([combined_test_df,temp_df],axis=1)
combined_test_df.replace(to_replace=np.nan, value='-99999', inplace=True)
X_test = combined_test_df.iloc[:, 3:]
print (combined_test_df.shape)
combined_test_df.head(5)
Cl = RandomForestClassifier(n_estimators=100, max_features=0.1, min_samples_leaf=25,
min_samples_split=50, class_weight='balanced', random_state=42, n_jobs=-1)
Cl.fit(X, y)
y_test = Cl.predict(X_test)
y_test = pd.DataFrame(y_test, columns=['Predicted Facies'])
test_pred_df = pd.concat([combined_test_df[['Well Name', 'Depth']], y_test], axis=1)
test_pred_df.head()
###Output
_____no_output_____
###Markdown
Exporting results
###Code
test_pred_df.to_pickle('Prediction_blind_wells_RF_c.pkl')
###Output
_____no_output_____
|
src/01/01_inftroduction.ipynb
|
###Markdown
1 Basic Python syntax
You can add a cell with the `+` button on the left of the ribbon. If the cell type is Code you can write Python code; if it is Markdown you can take notes in Markdown format. Feel free to make use of this.
1.1 Python as a calculator
The simplest way to use Python is as a calculator. The main supported operators are listed below.
| Operator | Meaning |
| :----: | :----------- |
| + | addition |
| - | subtraction |
| * | multiplication |
| / | division |
| // | floor division |
| % | modulo (remainder) |
| ** | exponentiation |
###Code
# With the cursor in a cell, press shift + enter to run the cell
# Wrap a value in print() to display it
# Everything after a hash (#) is treated as a comment and is not executed
# ctrl + / comments out the current line
print(1+2+3-4+5+6+78+9)
print(123-45-67+89)
print(1*2*3*4+5+6+7*8+9)
print(123+4-5+67-89)
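# The remaining operators from the table above: floor division, modulo and exponentiation
print(7 // 2)   # 3
print(7 % 2)    # 1
print(2 ** 10)  # 1024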
# Exercise 1
# Play around with the operators
###Output
_____no_output_____
###Markdown
1.2 Variables and types
Most programming languages have a system of data types (or simply, types). The benefit may be hard to appreciate at first, but you will surely come to see how useful this system is. In Python, a string is written by enclosing it in `"` or `'`. Run the cell below. Be careful: text that is not enclosed in `"` or `'` is interpreted as a variable or function name.
###Code
print(1, type(1))
print("1", type("1"))
###Output
_____no_output_____
###Markdown
`type` is a function that tells you the type of a piece of data (functions are covered later). Python has many types; a few examples are shown below. These are provided by Python itself and are called primitive types.
| Type | Example |
| :---- | --------: |
| int | -1 |
| float | 1.0 |
| str | "1" |
| bool | True |
| list | \[1, 1, 1\] |
| dict | {"key" : value} |
The next cells show an example where you can feel the benefit of types. Try to predict the result before running them.
###Code
# int + int
print(1 + 1)
# str + str
print("1" + "1")
# int + str
print(1 + "1")
###Output
_____no_output_____
###Markdown
We just talked about data types, but so far we have no way to save the result of a computation. A variable is the mechanism that binds a name to data inside a program. In Python, declaring a variable and assigning to it basically happen at the same time. The simplest example follows.
###Code
name = "value"
print(name)
# You can also make the type explicit with a type hint (unfortunately it is not enforced)
name: str = "value"
print(name)
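# The other primitive types from the table above, checked with type()
print(1.0, type(1.0))
print(True, type(True))
print([1, 1, 1], type([1, 1, 1]))
print({"key": "value"}, type({"key": "value"}))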
###Output
_____no_output_____
###Markdown
Compound assignment combines an assignment operator with an arithmetic operator. For example, you often see code like this
```python
x = x + 1
```
Because this pattern is so common, it can be shortened to
```python
x += 1
```
This `+=` is called the addition assignment operator. You can use it whenever you operate on a variable and assign the result back to it.
In general, to swap two variables `x` and `y`, you write something like
```python
tmp = x
x = y
y = tmp
```
If the first line were `x = y`, the two values could no longer be swapped. Python offers a much more convenient way to write this:
```python
x, y = y, x
```
This is called unpacking assignment. Unpacking is covered in more detail when tuples are introduced.
1.3 Lists
You have learned how to store data by binding it to a name with a variable. Now suppose you want to list the growing regions of your coffee beans. With what you know so far, you might write
###Code
coffee_from_1 = "コロンビア"
coffee_from_2 = "ガテマラ"
coffee_from_3 = "ペルー"
print(coffee_from_1, coffee_from_2, coffee_from_3)
###Output
_____no_output_____
###Markdown
This is one way to do it, but it does not look like a smart approach. This is where the list type comes in. You can group elements together by listing them inside `[ ]`. The elements can be any data, and a list can of course also be empty.
###Code
coffee_from = ["コロンビア", "ガテマラ", "ペルー"]
print(coffee_from)
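# Two common list operations: indexing (0-based) and appending a new element
print(coffee_from[0])
coffee_from.append("ブラジル")
print(coffee_from)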
###Output
_____no_output_____
###Markdown
A list can itself contain lists as elements. This is called a two-dimensional list; an ordinary list can be called a one-dimensional list, and n-dimensional lists are defined in the same way.
###Code
mahoujin = [[8, 1, 6], [3, 5 ,7], [4, 9, 2]]
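# Accessing one element of a 2-D list: the first index picks the row, the second the column
print(mahoujin[1][2])  # row 1, column 2 -> 7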
# print(mahoujin)
# for item in mahoujin:
# print(item)
###Output
_____no_output_____
###Markdown
1.4 Standard input and output
The function for output is `print()`, which we have used many times above. It prints the given value followed by a newline. The function for input is `input()`. Let's just try it.
###Code
# Assign the return value of input() to a variable named x
# Print x
###Output
_____no_output_____
###Markdown
By the way, what do you think the following code will produce? Try to predict the value of z, and think about how you would get the result you actually want.
###Code
x = input()
y = input()
z = x + y
print(x, y, z)
# Exercise
# Read your family name from standard input into a variable named family_name
# and your given name into a variable named last_name
# Join them into a variable named name and print it
###Output
_____no_output_____
###Markdown
1.5 Functions
You have probably learned about functions in mathematics, for example the definition "a relation in which fixing x determines exactly one y", or, from set theory, "a rule that maps elements of one set to elements of another set". A function in programming is closer to the latter. Here, let us say a function is **something that takes any number of arguments, performs some processing, and returns any number of return values**. For example, `input()`, which appeared earlier, takes either zero arguments or one str argument, and returns the text it reads as a single str value. Written in a Python-like way this is `input(arg: Optional[str]) -> str`, where Optional means the argument may be omitted. Let's actually run it.
###Code
# input() -> str
print(input())
# input(str) -> str
print(input("this is input:"))
###Output
_____no_output_____
###Markdown
Now consider adding together two return values of input(), as in 1.4. You already know that input() returns a str. What should you do when you want to treat it as an integer, i.e. as an int? There is a convenient mechanism for exactly this, called a **type cast** (or simply a cast). Converting str data to int data is easy: use the `int()` function.
###Code
x = "1"
y = "2"
print(x + y)
# Change this so that x and y are each cast to int and 3 is printed
int_x = x
int_y = y
print(int_x + int_y)
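# Other casts work the same way, e.g. str -> float and int -> str
print(float("3.14"), type(float("3.14")))
print(str(42), type(str(42)))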
###Output
_____no_output_____
###Markdown
1.6 Defining functions
Python already provides many functions, but we users can also define our own. The syntax is
```python
def function_name(parameters):
    processing
    return return_value
```
The full pattern with type hints is
```python
def function_name(param1: type_of_param1, param2: type_of_param2, ...) -> return_type:
    processing
    return return_value
```
Personally, I recommend the version with type hints. One thing to watch out for here is indentation: the lines after the line containing def start further to the right. This is called indentation (normally four spaces), and it expresses a block of meaning, so broken indentation causes an error. In a function definition, for example, the body runs until the return keyword, or until the indentation comes back to the same level as def.
###Code
# A function that takes one int argument and returns it doubled
def twice(n: int) -> int:
return 2 * n
print(twice(3))
# Exercise
# Define a function that takes str parameters family and last and returns them joined with a space in between
###Output
_____no_output_____
|
ai-platform-tensorflow/image_tf.ipynb
|
###Markdown
Setup
###Code
# Get the project id
proj_id = !gcloud config list project --format "value(core.project)"
proj_id[0]
USER = 'cchatterj'
PROJECT_ID = proj_id[0]
BUCKET_NAME = 'tuti_asset' #Use a unique name
FOLDER_RESULTS = 'tf_models'
REGION = 'us-central1'
ZONE1 = 'us-central1-a'
RUNTIME_VERSION = 2.1
JOB_DIR = 'gs://' + BUCKET_NAME + '/' + FOLDER_RESULTS + '/jobdir'
MODEL_DIR = 'gs://' + BUCKET_NAME + '/' + FOLDER_RESULTS + '/models'
!gcloud config set project $PROJECT_ID
!gcloud config set compute/zone $ZONE1
!gcloud config set compute/region $REGION
!gcloud config list
#!gcloud config config-helper --format "value(configuration.properties.core.project)"
# Clean old job logs, job packages and models
!gsutil -m -q rm $JOB_DIR/packages/**
!gsutil -m -q rm $MODEL_DIR/model**
###Output
_____no_output_____
###Markdown
ML Model
###Code
# Create the tf_trainer directory and load the trainer files in it
!mkdir -p trainer
%%writefile ./trainer/inputs.py
# Create the train and label lists
import math
import numpy as np
import pandas as pd
import tensorflow as tf
from sklearn.model_selection import train_test_split
#------
def load_data():
"""Creates train and test data set"""
mnist = tf.keras.datasets.mnist
(X_train, y_train),(X_test, y_test) = mnist.load_data()
X_train, X_test = X_train / 255.0, X_test / 255.0
# Check the shape
print("X_train shape = ", X_train.shape)
print("X_test shape = ", X_test.shape)
print("y_train shape = ", y_train.shape)
print("y_test shape = ", y_test.shape)
return [X_train, X_test, y_train, y_test]
from trainer import inputs
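# NOTE (assumption): plot_images() is not defined in this excerpt; a minimal sketch
# that previews the first few test digits with matplotlib:
import matplotlib.pyplot as plt
def plot_images(images, n=9):
    """Plot the first n images in a 3x3 grid."""
    fig, axes = plt.subplots(3, 3, figsize=(6, 6))
    for ax, img in zip(axes.ravel(), images[:n]):
        ax.imshow(img, cmap='gray')
        ax.axis('off')
    plt.show()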
train_test_data = inputs.load_data()
X_test = train_test_data[1]
plot_images(X_test)
%%writefile ./trainer/model.py
import tensorflow as tf
import numpy as np
def tf_model(input_shape, model_depth: int = 1, dropout_rate: float = 0.02):
"""Creates the keras model used by task to train the model."""
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Flatten, Dropout
model = Sequential()
model.add(Flatten(input_shape=input_shape))
for i in range(0,model_depth):
nUnits = 28*28-(i+1)*((28*28-128)//model_depth)
model.add(Dense(nUnits, activation='relu'))
model.add(Dropout(dropout_rate))
model.add(Dense(10, activation='softmax'))
print(model.summary())
return model
def custom_loss(y_true, y_pred):
    # mean squared error over the last axis (tf ops; bare mean/square are not defined here)
    custom_loss = tf.reduce_mean(tf.square(y_true - y_pred), axis=-1)
    return custom_loss
def custom_metric(y_true, y_pred):
    custom_metric = tf.reduce_mean(tf.square(y_true - y_pred), axis=-1)
    return custom_metric
###Output
Overwriting ./trainer/model.py
###Markdown
Package for distributed training
###Code
%%writefile ./setup.py
# python3
# ==============================================================================
# Copyright 2020 Google LLC. This software is provided as-is, without warranty
# or representation for any use or purpose. Your use of it is subject to your
# agreement with Google.
# ==============================================================================
# https://cloud.google.com/ai-platform/training/docs/runtime-version-list
from setuptools import find_packages
from setuptools import setup
#Runtime 2.1
REQUIRED_PACKAGES = ['tensorflow==2.1.0',
'pandas==0.25.3',
'scikit-learn==0.22',
'google-cloud-storage==1.23.0',
'gcsfs==0.6.1',
'cloudml-hypertune',
]
setup(
name='trainer',
version='0.1',
install_requires=REQUIRED_PACKAGES,
packages=find_packages(),
include_package_data=True,
description='Trainer package for Tensorflow Task'
)
###Output
Overwriting ./setup.py
###Markdown
Training functions
###Code
%%writefile ./trainer/__init__.py
# python3
# ==============================================================================
# Copyright 2020 Google LLC. This software is provided as-is, without warranty
# or representation for any use or purpose. Your use of it is subject to your
# agreement with Google.
# ==============================================================================
%%writefile ./trainer/train.py
# python3
# ==============================================================================
# Copyright 2020 Google LLC. This software is provided as-is, without warranty
# or representation for any use or purpose. Your use of it is subject to your
# agreement with Google.
# ==============================================================================
import os
import json
import tensorflow as tf
import numpy as np
import datetime as datetime
from pytz import timezone
import hypertune
import argparse
from trainer import model
from trainer import inputs
import warnings
warnings.filterwarnings("ignore")
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
#0 = all messages are logged (default behavior)
#1 = INFO messages are not printed
#2 = INFO and WARNING messages are not printed
#3 = INFO, WARNING, and ERROR messages are not printed
def parse_arguments():
"""Argument parser.
Returns:
Dictionary of arguments.
"""
parser = argparse.ArgumentParser()
parser.add_argument('--model_depth', default=1, type=int,
help='Hyperparameter: depth of the model')
parser.add_argument('--dropout_rate', default=0.02, type=float,
help='Hyperparameter: Drop out rate')
parser.add_argument('--learning_rate', default=0.0001, type=float,
help='Hyperparameter: initial learning rate')
parser.add_argument('--epochs', default=2, type=int,
help='Hyperparameter: epoch.')
parser.add_argument('--batch_size', default=4, type=int,
help='batch size of the deep network')
parser.add_argument('--model_dir', default="",
help='Directory to store model checkpoints and logs.')
parser.add_argument('--verbosity',choices=['DEBUG', 'ERROR', 'FATAL', 'INFO', 'WARN'],
default='FATAL')
args, _ = parser.parse_known_args()
return args
def get_callbacks(args, early_stop_patience: int = 3):
"""Creates Keras callbacks for model training."""
# Get trialId
trialId = json.loads(os.environ.get("TF_CONFIG", "{}")).get("task", {}).get("trial", "")
if trialId == '':
trialId = '0'
print("trialId=", trialId)
curTime = datetime.datetime.now(timezone('US/Pacific')).strftime('%H%M%S')
# Modify model_dir paths to include trialId
model_dir = args.model_dir + "/checkpoints/cp-"+curTime+"-"+trialId+"-{val_accuracy:.4f}"
log_dir = args.model_dir + "/log_dir"
tensorboard_cb = tf.keras.callbacks.TensorBoard(log_dir, histogram_freq=1)
checkpoint_cb = tf.keras.callbacks.ModelCheckpoint(model_dir, monitor='val_accuracy', mode='max',
verbose=0, save_best_only=True,
save_weights_only=False)
    earlystop_cb = tf.keras.callbacks.EarlyStopping(monitor='val_accuracy', patience=early_stop_patience)
return [checkpoint_cb, tensorboard_cb, earlystop_cb]
if __name__ == "__main__":
# ---------------------------------------
# Parse Arguments
# ---------------------------------------
args = parse_arguments()
#args.model_dir = MODEL_DIR + datetime.datetime.now(timezone('US/Pacific')).strftime('/model_%m%d%Y_%H%M')
print(args)
# ---------------------------------------
# Input Data & Preprocessing
# ---------------------------------------
print("Input and pre-process data ...")
# Extract train_seismic, train_label
train_test_data = inputs.load_data()
X_train = train_test_data[0]
X_test = train_test_data[1]
y_train = train_test_data[2]
y_test = train_test_data[3]
# ---------------------------------------
# Train model
# ---------------------------------------
print("Creating model ...")
tf_model = model.tf_model((28,28), model_depth=args.model_depth,
dropout_rate=args.dropout_rate)
optimizer = tf.keras.optimizers.Adam(learning_rate=args.learning_rate)
tf_model.compile(optimizer=optimizer,
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
print("Fitting model ...")
callbacks = get_callbacks(args, 3)
histy = tf_model.fit(x=X_train, y=y_train,
epochs=args.epochs,
batch_size=args.batch_size,
validation_data=(X_test, y_test),
callbacks=callbacks)
# TBD save history for visualization
final_epoch_accuracy = histy.history['accuracy'][-1]
final_epoch_count = len(histy.history['accuracy'])
print('final_epoch_accuracy = %.6f' % final_epoch_accuracy)
print('final_epoch_count = %2d' % final_epoch_count)
%%time
# Run the training manually
# Training parameters
from datetime import datetime
from pytz import timezone
MODEL_DEPTH = 2
DROPOUT_RATE = 0.01
LEARNING_RATE = 0.00005
EPOCHS = 1
BATCH_SIZE = 32
MODEL_DIR_PYTH = MODEL_DIR + datetime.now(timezone('US/Pacific')).strftime('/model_%m%d%Y_%H%M')
print('MODEL_DEPTH = %2d' % MODEL_DEPTH)
print('DROPOUT_RATE = %.4f' % DROPOUT_RATE)
print('LEARNING_RATE = %.6f' % LEARNING_RATE)
print('EPOCHS = %2d' % EPOCHS)
print('BATCH_SIZE = %2d' % BATCH_SIZE)
print("MODEL_DIR =", MODEL_DIR_PYTH)
# Run training
! python3 -m trainer.train \
--model_depth=$MODEL_DEPTH \
--dropout_rate=$DROPOUT_RATE \
--learning_rate=$LEARNING_RATE \
--epochs=$EPOCHS \
--batch_size=$BATCH_SIZE \
--model_dir=$MODEL_DIR_PYTH
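# NOTE (assumption): find_best_model_dir() is called below but is not defined in this
# excerpt. A minimal sketch that lists the checkpoint directories on GCS and returns
# the one whose name ends in the best val_accuracy, matching the
# cp-<time>-<trialId>-<val_accuracy>/ naming used by get_callbacks():
import subprocess
def find_best_model_dir(checkpoints_dir, offset=1, maxFlag=1):
    listing = subprocess.run(['gsutil', 'ls', checkpoints_dir],
                             capture_output=True, text=True).stdout.splitlines()
    print("All Models = ")
    for d in listing:
        print(d)
    # keep only cp-* checkpoint directories, sorted by the accuracy suffix in the name
    cps = sorted([d for d in listing if '/cp-' in d],
                 key=lambda d: float(d.rstrip('/').split('-')[-1]),
                 reverse=bool(maxFlag))
    best = cps[offset - 1]
    print("Best Accuracy from Checkpoints =", best.rstrip('/').split('-')[-1])
    print("Best Model Dir from Checkpoints =", best)
    return best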
# Test with latest saved model
best_model_dir_pyth = find_best_model_dir(MODEL_DIR_PYTH+'/checkpoints', offset=1, maxFlag=1)
#acc = test_saved_model(best_model_dir_pyth, 0)
%%time
#***CREATE model_dir in local VM***
!mkdir -p model_dir
from trainer import model
# Copy the model from storage to local memory
!gsutil -m cp -r $best_model_dir_pyth* ./model_dir
# Load the model
loaded_model = tf.keras.models.load_model('./model_dir', compile=False)#,
#custom_objects={"custom_loss": model.custom_loss, "custom_mse": model.custom_mse})
print("Signature ", loaded_model.signatures)
print("")
# Display model
tf.keras.utils.plot_model(loaded_model, show_shapes=True)
###Output
Copying gs://tuti_asset/tf_models/models/model_05092021_2306/checkpoints/cp-230608-0-0.9313/saved_model.pb...
Copying gs://tuti_asset/tf_models/models/model_05092021_2306/checkpoints/cp-230608-0-0.9313/variables/variables.index...
Copying gs://tuti_asset/tf_models/models/model_05092021_2306/checkpoints/cp-230608-0-0.9313/variables/variables.data-00000-of-00001...
Signature _SignatureMap({'serving_default': <tensorflow.python.saved_model.load._WrapperFunction object at 0x7f8754754dd0>})
CPU times: user 386 ms, sys: 79.4 ms, total: 466 ms
Wall time: 2.28 s
###Markdown
------ Training
###Code
# Create the config directory and load the trainer files in it
!mkdir -p config
%%writefile ./config/config.yaml
# python3
# ==============================================================================
# Copyright 2020 Google LLC. This software is provided as-is, without warranty
# or representation for any use or purpose. Your use of it is subject to your
# agreement with Google.
# ==============================================================================
# https://cloud.google.com/sdk/gcloud/reference/ai-platform/jobs/submit/training#--scale-tier
# https://www.kaggle.com/c/passenger-screening-algorithm-challenge/discussion/37087
# https://cloud.google.com/ai-platform/training/docs/using-gpus
#trainingInput:
# scaleTier: CUSTOM
# masterType: n1-highmem-16
# masterConfig:
# acceleratorConfig:
# count: 2
# type: NVIDIA_TESLA_V100
#trainingInput:
# scaleTier: CUSTOM
# masterType: n1-highmem-8
# masterConfig:
# acceleratorConfig:
# count: 1
# type: NVIDIA_TESLA_T4
# masterType: n1-highcpu-16
# workerType: cloud_tpu
# workerCount: 1
# workerConfig:
# acceleratorConfig:
# type: TPU_V3
# count: 8
#trainingInput:
# scaleTier: CUSTOM
# masterType: complex_model_m
# workerType: complex_model_m
# parameterServerType: large_model
# workerCount: 6
# parameterServerCount: 1
# scheduling:
# maxWaitTime: 3600s
# maxRunningTime: 7200s
#trainingInput:
# runtimeVersion: "2.1"
# scaleTier: CUSTOM
# masterType: standard_gpu
# workerCount: 9
# workerType: standard_gpu
# parameterServerCount: 3
# parameterServerType: standard
#trainingInput:
# scaleTier: BASIC-GPU
#trainingInput:
# region: us-central1
# scaleTier: CUSTOM
# masterType: complex_model_m
# workerType: complex_model_m_gpu
# parameterServerType: large_model
# workerCount: 4
# parameterServerCount: 2
trainingInput:
scaleTier: standard-1
from datetime import datetime
from pytz import timezone
JOBNAME_TRN = 'tf_train_'+ USER + '_' + \
datetime.now(timezone('US/Pacific')).strftime("%m%d%y_%H%M")
JOB_CONFIG = "config/config.yaml"
MODEL_DIR_TRN = MODEL_DIR + datetime.now(timezone('US/Pacific')).strftime('/model_%m%d%Y_%H%M')
print("Job Name = ", JOBNAME_TRN)
print("Job Dir = ", JOB_DIR)
print("MODEL_DIR =", MODEL_DIR_TRN)
# Training parameters
MODEL_DEPTH = 3
DROPOUT_RATE = 0.02
LEARNING_RATE = 0.0001
EPOCHS = 2
BATCH_SIZE = 32
print('MODEL_DEPTH = %2d' % MODEL_DEPTH)
print('DROPOUT_RATE = %.4f' % DROPOUT_RATE)
print('LEARNING_RATE = %.6f' % LEARNING_RATE)
print('EPOCHS = %2d' % EPOCHS)
print('BATCH_SIZE = %2d' % BATCH_SIZE)
# https://cloud.google.com/sdk/gcloud/reference/ai-platform/jobs/submit/training
TRAIN_LABELS = "mode=train,owner="+USER
# submit the training job
! gcloud ai-platform jobs submit training $JOBNAME_TRN \
--package-path $(pwd)/trainer \
--module-name trainer.train \
--region $REGION \
--python-version 3.7 \
--runtime-version $RUNTIME_VERSION \
--job-dir $JOB_DIR \
--config $JOB_CONFIG \
--labels $TRAIN_LABELS \
-- \
--model_depth=$MODEL_DEPTH \
--dropout_rate=$DROPOUT_RATE \
--learning_rate=$LEARNING_RATE \
--epochs=$EPOCHS \
--batch_size=$BATCH_SIZE \
--model_dir=$MODEL_DIR_TRN
# check the training job status
! gcloud ai-platform jobs describe $JOBNAME_TRN
# Print Errors
#response = ! gcloud logging read "resource.labels.job_id=$JOBNAME_TRN severity>=ERROR"
#for i in range(0,len(response)):
# if 'message' in response[i]:
# print(response[i])
# Test with latest saved model
best_model_dir_trn = find_best_model_dir(MODEL_DIR_TRN+'/checkpoints', offset=1, maxFlag=1)
#acc = test_saved_model(best_model_dir_trn, 0)
###Output
All Models =
gs://tuti_asset/tf_models/models/model_05072021_1727/checkpoints/
gs://tuti_asset/tf_models/models/model_05072021_1727/checkpoints/cp-173614-0-0.9527/
gs://tuti_asset/tf_models/models/model_05072021_1727/checkpoints/cp-173614-0-0.9529/
gs://tuti_asset/tf_models/models/model_05072021_1727/checkpoints/cp-173614-0-0.9641/
gs://tuti_asset/tf_models/models/model_05072021_1727/checkpoints/cp-173614-0-0.9679/
gs://tuti_asset/tf_models/models/model_05072021_1727/checkpoints/cp-173619-0-0.9551/
gs://tuti_asset/tf_models/models/model_05072021_1727/checkpoints/cp-173619-0-0.9677/
gs://tuti_asset/tf_models/models/model_05072021_1727/checkpoints/cp-173621-0-0.9512/
gs://tuti_asset/tf_models/models/model_05072021_1727/checkpoints/cp-173621-0-0.9686/
gs://tuti_asset/tf_models/models/model_05072021_1727/checkpoints/cp-173623-0-0.9539/
gs://tuti_asset/tf_models/models/model_05072021_1727/checkpoints/cp-173623-0-0.9662/
gs://tuti_asset/tf_models/models/model_05072021_1727/checkpoints/cp-173626-0-0.9536/
gs://tuti_asset/tf_models/models/model_05072021_1727/checkpoints/cp-173626-0-0.9668/
gs://tuti_asset/tf_models/models/model_05072021_1727/checkpoints/cp-173633-0-0.9549/
gs://tuti_asset/tf_models/models/model_05072021_1727/checkpoints/cp-173633-0-0.9683/
gs://tuti_asset/tf_models/models/model_05072021_1727/checkpoints/cp-173709-0-0.9540/
gs://tuti_asset/tf_models/models/model_05072021_1727/checkpoints/cp-173621-0-0.9512/
gs://tuti_asset/tf_models/models/model_05072021_1727/checkpoints/cp-173614-0-0.9527/
gs://tuti_asset/tf_models/models/model_05072021_1727/checkpoints/cp-173614-0-0.9529/
gs://tuti_asset/tf_models/models/model_05072021_1727/checkpoints/cp-173626-0-0.9536/
gs://tuti_asset/tf_models/models/model_05072021_1727/checkpoints/cp-173623-0-0.9539/
gs://tuti_asset/tf_models/models/model_05072021_1727/checkpoints/cp-173709-0-0.9540/
gs://tuti_asset/tf_models/models/model_05072021_1727/checkpoints/cp-173633-0-0.9549/
gs://tuti_asset/tf_models/models/model_05072021_1727/checkpoints/cp-173619-0-0.9551/
gs://tuti_asset/tf_models/models/model_05072021_1727/checkpoints/cp-173614-0-0.9641/
gs://tuti_asset/tf_models/models/model_05072021_1727/checkpoints/cp-173623-0-0.9662/
gs://tuti_asset/tf_models/models/model_05072021_1727/checkpoints/cp-173626-0-0.9668/
gs://tuti_asset/tf_models/models/model_05072021_1727/checkpoints/cp-173619-0-0.9677/
gs://tuti_asset/tf_models/models/model_05072021_1727/checkpoints/cp-173614-0-0.9679/
gs://tuti_asset/tf_models/models/model_05072021_1727/checkpoints/cp-173633-0-0.9683/
gs://tuti_asset/tf_models/models/model_05072021_1727/checkpoints/cp-173621-0-0.9686/
Best Accuracy from Checkpoints = 0.9686
Best Model Dir from Checkpoints = gs://tuti_asset/tf_models/models/model_05072021_1727/checkpoints/cp-173621-0-0.9686/
###Markdown
------ Hyper Parameter Tuning
###Code
# Create the tf directory and load the trainer files in it
!cp ./trainer/train.py ./trainer/train_hpt.py
%%writefile -a ./trainer/train_hpt.py
"""This method updates a CAIP HPTuning Job with a final metric for the job.
In TF2.X the user must either use hypertune or a custom callback with
tf.summary.scalar to update CAIP HP Tuning jobs. This function uses
hypertune, which appears to be the preferred solution. Hypertune also works
with containers, without code change.
Args:
metric_tag: The metric being optimized. This MUST MATCH the
hyperparameterMetricTag specificed in the hyperparameter tuning yaml.
metric_value: The value to report at the end of model training.
global_step: An int value to specify the number of trainin steps completed
at the time the metric was reported.
"""
hpt = hypertune.HyperTune()
hpt.report_hyperparameter_tuning_metric(
hyperparameter_metric_tag='accuracy',
metric_value=final_epoch_accuracy,
global_step=final_epoch_count
)
%%writefile ./config/hptuning_config.yaml
# python3
# ==============================================================================
# Copyright 2020 Google LLC. This software is provided as-is, without warranty
# or representation for any use or purpose. Your use of it is subject to your
# agreement with Google.
# ==============================================================================
# https://cloud.google.com/ai-platform/training/docs/reference/rest/v1/projects.jobs
# https://cloud.google.com/sdk/gcloud/reference/ai-platform/jobs/submit/training
#trainingInput:
# scaleTier: CUSTOM
# masterType: n1-highmem-8
# masterConfig:
# acceleratorConfig:
# count: 1
# type: NVIDIA_TESLA_T4
#
# masterType: standard_p100
# workerType: standard_p100
# parameterServerType: standard_p100
# workerCount: 8
# parameterServerCount: 1
# runtimeVersion: $RUNTIME_VERSION
# pythonVersion: '3.7'
#trainingInput:
# scaleTier: CUSTOM
# masterType: complex_model_m
# workerType: complex_model_m
# parameterServerType: large_model
# workerCount: 9
# parameterServerCount: 3
# scheduling:
# maxWaitTime: 3600s
# maxRunningTime: 7200s
#trainingInput:
# scaleTier: BASIC-GPU
#trainingInput:
# scaleTier: CUSTOM
# masterType: n1-highmem-16
# masterConfig:
# acceleratorConfig:
# count: 2
# type: NVIDIA_TESLA_V100
trainingInput:
scaleTier: STANDARD-1
hyperparameters:
goal: MAXIMIZE
hyperparameterMetricTag: accuracy
maxTrials: 4
maxParallelTrials: 4
enableTrialEarlyStopping: True
params:
- parameterName: model_depth
type: INTEGER
minValue: 2
maxValue: 4
scaleType: UNIT_LINEAR_SCALE
- parameterName: epochs
type: INTEGER
minValue: 1
maxValue: 3
scaleType: UNIT_LINEAR_SCALE
from datetime import datetime
from pytz import timezone
JOBNAME_HPT = 'tf_hptrn_' + USER + '_' + \
datetime.now(timezone('US/Pacific')).strftime("%m%d%y_%H%M")
JOB_CONFIG = "./config/hptuning_config.yaml"
MODEL_DIR_HPT = MODEL_DIR + datetime.now(timezone('US/Pacific')).strftime('/model_%m%d%Y_%H%M')
print("Job Name = ", JOBNAME_HPT)
print("Job Dir = ", JOB_DIR)
print("MODEL_DIR =", MODEL_DIR_HPT)
# Training parameters
DROPOUT_RATE = 0.02
LEARNING_RATE = 0.0001
BATCH_SIZE = 32
# submit the training job
HT_LABELS = "mode=hypertrain,owner="+USER
! gcloud ai-platform jobs submit training $JOBNAME_HPT \
--package-path $(pwd)/trainer \
--module-name trainer.train_hpt \
--python-version 3.7 \
--runtime-version $RUNTIME_VERSION \
--region $REGION \
--job-dir $JOB_DIR \
--config $JOB_CONFIG \
--labels $HT_LABELS \
-- \
--dropout_rate=$DROPOUT_RATE \
--learning_rate=$LEARNING_RATE \
--batch_size=$BATCH_SIZE \
--model_dir=$MODEL_DIR_HPT
# check the hyperparameter training job status
! gcloud ai-platform jobs describe $JOBNAME_HPT
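# NOTE (assumption): pyth_get_hypertuned_parameters() is called below but is not
# defined in this excerpt. A minimal sketch that reads the finished HP tuning job
# through the AI Platform Training REST API and returns the best trial:
from googleapiclient import discovery
def pyth_get_hypertuned_parameters(project_id, job_name, n_best=1):
    ml = discovery.build('ml', 'v1', cache_discovery=False)
    job = ml.projects().jobs().get(
        name='projects/{}/jobs/{}'.format(project_id, job_name)).execute()
    trials = job['trainingOutput']['trials']
    # sort by the reported objective value, best first (the study goal is MAXIMIZE)
    trials = sorted(trials,
                    key=lambda t: float(t.get('finalMetric', {}).get('objectiveValue', 0)),
                    reverse=True)
    return trials[n_best - 1]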
# Print Errors
#response = ! gcloud logging read "resource.labels.job_id=$JOBNAME_HPT severity>=ERROR"
#for i in range(0,len(response)):
# if 'message' in response[i]:
# print(response[i])
# Get the best model parameters from Cloud API
best_model = pyth_get_hypertuned_parameters(PROJECT_ID, JOBNAME_HPT, 1)
MODEL_DEPTH = best_model['hyperparameters']['model_depth']
EPOCHS = best_model['hyperparameters']['epochs']
print('')
print('Objective=', best_model['finalMetric']['objectiveValue'])
print('MODEL_DEPTH =', MODEL_DEPTH)
print('EPOCHS =', EPOCHS)
# Find count of checkpoints
all_models = ! gsutil ls {MODEL_DIR_HPT+'/checkpoints'}
print("Total Hypertrained Models=", len(all_models))
# Test with latest saved model
best_model_dir_hyp = find_best_model_dir(MODEL_DIR_HPT+'/checkpoints', offset=1, maxFlag=1)
#acc = test_saved_model(best_model_dir_hyp, 0)
#import keras.backend as K
#loaded_model = tf.keras.models.load_model(MODEL_DIR_PARAM+'/checkpoints')
#print("learning_rate=", K.eval(loaded_model.optimizer.lr))
#tf.keras.utils.plot_model(loaded_model, show_shapes=True)
###Output
Total Hypertrained Models= 138
All Models =
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-180403-3-0.9558/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-180403-3-0.9593/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-180404-3-0.9511/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-180405-3-0.9554/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-180413-3-0.9572/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-180414-3-0.9613/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-180415-3-0.9595/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-180417-1-0.9519/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-180417-1-0.9657/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-180417-1-0.9726/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-180419-2-0.9427/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-180419-2-0.9612/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-180419-2-0.9663/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-180425-1-0.9537/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-180425-1-0.9592/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-180425-1-0.9731/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-180426-1-0.9551/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-180426-1-0.9670/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-180426-1-0.9695/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-180432-2-0.9445/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-180432-2-0.9603/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-180432-2-0.9676/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-180434-1-0.9551/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-180434-1-0.9684/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-180434-1-0.9699/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-180435-2-0.9440/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-180435-2-0.9591/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-180435-2-0.9662/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-180447-4-0.9580/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-180447-4-0.9696/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-180447-4-0.9756/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-180447-4-0.9791/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-180456-4-0.9599/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-180456-4-0.9704/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-180456-4-0.9739/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-180456-4-0.9773/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-180502-4-0.9589/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-180502-4-0.9690/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-180502-4-0.9755/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-180502-4-0.9777/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-180503-4-0.9577/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-180503-4-0.9694/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-180503-4-0.9767/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-180504-2-0.9477/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-180504-2-0.9605/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-180504-2-0.9695/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-180504-4-0.9603/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-180504-4-0.9697/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-180504-4-0.9732/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-180505-4-0.9576/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-180505-4-0.9692/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-180505-4-0.9740/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-180505-4-0.9775/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-180509-2-0.9444/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-180509-2-0.9597/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-180509-2-0.9672/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-180510-4-0.9566/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-180510-4-0.9598/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-180510-4-0.9676/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-180510-4-0.9717/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-180510-4-0.9743/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-180510-4-0.9764/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-180517-2-0.9458/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-180517-2-0.9604/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-180517-2-0.9670/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-180519-1-0.9538/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-180519-1-0.9544/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-180519-1-0.9680/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-180519-1-0.9689/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-180519-1-0.9736/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-180519-1-0.9739/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-180520-1-0.9563/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-180520-1-0.9666/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-180520-1-0.9751/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-180520-2-0.9426/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-180520-2-0.9599/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-180520-2-0.9674/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-180523-2-0.9407/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-180523-2-0.9569/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-180523-2-0.9653/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-180529-1-0.9531/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-180529-1-0.9678/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-180816-5-0.9552/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-180816-5-0.9589/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-180816-5-0.9604/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-180816-5-0.9666/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-180816-5-0.9708/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-180816-5-0.9710/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-180836-5-0.9584/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-180836-5-0.9704/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-180839-5-0.9592/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-180839-5-0.9720/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-180841-5-0.9590/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-180841-5-0.9681/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-180848-5-0.9601/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-180848-5-0.9703/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-180853-8-0.9559/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-180856-8-0.9549/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-180857-8-0.9536/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-180912-8-0.9572/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-180914-8-0.9475/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-180915-8-0.9544/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-180916-8-0.9507/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-180924-8-0.9521/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-181255-6-0.9579/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-181255-6-0.9696/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-181255-6-0.9737/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-181317-7-0.9439/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-181319-6-0.9588/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-181319-6-0.9707/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-181319-6-0.9751/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-181329-6-0.9608/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-181329-6-0.9720/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-181329-6-0.9736/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-181330-7-0.9431/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-181331-7-0.9439/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-181334-6-0.9606/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-181334-6-0.9719/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-181334-6-0.9732/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-181334-7-0.9437/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-181336-6-0.9588/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-181336-6-0.9598/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-181336-6-0.9649/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-181336-6-0.9701/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-181336-6-0.9754/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-181336-6-0.9766/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-181339-6-0.9583/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-181339-6-0.9697/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-181339-6-0.9753/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-181341-5-0.9569/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-181341-5-0.9656/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-181341-6-0.9591/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-181341-6-0.9717/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-181341-6-0.9746/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-181344-7-0.9453/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-181346-7-0.9431/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-181351-7-0.9435/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-180523-2-0.9407/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-180520-2-0.9426/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-180419-2-0.9427/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-181330-7-0.9431/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-181346-7-0.9431/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-181351-7-0.9435/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-181334-7-0.9437/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-181317-7-0.9439/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-181331-7-0.9439/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-180435-2-0.9440/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-180509-2-0.9444/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-180432-2-0.9445/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-181344-7-0.9453/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-180517-2-0.9458/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-180914-8-0.9475/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-180504-2-0.9477/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-180916-8-0.9507/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-180404-3-0.9511/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-180417-1-0.9519/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-180924-8-0.9521/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-180529-1-0.9531/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-180857-8-0.9536/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-180425-1-0.9537/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-180519-1-0.9538/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-180519-1-0.9544/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-180915-8-0.9544/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-180856-8-0.9549/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-180426-1-0.9551/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-180434-1-0.9551/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-180816-5-0.9552/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-180405-3-0.9554/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-180403-3-0.9558/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-180853-8-0.9559/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-180520-1-0.9563/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-180510-4-0.9566/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-180523-2-0.9569/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-181341-5-0.9569/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-180413-3-0.9572/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-180912-8-0.9572/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-180505-4-0.9576/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-180503-4-0.9577/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-181255-6-0.9579/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-180447-4-0.9580/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-181339-6-0.9583/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-180836-5-0.9584/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-181319-6-0.9588/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-181336-6-0.9588/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-180502-4-0.9589/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-180816-5-0.9589/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-180841-5-0.9590/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-180435-2-0.9591/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-181341-6-0.9591/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-180425-1-0.9592/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-180839-5-0.9592/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-180403-3-0.9593/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-180415-3-0.9595/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-180509-2-0.9597/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-180510-4-0.9598/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-181336-6-0.9598/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-180456-4-0.9599/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-180520-2-0.9599/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-180848-5-0.9601/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-180432-2-0.9603/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-180504-4-0.9603/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-180517-2-0.9604/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-180816-5-0.9604/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-180504-2-0.9605/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-181334-6-0.9606/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-181329-6-0.9608/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-180419-2-0.9612/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-180414-3-0.9613/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-181336-6-0.9649/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-180523-2-0.9653/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-181341-5-0.9656/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-180417-1-0.9657/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-180435-2-0.9662/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-180419-2-0.9663/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-180520-1-0.9666/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-180816-5-0.9666/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-180426-1-0.9670/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-180517-2-0.9670/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-180509-2-0.9672/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-180520-2-0.9674/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-180432-2-0.9676/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-180510-4-0.9676/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-180529-1-0.9678/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-180519-1-0.9680/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-180841-5-0.9681/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-180434-1-0.9684/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-180519-1-0.9689/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-180502-4-0.9690/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-180505-4-0.9692/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-180503-4-0.9694/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-180426-1-0.9695/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-180504-2-0.9695/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-180447-4-0.9696/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-181255-6-0.9696/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-180504-4-0.9697/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-181339-6-0.9697/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-180434-1-0.9699/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-181336-6-0.9701/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-180848-5-0.9703/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-180456-4-0.9704/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-180836-5-0.9704/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-181319-6-0.9707/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-180816-5-0.9708/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-180816-5-0.9710/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-180510-4-0.9717/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-181341-6-0.9717/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-181334-6-0.9719/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-180839-5-0.9720/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-181329-6-0.9720/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-180417-1-0.9726/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-180425-1-0.9731/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-180504-4-0.9732/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-181334-6-0.9732/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-180519-1-0.9736/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-181329-6-0.9736/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-181255-6-0.9737/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-180456-4-0.9739/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-180519-1-0.9739/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-180505-4-0.9740/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-180510-4-0.9743/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-181341-6-0.9746/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-180520-1-0.9751/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-181319-6-0.9751/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-181339-6-0.9753/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-181336-6-0.9754/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-180502-4-0.9755/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-180447-4-0.9756/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-180510-4-0.9764/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-181336-6-0.9766/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-180503-4-0.9767/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-180456-4-0.9773/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-180505-4-0.9775/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-180502-4-0.9777/
gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-180447-4-0.9791/
Best Accuracy from Checkpoints = 0.9791
Best Model Dir from Checkpoints = gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-180447-4-0.9791/
###Markdown
-------- Deploy the Model
###Code
## https://cloud.google.com/ai-platform/prediction/docs/machine-types-online-prediction#available_machine_types
# We need 2 versions of the same model:
# 1. Batch prediction model deployed on a mls1-c1-m2 cluster
# 2. Online prediction model deployed on a n1-standard-16 cluster
# Batch prediction does not support GPU and n1-standard-16 clusters.
# Run the Deploy Model section twice:
# 1. As a BATCH Mode version use MODEL_VERSION = MODEL_VERSION_BATCH
# 2. As a ONLINE Mode version use MODEL_VERSION = MODEL_VERSION_ONLINE
# Regional End points with python
#https://cloud.google.com/ai-platform/prediction/docs/regional-endpoints#python
MODEL_NAME = "mnist_model_1"
MODEL_VERSION_ONLINE = "online_v1"
MODEL_VERSION_BATCH = "batch_v1"
#Run this as Batch first then Online
#MODEL_VERSION = MODEL_VERSION_ONLINE
MODEL_VERSION = MODEL_VERSION_BATCH
# List all models
print("\nList of Models in Global Endpoint)")
!gcloud ai-platform models list --region=global
# List all versions of model
print("\nList of Versions in Global Endpoint)")
!gcloud ai-platform versions list --model $MODEL_NAME --region=global
#!gcloud ai-platform versions delete $MODEL_VERSION_ONLINE --model $MODEL_NAME --quiet --region=global
#!gcloud ai-platform models delete $MODEL_NAME --quiet --region=global
# List all models
print("\nList of Models in Global Endpoint)")
!gcloud ai-platform models list --region=global
# List all versions of model
print("\nList of Versions in Global Endpoint)")
!gcloud ai-platform versions list --model $MODEL_NAME --region=global
# create the model if it doesn't already exist
modelname = !gcloud ai-platform models list | grep -w $MODEL_NAME
print(modelname)
if (len(modelname) <= 1) or ('Listed 0 items.' in modelname[1]):
print("Creating model " + MODEL_NAME)
# Global endpoint
!gcloud ai-platform models create $MODEL_NAME --enable-logging --regions $REGION
else:
print("Model " + MODEL_NAME + " exist")
print("\nList of Models in Global Endpoint)")
!gcloud ai-platform models list --region=global
%%time
print("Model Name =", MODEL_NAME)
print("Model Versions =", MODEL_VERSION)
# Get a list of model directories
best_model_dir = best_model_dir_hyp
print("Best Model Dir: ", best_model_dir)
MODEL_FRAMEWORK = "TENSORFLOW"
MODEL_DESCRIPTION = "SEQ_MODEL_1"
MODEL_LABELS="team=ourteam,phase=test,owner="+USER
MACHINE_TYPE = "mls1-c1-m2"
if (MODEL_VERSION == MODEL_VERSION_BATCH):
MACHINE_TYPE = "mls1-c1-m2"
MODEL_LABELS = MODEL_LABELS+",mode=batch"
if (MODEL_VERSION == MODEL_VERSION_ONLINE):
MACHINE_TYPE = "mls1-c1-m2" #"n1-standard-32"
MODEL_LABELS = MODEL_LABELS+",mode=online"
# Deploy the model
! gcloud beta ai-platform versions create $MODEL_VERSION \
--model $MODEL_NAME \
--origin $best_model_dir \
--runtime-version $RUNTIME_VERSION \
--python-version=3.7 \
--description=$MODEL_DESCRIPTION \
--labels $MODEL_LABELS \
--machine-type=$MACHINE_TYPE \
--framework $MODEL_FRAMEWORK \
--region global
# List all models
print("\nList of Models in Global Endpoint)")
!gcloud ai-platform models list --region=global
print("\nList of Models in Regional Endpoint)")
!gcloud ai-platform models list --region=$REGION
# List all versions of model
print("\nList of Versions in Global Endpoint)")
!gcloud ai-platform versions list --model $MODEL_NAME --region=global
#print("\nList of Versions in Regional Endpoint)")
#!gcloud ai-platform versions list --model $MODEL_NAME --region=$REGION
###Output
List of Models in Global Endpoint)
Using endpoint [https://ml.googleapis.com/]
NAME DEFAULT_VERSION_NAME
kfp_xgb_model kfp_xgb_bst_v0_1
loan_model_1
mnist_model_1 online_v1
xgb_model elvinzhu_xgb_bst
List of Models in Regional Endpoint)
Using endpoint [https://us-central1-ml.googleapis.com/]
Listed 0 items.
List of Versions in Global Endpoint)
Using endpoint [https://ml.googleapis.com/]
NAME DEPLOYMENT_URI STATE
batch_v1 gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-180447-4-0.9791/ READY
online_v1 gs://tuti_asset/tf_models/models/model_05072021_1755/checkpoints/cp-180447-4-0.9791/ READY
###Markdown
------ Predictions with the deployed model
###Code
%%time
from trainer import model
# Copy the model from storage to local memory
!gsutil -m cp -r $best_model_dir_hyp* ./model_dir
# Load the model
loaded_model = tf.keras.models.load_model('./model_dir', compile=False) #,
#custom_objects={"custom_loss": model.custom_loss,"custom_mse": model.custom_mse})
print("Signature ", loaded_model.signatures)
# Check the model layers
model_layers = [layer.name for layer in loaded_model.layers]
print("")
print("Model Input Layer=", model_layers[0])
print("Model Output Layer=", model_layers[-1])
print("")
from trainer import inputs
train_test_data = inputs.load_data()
X_test = train_test_data[1]
y_test = train_test_data[3]
###Output
X_train shape = (60000, 28, 28)
X_test shape = (10000, 28, 28)
y_train shape = (60000,)
y_test shape = (10000,)
###Markdown
Online Prediction with python
###Code
%%time
# Online Prediction with Python - works for global end points only
# Use MODEL_VERSION_ONLINE not MODEL_VERSION_BATCH
MODEL_VERSION = MODEL_VERSION_ONLINE
from oauth2client.client import GoogleCredentials
from googleapiclient import discovery
from googleapiclient import errors
import json
#tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
#tf.get_logger().setLevel('ERROR')
print("Project ID =", PROJECT_ID)
print("Model Name =", MODEL_NAME)
print("Model Version =", MODEL_VERSION)
model_name = 'projects/{}/models/{}'.format(PROJECT_ID, MODEL_NAME)
if MODEL_VERSION is not None:
model_name += '/versions/{}'.format(MODEL_VERSION)
credentials = GoogleCredentials.get_application_default()
service = discovery.build('ml', 'v1', cache_discovery=False, credentials=credentials)
print("model_name=", model_name)
pprobas_temp = []
batch_size = 32
n_samples = min(1000,X_test.shape[0])
print("batch_size=", batch_size)
print("n_samples=", n_samples)
for i in range(0, n_samples, batch_size):
j = min(i+batch_size, n_samples)
print("Processing samples", i, j)
request = service.projects().predict(name=model_name, \
body={'instances': np.array(X_test)[i:j].tolist()})
try:
response = request.execute()
pprobas_temp += response['predictions']
except errors.HttpError as err:
# Something went wrong, print out some information.
        tf.compat.v1.logging.error('There was an error getting the prediction. Check the details:')
tf.compat.v1.logging.error(err._get_reason())
break
# Show the prediction results as an array
nPreds = len(pprobas_temp)
nClasses = len(np.unique(y_test))
pprobas = np.zeros((nPreds, nClasses))
for i in range(nPreds):
pprobas[i,:] = np.array(pprobas_temp[i][model_layers[-1]])
pprobas = np.round(pprobas, 2)
pprobas
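# --- Added sketch (not in the original notebook): turn the returned class
# probabilities into labels and compare them against the ground truth loaded
# earlier. Assumes y_test holds integer labels aligned with the first
# nPreds test images that were sent to the service.
predicted_labels = np.argmax(pprobas, axis=1)
online_accuracy = np.mean(predicted_labels == np.array(y_test)[:nPreds])
print("Online prediction accuracy on", nPreds, "samples:", online_accuracy)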
###Output
_____no_output_____
###Markdown
Batch Prediction with GCLOUD
###Code
# Write batch data to file in GCS
import shutil
# Clean current directory
DATA_DIR = './batch_data'
shutil.rmtree(DATA_DIR, ignore_errors=True)
os.makedirs(DATA_DIR)
n_samples = min(1000,X_test.shape[0])
nFiles = 10
nRecsPerFile = min(1000,n_samples//nFiles)
print("n_samples =", n_samples)
print("nFiles =", nFiles)
print("nRecsPerFile =", nRecsPerFile)
# Create nFiles files with nImagesPerFile images each
for i in range(nFiles):
with open(f'{DATA_DIR}/unkeyed_batch_{i}.json', "w") as file:
for z in range(nRecsPerFile):
print(f'{{"flatten_input": {np.array(X_test)[i*nRecsPerFile+z].tolist()}}}', file=file)
#print(f'{{"{model_layers[0]}": {np.array(X_test)[i*nRecsPerFile+z].tolist()}}}', file=file)
#key = f'key_{i}_{z}'
#print(f'{{"image": {X_test_images[z].tolist()}, "key": "{key}"}}', file=file)
# Write batch data to gcs file
!gsutil -m cp -r ./batch_data gs://$BUCKET_NAME/$FOLDER_RESULTS/
# Remove old batch prediction results
!gsutil -m rm -r gs://$BUCKET_NAME/$FOLDER_RESULTS/batch_predictions
from datetime import datetime
from pytz import timezone
DATA_FORMAT="text" # JSON data format
INPUT_PATHS='gs://' + BUCKET_NAME + '/' + FOLDER_RESULTS + '/batch_data/*'
OUTPUT_PATH='gs://' + BUCKET_NAME + '/' + FOLDER_RESULTS + '/batch_predictions'
PRED_LABELS="mode=batch,team=ourteam,phase=test,owner="+USER
SIGNATURE_NAME="serving_default"
JOBNAME_BATCH = 'tf_batch_predict_'+ USER + '_' + \
datetime.now(timezone('US/Pacific')).strftime("%m%d%y_%H%M")
print("INPUT_PATHS = ", INPUT_PATHS)
print("OUTPUT_PATH = ", OUTPUT_PATH)
print("Job Name = ", JOBNAME_BATCH)
# Only works with global endpoint
# Submit batch predict job
# Use MODEL_VERSION_BATCH not MODEL_VERSION_ONLINE
MODEL_VERSION = MODEL_VERSION_BATCH
!gcloud ai-platform jobs submit prediction $JOBNAME_BATCH \
--model=$MODEL_NAME \
--version=$MODEL_VERSION \
--input-paths=$INPUT_PATHS \
--output-path=$OUTPUT_PATH \
--data-format=$DATA_FORMAT \
--labels=$PRED_LABELS \
--signature-name=$SIGNATURE_NAME \
--region=$REGION
# check the batch prediction job status
! gcloud ai-platform jobs describe $JOBNAME_BATCH
# Print Errors
#response = ! gcloud logging read "resource.labels.job_id=$JOBNAME_BATCH severity>=ERROR"
#for i in range(0,len(response)):
# if 'message' in response[i]:
# print(response[i])
print("errors")
!gsutil cat $OUTPUT_PATH/prediction.errors_stats-00000-of-00001
print("batch prediction results")
!gsutil cat $OUTPUT_PATH/prediction.results-00000-of-00010
###Output
errors
batch prediction results
{"dense_4": [1.5569074093946256e-05, 4.465508482098812e-06, 5.606467311736196e-05, 0.00013420336472336203, 1.1440502589721291e-07, 7.478337238353561e-07, 1.971690899438272e-08, 0.9997689127922058, 1.418449073753436e-06, 1.8464221284375526e-05]}
{"dense_4": [6.3324897325856e-08, 6.007076535752276e-06, 0.999923825263977, 6.897489947732538e-05, 5.897189403469838e-10, 3.7902630367625534e-08, 2.405205368205543e-09, 3.133243353659054e-08, 9.399802252119116e-07, 8.448650112846678e-11]}
{"dense_4": [1.3423500604403671e-05, 0.9984585046768188, 0.00042809778824448586, 2.7107555069960654e-05, 0.00011921809345949441, 5.862123543920461e-06, 5.339018389349803e-05, 0.0005637670983560383, 0.0003190849383827299, 1.1625871593423653e-05]}
{"dense_4": [0.9997149109840393, 1.5705425539636053e-05, 5.923803473706357e-05, 6.913218385307118e-06, 9.507743016001768e-06, 2.5874439302242536e-07, 7.189909229055047e-05, 3.961725815315731e-05, 1.439556285731669e-06, 8.058062667259946e-05]}
{"dense_4": [6.712702543154592e-06, 5.223053904046537e-06, 1.4331204738482484e-06, 1.0014654208134743e-06, 0.9987139701843262, 2.316390464329743e-06, 6.521816430904437e-06, 7.374328561127186e-05, 7.872374226280954e-06, 0.0011813044548034668]}
{"dense_4": [3.332219193907804e-06, 0.9995743632316589, 7.095322871464305e-06, 3.043560127480305e-06, 3.550470864865929e-05, 1.3461422554428282e-07, 2.102654661939596e-06, 0.00033996690763160586, 2.7185253202333115e-05, 7.282081696757814e-06]}
{"dense_4": [1.0935491445707157e-05, 1.833797068684362e-05, 1.5793181091794395e-06, 1.2263222970432253e-06, 0.9977511763572693, 8.715222065802664e-05, 9.493361721979454e-05, 9.691677405498922e-05, 0.0006836599786765873, 0.0012541586766019464]}
{"dense_4": [1.0457504686200991e-05, 2.943294748547487e-05, 3.084855416091159e-05, 0.0056397984735667706, 0.000863008841406554, 0.00010868833487620577, 1.568543410712664e-07, 0.00031805355683900416, 2.8520807973109186e-05, 0.9929710030555725]}
{"dense_4": [6.746971848770045e-06, 0.00012330865138210356, 0.0035320050083100796, 7.212071068352088e-05, 0.881248950958252, 0.05732075124979019, 0.03432289510965347, 1.2217979929118883e-05, 0.021398380398750305, 0.0019625776913017035]}
{"dense_4": [2.015493009821512e-06, 7.045231882329972e-07, 3.550965743670531e-08, 1.8802764316205867e-05, 0.000414410256780684, 1.6911846500988759e-07, 4.9013344494142075e-09, 6.613695586565882e-05, 1.2218575648148544e-05, 0.9994853734970093]}
{"dense_4": [0.9999059438705444, 2.0816207779716933e-06, 2.05008818738861e-05, 1.2732418497307663e-07, 1.9400572170980013e-07, 3.4996210729332233e-07, 7.548782832600409e-06, 7.950636245368514e-06, 3.221663007479947e-07, 5.5006494221743196e-05]}
{"dense_4": [0.00019126305414829403, 4.740441909234505e-06, 9.119046808336861e-06, 4.740329131891485e-06, 0.0002650430251378566, 0.00023146625608205795, 0.9980194568634033, 2.548422116888105e-06, 0.0012700916267931461, 1.4840749145150767e-06]}
{"dense_4": [6.627509492318495e-07, 4.584388761941227e-07, 5.849790696288437e-08, 1.9450588297331706e-05, 0.00011889265442732722, 1.4048619050299749e-06, 1.9281505281298905e-09, 3.835435563814826e-06, 1.0095283187183668e-06, 0.9998542070388794]}
{"dense_4": [0.9995830655097961, 6.16368924966082e-06, 1.491019156674156e-05, 9.093635640056164e-07, 4.204982815281255e-06, 1.3409986650003702e-06, 8.19709530333057e-05, 7.304663449758664e-05, 4.538379016594263e-06, 0.00022990703291725367]}
{"dense_4": [2.2747127559341607e-07, 0.9999274015426636, 2.4413054688920965e-06, 2.0477424186537974e-05, 4.441830697032856e-06, 5.651170198461841e-08, 1.7457701062539854e-07, 8.296668056573253e-06, 3.4202399547211826e-05, 2.448220811857027e-06]}
{"dense_4": [5.443382633529836e-07, 5.734827482228866e-06, 3.2654691040079342e-06, 0.026261314749717712, 2.1731025299231987e-06, 0.9734050631523132, 1.705573936305882e-06, 1.3207554729888216e-05, 7.291306974366307e-05, 0.00023407915432471782]}
{"dense_4": [1.826602419896517e-05, 6.428616643461282e-07, 1.0568534207777702e-06, 1.745328336255625e-05, 0.00045266313827596605, 2.4245838403658126e-07, 7.498055509813639e-08, 0.00011901054676854983, 2.0468412913032807e-05, 0.9993701577186584]}
{"dense_4": [9.765655704541132e-06, 8.352413374268508e-07, 0.00011584019375732169, 0.00011091023770859465, 2.5238634293600626e-08, 5.611731168642109e-08, 2.1642321268444675e-09, 0.9997580647468567, 1.327634890913032e-07, 4.398825694806874e-06]}
{"dense_4": [1.0386596841271967e-05, 1.0602975635265466e-05, 5.962078648735769e-05, 0.9783942103385925, 1.2325358511589002e-05, 0.0001646518794586882, 2.9519030704250326e-06, 3.5047145502176136e-05, 0.021242043003439903, 6.817725079599768e-05]}
{"dense_4": [3.9360446635328117e-07, 1.5146329133131076e-06, 8.913982583180768e-07, 1.0056473342956451e-07, 0.9999568462371826, 2.8909551019751234e-06, 1.7282200133195147e-06, 2.0352041246951558e-05, 4.4106798213761067e-07, 1.4787417057959829e-05]}
{"dense_4": [0.00029731859103776515, 4.8624599003233016e-05, 9.94099991658004e-07, 0.000681764620821923, 7.065559475449845e-05, 0.00010488789848750457, 3.454416486192713e-08, 0.014622341841459274, 2.875993050110992e-05, 0.9841446876525879]}
{"dense_4": [4.117673961445689e-05, 4.857050726059242e-07, 1.4468564586422872e-06, 5.958149245088862e-07, 2.2923772121430375e-05, 0.0005256680306047201, 0.9993765950202942, 6.190463182065287e-07, 3.0077948395046405e-05, 4.764787320254982e-07]}
{"dense_4": [1.5325727872550488e-05, 1.468697973905364e-06, 4.894273388345027e-06, 8.483966098538076e-07, 0.00011054472997784615, 3.934773485525511e-05, 0.9996366500854492, 4.567689302348299e-06, 0.00018623023061081767, 1.6563397764457477e-07]}
{"dense_4": [4.421589494540967e-08, 3.179950169851509e-08, 2.569193497947708e-07, 2.1202895368332975e-05, 5.05199636791076e-07, 0.9999616146087646, 3.695571194839431e-06, 2.0338148942755652e-07, 1.2245803873156547e-06, 1.1114073458884377e-05]}
{"dense_4": [6.950744864298031e-05, 6.571976700797677e-05, 1.599243114469573e-05, 7.958627975312993e-06, 0.9962961077690125, 7.530699804192409e-05, 9.499535372015089e-05, 0.0005604755715467036, 7.887463289080188e-05, 0.0027351202443242073]}
{"dense_4": [0.9999439716339111, 1.430406086910807e-07, 1.5413537539643585e-06, 1.3279888300132825e-08, 6.942753316252492e-06, 6.233648042197615e-10, 7.171362540248083e-06, 3.484597357328312e-07, 2.4013417032620055e-07, 3.96803006879054e-05]}
{"dense_4": [0.00012615967716556042, 1.656372296565678e-05, 0.00012327598233241588, 0.0002432389883324504, 1.2944790341862245e-06, 1.4383305824594572e-05, 2.0745643780628598e-07, 0.9986451268196106, 4.336720849096309e-06, 0.0008253980777226388]}
{"dense_4": [2.864534742741398e-08, 5.926443691350869e-08, 1.106774139003619e-08, 4.337414871713463e-09, 0.999994158744812, 3.963565688991366e-07, 1.3506510754268675e-07, 5.95561402860767e-07, 4.082919602410584e-08, 4.613789769791765e-06]}
{"dense_4": [0.9996428489685059, 4.6034787715143466e-07, 0.00034793568192981184, 2.0498399067037099e-07, 5.842009898060496e-08, 2.0277397538848163e-08, 1.5161209603320458e-06, 9.375552281198907e-07, 6.213206233951496e-07, 5.4716351769457106e-06]}
{"dense_4": [1.0665927447917056e-06, 0.9994833469390869, 1.5662588339182548e-05, 4.610494215739891e-05, 0.00017407821724191308, 3.1212723570206435e-06, 7.450837074429728e-06, 4.7874120355118066e-05, 0.0002140892465831712, 7.214495326479664e-06]}
{"dense_4": [3.485829846994193e-09, 1.3926383246598562e-07, 1.7195761259358733e-08, 0.999984860420227, 4.830705169212024e-09, 2.642930894580786e-06, 2.378669136660072e-11, 3.648669064659771e-07, 1.190092007163912e-07, 1.1954361070820596e-05]}
{"dense_4": [4.1989169403677806e-05, 0.9939885139465332, 0.00023143520229496062, 0.000467840232886374, 0.0017223802860826254, 2.1851879864698276e-05, 2.8205846319906414e-05, 0.0015598133904859424, 0.0011824371758848429, 0.0007556051132269204]}
{"dense_4": [1.4704270023813137e-09, 1.5099801942142221e-07, 2.585755964901182e-08, 0.9999945163726807, 2.8277360897277504e-08, 2.089610461553093e-06, 1.445890907003644e-10, 1.876148303381342e-08, 2.0603122266038554e-06, 1.1246230542383273e-06]}
{"dense_4": [0.0005872235633432865, 4.045620244141901e-06, 4.563653783407062e-05, 6.027728431945434e-07, 0.9984148740768433, 4.2865463910857216e-05, 0.0008623913745395839, 2.6219257051707245e-05, 6.5173398979823105e-06, 9.604128536011558e-06]}
{"dense_4": [1.6163271538971458e-06, 5.653185326082166e-06, 6.230635335668921e-05, 0.0002850361343007535, 7.771900101261053e-08, 1.4939345760467404e-08, 8.85596396216215e-09, 0.9996383190155029, 3.250707095503458e-06, 3.6677843127108645e-06]}
{"dense_4": [8.443149681625073e-07, 0.000688467116560787, 0.9989867806434631, 0.00026798417093232274, 8.637068837913375e-09, 2.206630142609356e-06, 1.71692633443854e-08, 4.072795491083525e-05, 1.2371518096188083e-05, 5.978006925033696e-07]}
{"dense_4": [5.8380260270496365e-06, 6.182613105920609e-06, 0.0003089890524279326, 6.427273183362558e-05, 5.327158802970189e-08, 1.7004512642415648e-07, 3.156231187517733e-08, 0.9996024966239929, 2.5807448764680885e-06, 9.469944416196086e-06]}
{"dense_4": [9.20690979455685e-07, 0.9997621178627014, 6.24218728262349e-06, 6.926334208401386e-06, 4.2113133531529456e-05, 3.5096684314339655e-07, 2.1129485503479373e-06, 6.729870801791549e-05, 0.00010107098933076486, 1.0857039342226926e-05]}
{"dense_4": [0.00023532846535090357, 0.0009831805946305394, 0.8774676322937012, 0.11667138338088989, 2.539358547437587e-06, 0.0004478911869227886, 9.468862117500976e-06, 0.00013741532166022807, 0.004030674695968628, 1.4404648027266376e-05]}
{"dense_4": [1.5446631778104347e-06, 0.9998629093170166, 1.2754469480569242e-06, 1.0386269423179328e-05, 6.147311796667054e-05, 4.7293294613837134e-08, 2.3831394173612352e-06, 1.432539829693269e-05, 4.08755149692297e-05, 4.841976533498382e-06]}
{"dense_4": [7.46308287489228e-05, 0.9963089823722839, 0.0004667982866521925, 0.00015182248898781836, 0.00029063853435218334, 2.4462491637677886e-05, 0.00014966211165301502, 0.0011903287377208471, 0.001044334378093481, 0.00029829906998202205]}
{"dense_4": [2.529483026592061e-05, 4.7505080146947876e-05, 0.00036648340756073594, 0.0003180412168148905, 1.4941231256671017e-06, 2.9773611913697096e-06, 2.602837696485949e-07, 0.9991552829742432, 4.205890036246274e-06, 7.84882577136159e-05]}
{"dense_4": [4.196063940753447e-08, 1.0986278766722535e-06, 1.977828034682716e-08, 1.2436903773505037e-07, 0.9998840093612671, 7.564297987983082e-08, 4.3475347411003895e-07, 4.160959360888228e-05, 1.5702353266533464e-06, 7.103652751538903e-05]}
{"dense_4": [0.00018604107026476413, 0.015205872245132923, 0.9486846327781677, 0.004650505725294352, 0.020853115245699883, 0.0015132816042751074, 0.003219767939299345, 0.0031987919937819242, 0.0024776775389909744, 1.032319778460078e-05]}
{"dense_4": [5.653632939583986e-08, 8.649007213534787e-06, 1.2966139365744311e-05, 0.9998767375946045, 6.93821220920654e-07, 5.2055234846193343e-05, 6.561307230867897e-08, 1.3433960930342437e-06, 4.582853216561489e-05, 1.6364347175112925e-06]}
{"dense_4": [2.991972905874718e-06, 1.1410419347157585e-06, 1.2089757319699856e-06, 0.0001507979177404195, 1.2899838111479767e-05, 0.9995959401130676, 0.00011437755892984569, 6.534246949740918e-06, 7.413715502480045e-05, 3.9997237763600424e-05]}
{"dense_4": [3.3258453413509415e-07, 0.9989987015724182, 4.578313382808119e-05, 0.0002951081842184067, 0.00015111036191228777, 1.1226526339669363e-06, 6.082308914301393e-07, 5.5109783716034144e-05, 0.0004132276226300746, 3.89092238037847e-05]}
{"dense_4": [2.8045587896485813e-06, 3.6126086342846975e-05, 0.9998457431793213, 4.7215085942298174e-05, 8.356998478120659e-06, 2.579313331807498e-06, 3.322369957459159e-05, 1.1667711078189313e-05, 1.2089464689779561e-05, 2.757557027166513e-08]}
{"dense_4": [4.674211018240726e-10, 7.561657966448365e-09, 5.997694701997958e-11, 2.9551153191142987e-10, 0.999998927116394, 1.866551713192166e-08, 2.1325757160184367e-08, 1.856056570659348e-07, 3.35590222277915e-08, 8.555180102121085e-07]}
{"dense_4": [6.157394523143012e-07, 8.307010830321815e-07, 1.7231486992841383e-07, 1.2393680037803279e-07, 0.9998052716255188, 3.6171471151646983e-07, 9.958749842553516e-07, 1.3373633919400163e-05, 1.0287787972629303e-06, 0.00017722704797051847]}
{"dense_4": [0.00019200700626242906, 5.2816308198089246e-06, 3.678864231915213e-05, 8.746880666876677e-06, 5.782898733741604e-05, 0.00031655156635679305, 0.9993688464164734, 4.428342890605563e-06, 9.075137313629966e-06, 4.485529814246547e-07]}
{"dense_4": [4.1557616015097665e-08, 8.367489385818772e-07, 8.726315172680188e-07, 0.999950647354126, 1.1789078513402274e-07, 5.117439741297858e-06, 2.3180544150847027e-09, 1.5323224715757533e-06, 3.820652636932209e-05, 2.555492073952337e-06]}
{"dense_4": [2.3458900955120043e-07, 3.6013443605042994e-07, 1.1576701126614353e-06, 5.594071262748912e-05, 4.922210064250976e-05, 0.9997097849845886, 3.9574406400788575e-06, 6.442326139222132e-06, 2.43469730776269e-06, 0.00017052682233043015]}
{"dense_4": [8.277866072603501e-06, 1.205041553475894e-05, 7.074770564940991e-06, 0.005122547969222069, 1.259358487004647e-05, 0.9929267764091492, 1.9889102986780927e-05, 3.776459561777301e-05, 0.00016394715930800885, 0.0016890630358830094]}
{"dense_4": [1.170138148154365e-06, 2.7865896257139866e-08, 6.519488948697472e-08, 6.997015855603195e-09, 1.5382320270873606e-06, 7.537668352597393e-06, 0.9999812841415405, 3.27414162448747e-09, 8.218167749873828e-06, 3.1490416940727073e-09]}
{"dense_4": [0.9942948222160339, 2.4168044546968304e-05, 0.0002622847387101501, 5.956654604233336e-06, 7.879588520154357e-05, 1.6484413208672777e-05, 0.005089109297841787, 2.25154963118257e-05, 0.00015731404710095376, 4.854802318732254e-05]}
{"dense_4": [7.10202563425355e-09, 2.142387600656548e-08, 1.3750898197883998e-09, 9.66823177073195e-10, 0.9999964237213135, 8.406838958308072e-08, 6.179536882200409e-08, 1.3010991324335919e-06, 3.0809044204715974e-08, 2.0470627077884274e-06]}
{"dense_4": [1.1130178791063372e-05, 0.9987505674362183, 6.54939649393782e-05, 1.5853032891754992e-05, 0.00010826435027411208, 1.0939405683529912e-06, 1.1199110304005444e-05, 0.0008791183936409652, 0.00013186008436605334, 2.543115806474816e-05]}
{"dense_4": [7.613519414917391e-07, 1.876324091654169e-07, 4.915964879614876e-08, 2.5779512725421228e-05, 0.0003002433804795146, 3.372259698153357e-07, 1.780596337042084e-09, 1.639175388845615e-05, 5.168679990674718e-07, 0.9996557235717773]}
{"dense_4": [0.00016252034401986748, 6.340761319734156e-05, 4.0344784792978317e-05, 0.0006193959852680564, 6.651756120845675e-05, 0.9932358860969543, 6.466973718488589e-05, 0.00427428912371397, 0.0007852190756238997, 0.0006877689156681299]}
{"dense_4": [3.62136290732451e-07, 1.9552506103082123e-07, 2.4222572392318398e-06, 2.2085390810389072e-05, 3.898394052015419e-09, 2.23579831981624e-07, 3.276232340976293e-10, 0.9999697208404541, 7.540078783563331e-09, 4.949535650666803e-06]}
{"dense_4": [1.8512742826715112e-05, 1.935303043865133e-05, 0.002332694362848997, 0.005423806142061949, 2.644502728799125e-06, 1.156167945737252e-05, 1.091544277187495e-06, 2.912274248956237e-05, 0.9918618202209473, 0.00029939692467451096]}
{"dense_4": [0.00046537071466445923, 6.903315079398453e-05, 0.00017623712483327836, 0.0006321826949715614, 0.01116980891674757, 0.0042807962745428085, 0.00018886776524595916, 0.0004398205492179841, 0.017270201817154884, 0.9653075933456421]}
{"dense_4": [3.4900126593129244e-06, 9.587824933987577e-06, 0.0014516266528517008, 0.9935018420219421, 9.459224656893639e-07, 4.976581635673938e-07, 5.9775158156583075e-09, 4.7210546654241625e-06, 0.004932326730340719, 9.494545520283282e-05]}
{"dense_4": [1.0461753845447674e-06, 4.0836346215655794e-07, 1.7367005057167262e-05, 0.0008131434442475438, 1.1064365708080004e-06, 3.999985231928349e-09, 3.3594134141168297e-09, 0.9991458654403687, 3.7601193980663083e-07, 2.075003067147918e-05]}
{"dense_4": [2.6339910618844442e-05, 0.00010306161129847169, 5.080848950456129e-06, 5.3310075600165874e-05, 0.9937859773635864, 0.00018982740584760904, 4.4602464186027646e-05, 0.00010458700126037002, 4.335083212936297e-05, 0.005643988493829966]}
{"dense_4": [0.0012026369804516435, 0.00014754700532648712, 0.0014444958651438355, 0.00020413206948433071, 0.00229168264195323, 0.00024496990954503417, 0.9927600622177124, 0.0008546593016944826, 0.0008422359824180603, 7.585185358038871e-06]}
{"dense_4": [1.3365335860271443e-07, 1.7864605297290836e-06, 2.698040816540015e-06, 5.838607535224583e-07, 0.9998093247413635, 4.7674234338046517e-07, 2.990045402384567e-07, 1.5583018466713838e-06, 1.556564143356809e-06, 0.0001816393923945725]}
{"dense_4": [2.1491086688030236e-09, 4.3951931871788474e-08, 1.3298973478015341e-08, 0.9999934434890747, 3.540754134334634e-09, 2.2519093079154118e-07, 2.4715547317089026e-11, 1.9316372501521073e-08, 4.991471541870851e-06, 1.2899929515697295e-06]}
{"dense_4": [0.9996438026428223, 8.68607730808435e-06, 2.997780029545538e-06, 5.869712254025217e-07, 1.0299986570316833e-06, 2.5447529878874775e-06, 0.0002956005046144128, 3.2532589102629572e-06, 8.19626393422368e-07, 4.063532469444908e-05]}
{"dense_4": [5.607432285614777e-06, 1.7539526879772893e-06, 1.7753667634679005e-05, 4.5501426939154044e-05, 4.719076684978063e-08, 6.151092435402461e-08, 3.799419445726926e-09, 0.9999147653579712, 6.644120986720736e-08, 1.4424175788008142e-05]}
{"dense_4": [0.9999973773956299, 1.9203158174718737e-08, 2.752326011545847e-08, 1.8284834757409385e-10, 4.598343927852966e-09, 9.87087855897073e-10, 5.720167450817826e-07, 4.4335877191770123e-08, 2.4660340436355455e-09, 1.930554844875587e-06]}
{"dense_4": [4.909317067358643e-05, 1.0733077942859381e-05, 0.9990701079368591, 0.0008520300616510212, 1.4351333454953874e-08, 2.071047902063583e-07, 1.1586362091975388e-08, 1.65510118677048e-05, 1.096802179745282e-06, 2.370875051838084e-07]}
{"dense_4": [3.5637534892885014e-05, 1.988271833397448e-05, 1.1808965609816369e-06, 0.0004739372816402465, 9.87469611573033e-05, 7.444082683605302e-08, 1.3738686632791541e-08, 0.0004685870953835547, 0.00043420353904366493, 0.998467743396759]}
{"dense_4": [1.0702991630751058e-06, 0.9995343685150146, 2.0405514078447595e-06, 9.115207831200678e-06, 0.00026057742070406675, 8.307810048790998e-07, 2.7401565603213385e-06, 3.888908395310864e-05, 0.00010512146400287747, 4.530381193035282e-05]}
{"dense_4": [1.2112924423490767e-06, 1.883148252090905e-05, 1.2147565939812921e-05, 0.00010710670903790742, 1.6574936125834938e-06, 3.258947600670581e-08, 1.7957809461677243e-08, 0.9998078942298889, 8.347693096766307e-07, 5.026471626479179e-05]}
{"dense_4": [4.851787238635552e-08, 4.557562988338759e-06, 6.23226355855877e-07, 0.9999253749847412, 4.4227601847524056e-07, 1.300750591326505e-05, 3.132308412645557e-09, 1.5752716819861234e-07, 3.8076941564213485e-05, 1.7793297956814058e-05]}
{"dense_4": [3.9534003008157015e-05, 0.0028476824518293142, 0.9953285455703735, 0.00021031273354310542, 2.0567899809975643e-06, 8.511372470820788e-06, 7.025736977084307e-06, 0.0012790231266990304, 0.000252466241363436, 2.4771099560894072e-05]}
{"dense_4": [2.2644229829893447e-05, 0.00012218751362524927, 1.5092207661382417e-07, 0.00011782807996496558, 0.008980984799563885, 4.12814461014932e-06, 2.0483905416313064e-07, 0.00036119454307481647, 9.388887701788917e-05, 0.9902967810630798]}
{"dense_4": [5.131082434672862e-05, 4.0974540752358735e-05, 0.0005086185410618782, 0.04088658094406128, 1.8089065179083264e-07, 1.9859498934238218e-06, 1.0311749676361615e-08, 0.9526558518409729, 0.0012181526981294155, 0.004636329598724842]}
{"dense_4": [0.00010299132554791868, 1.4596721484849695e-05, 4.8512356443097815e-06, 2.1249124984024093e-05, 1.8382905182079412e-05, 0.00011134202213725075, 5.021976789976179e-07, 0.9970218539237976, 1.6750639133533696e-06, 0.0027024676091969013]}
{"dense_4": [0.00039832814945839345, 8.152820555551443e-06, 2.672470327524934e-05, 2.5097453999478603e-06, 7.53708227421157e-05, 0.00024328746076207608, 0.999210000038147, 2.9278642159624724e-06, 2.116926589224022e-05, 1.1488057680253405e-05]}
{"dense_4": [5.872686337227151e-09, 7.817757818884274e-08, 0.9999958276748657, 3.648735173555906e-06, 2.3042028285402694e-09, 4.014672760899174e-10, 9.602996176028e-11, 3.813002535935084e-07, 8.314423638466906e-08, 2.0800612621219017e-10]}
{"dense_4": [2.7238445909461007e-05, 6.07698893873021e-06, 4.137259111303138e-06, 3.6611816085496685e-06, 1.1826674608528265e-06, 1.1839798617074848e-06, 6.196208346409549e-08, 0.9998487234115601, 3.530218748437619e-07, 0.00010722698061726987]}
{"dense_4": [8.081067790044472e-06, 6.476014095824212e-05, 3.300377284176648e-05, 0.00014483100676443428, 9.4217000878416e-05, 0.00012424588203430176, 6.478651630459353e-05, 6.814670086896513e-06, 0.9994145631790161, 4.467058533919044e-05]}
{"dense_4": [5.805190772534274e-10, 1.3613750127206004e-08, 3.1187066817928155e-10, 5.519272261444996e-10, 0.9999990463256836, 8.921544392137548e-09, 1.3696285883213477e-08, 1.0616611234581796e-07, 3.857326902334535e-08, 8.253370538113813e-07]}
{"dense_4": [6.149541604827391e-06, 3.32526360580232e-05, 0.00011225066555198282, 0.0005996235995553434, 1.1851007997165652e-07, 3.175686060785665e-07, 1.2744119537444476e-08, 0.9991897940635681, 1.888413135020528e-05, 3.956983346142806e-05]}
{"dense_4": [1.3465202641782525e-07, 1.7457354260841385e-05, 1.3017292985750828e-06, 0.9999065399169922, 1.0205314993072534e-06, 1.9982055164291523e-05, 3.759726752150527e-08, 4.739620635518804e-05, 3.184260549460305e-06, 2.8625252070924034e-06]}
{"dense_4": [5.76184106648725e-07, 1.1310038239287223e-08, 1.3482767258210515e-07, 1.7169269339589732e-09, 1.120237629947951e-05, 5.937279752288305e-07, 0.9999865293502808, 5.285635751306472e-08, 7.974821869538573e-07, 9.437547410229286e-10]}
{"dense_4": [9.052580935531296e-06, 0.9992167949676514, 9.264230175176635e-05, 5.022254481445998e-05, 0.00016545764810871333, 1.4340662346512545e-06, 2.465282341290731e-05, 0.00016860873438417912, 0.00026677356800064445, 4.280449047655566e-06]}
{"dense_4": [7.257992820086656e-07, 8.49700427352218e-06, 2.739346882663085e-06, 0.9991341233253479, 4.130336037633242e-07, 3.0805105780018494e-05, 1.429507179295797e-08, 1.3348090988074546e-06, 0.0006434918032027781, 0.00017783093790058047]}
{"dense_4": [2.818382199620828e-06, 3.434464872498211e-07, 2.544562448747456e-06, 1.0018027296609944e-06, 2.0598911305569345e-06, 2.4381424736930057e-05, 0.9999607801437378, 1.1415362166644627e-07, 5.8862542573479e-06, 1.6820454362687087e-08]}
{"dense_4": [0.0004062233492732048, 0.00044782477198168635, 6.798261892981827e-05, 0.0007093496969901025, 0.21590425074100494, 0.00012047007476212457, 6.1054750403855e-05, 0.00204681558534503, 0.002109599532559514, 0.7781264185905457]}
{"dense_4": [1.9808124918085923e-08, 9.186272222905245e-07, 4.474032877510581e-08, 0.9998049139976501, 6.576580613426586e-09, 3.1079827749636024e-05, 3.345768800899762e-11, 1.1006628852783251e-07, 1.9587121187214507e-06, 0.00016095633327495307]}
{"dense_4": [1.600689358838281e-07, 0.9998149275779724, 5.321024673321517e-06, 2.0794432202819735e-06, 2.4734694306971505e-05, 4.2006185196896695e-08, 1.1732216762538883e-06, 5.006630999560002e-06, 0.00014510889013763517, 1.556822212478437e-06]}
{"dense_4": [2.2946021260850102e-07, 5.079943434793677e-07, 6.036219133420673e-07, 1.2454750049073482e-06, 0.9998249411582947, 1.2133039035688853e-06, 6.3804659475863446e-06, 8.162371045727923e-07, 6.821285751357209e-06, 0.0001572160836076364]}
{"dense_4": [6.799281254643574e-05, 0.9470173716545105, 0.0003937732835765928, 0.002932931063696742, 0.019821874797344208, 0.0004472081200219691, 5.2283448894741014e-05, 0.004704243503510952, 0.011075703427195549, 0.013486633077263832]}
{"dense_4": [6.288865961323609e-07, 9.20075035537593e-06, 8.977236575447023e-05, 0.0004458693729247898, 6.209404546098085e-06, 2.2431866142369472e-08, 2.290657050707523e-07, 0.9994433522224426, 3.3251669719902566e-06, 1.4334204934129957e-06]}
{"dense_4": [0.0008987342589534819, 2.7072263037553057e-05, 2.5740806449903175e-05, 4.193161942112056e-07, 0.0001455019519198686, 0.0002492906933184713, 0.9986374974250793, 1.183316157948866e-06, 1.2013129889965057e-05, 2.460050382069312e-06]}
{"dense_4": [1.5562621911158203e-06, 3.054892516729524e-08, 1.2458880860322097e-07, 0.00016186163702514023, 1.2108479268135852e-06, 8.275424079329241e-08, 5.022530169540573e-10, 9.900883014779538e-06, 3.017926019310835e-06, 0.9998223185539246]}
|
examples/notebooks/2_Build_a_CNN_with_NETS.ipynb
|
###Markdown
**Build a CNN on MNIST Dataset**

0. Set Up

For this tutorial, we will be using ``numpy`` to generate data and all operations, ``matplotlib`` to render and plot images and of course ``nets`` for ``Tensor`` operations and computational graph creation. We will set the ``SEED`` for random numbers from ``numpy``, so each time we reload the notebook the results will be the same, as numbers will be generated in the same way. Finally, we set an autoreload with ``%load_ext autoreload`` and ``%autoreload 2`` for debug mode purposes. This enables the notebook to reload the modules each time we run a cell, so it keeps everything up-to-date. More info [on StackOverflow](http://stackoverflow.com/questions/1907993/autoreload-of-modules-in-ipython).

Let's load the packages we need to run our tests:
###Code
import numpy as np
import random as rd
import matplotlib.pyplot as plt
# This is a bit of magic to make matplotlib figures appear inline in the notebook
# rather than in a new window.
%matplotlib inline
plt.rcParams['figure.figsize'] = (10.0, 8.0) # set default size of plots
plt.rcParams['image.interpolation'] = 'nearest'
plt.style.use('seaborn-darkgrid')
# Change the font ?
font = {'size' : 15}
plt.rc('font', **font)
# Some more magic so that the notebook will reload external python modules;
# see http://stackoverflow.com/questions/1907993/autoreload-of-modules-in-ipython
%load_ext autoreload
%autoreload 2
from psutil import virtual_memory
gpu_info = !nvidia-smi
gpu_info = '\n'.join(gpu_info)
print(gpu_info)
ram_gb = virtual_memory().total / 1e9
print('Your runtime has {:.1f} gigabytes of available RAM\n'.format(ram_gb))
import sys
sys.path.append("../../")
# Let's fix the seed
SEED = 42
rd.seed(SEED)
np.random.seed(SEED)
###Output
_____no_output_____
###Markdown
1. Download the dataset
###Code
import numpy as np
import random as rd
import matplotlib.pyplot as plt
import nets
import nets.datasets as datasets
# IMPORTANT: normalize the data !
# 1. reshape from (data_length, 28, 28) to (data_length, 28 * 28)
reshape = lambda x: x.reshape(-1, 784)
# 2. scale the data: data = (data - mean) / std
normalize = lambda x: (x - np.mean(x, axis=1, keepdims=True)) / np.std(x, axis=1, keepdims=True)
# 3. all together
transform = lambda x: normalize(reshape(x)).reshape(-1, 1, 28, 28)#.transpose(0, 3, 1, 2)
# Download the training and testing data
train_data, test_data = datasets.MNIST.splits(transform = transform)
train_data
train_data.data.cuda()
train_data.labels.cuda()
# CIFAR10 classes
# classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck')
# Configure the mosaic to display images
fig = plt.figure(figsize = (20, 10))
columns = 8
rows = 4
# Create the mosaic
for i in range(1, columns * rows + 1):
# Choose randomly an image
image_index = rd.randint(0, len(train_data) - 1)  # randint is inclusive on both ends
image, label = train_data[image_index]
# Plot it
fig.add_subplot(rows, columns, i)
plt.imshow(image.numpy().reshape(28, 28))
# Remove axis and display image's label
# plt.title(classes[label.data])
plt.axis('off')
# Render
plt.show()
###Output
_____no_output_____
###Markdown
2. Load the data
###Code
from nets.data import Iterator
BATCH_SIZE = 64
iterator = Iterator(train_data,
batch_size = BATCH_SIZE,
shuffle = True)
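# --- Added peek (a sketch, not in the original notebook): draw one batch from the
# iterator to confirm the shapes the classifier below expects, i.e. images of
# shape [batch_size, 1, 28, 28] with one label per image.
sample_images, sample_labels = next(iter(iterator))
print("images:", sample_images.shape, "| labels:", sample_labels.shape)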
import nets.nn as nn
class Classifier(nn.Module):
def __init__(self, input_dim, hidden_dim, output_dim):
super().__init__()
# Input batch shape: [batch_size, num_channel, height, width]
# for MNIST: [batch_size, 1 , 28 , 28 ]
self.conv = nn.Conv2d(1, 1, (3, 3), stride=1, pad=0)
# Output shape: [batch_size, num_channel, height - 2, width - 2]
# ie: [batch_size, 1 , 26 , 26 ]
self.pool = nn.MaxPool2d((2, 2), stride=2, pad=0)
# Output shape: [batch_size, num_channel, height / 2, width / 2]
# ie: [batch_size, 1 , 13 , 13 ]
# Input shape: [batch_size, num_channel * height * width]
# ie: [batch_size, 169 ]
self.layer1 = nn.Linear(input_dim, hidden_dim)
# Output shape: [batch_size, hidden_dim]
self.layer2 = nn.Linear(hidden_dim, hidden_dim)
self.layer3 = nn.Linear(hidden_dim, output_dim)
# Output shape: [batch_size, output_dim]
def forward(self, inputs):
# Convolution layer
out = self.conv(inputs)
# Decrease the dimensions
out = self.pool(out)
# Transition from 4-d tensor to 2-d tensor
out = out.reshape(out.shape[0], -1)
# From: [batch_size, 1, H, W]
# to: [batch_size, input_dim]
# Classification layer(s)
out = nets.relu(self.layer1(out))
out = nets.relu(self.layer2(out))
# Prediction layer
out = self.layer3(out)
return out
model = Classifier(169, 100, 10)
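# --- Added arithmetic check (a sketch, not in the original notebook): why the first
# Linear layer takes input_dim = 169. A 3x3 convolution with stride 1 and no padding
# shrinks 28x28 to 26x26, and 2x2 max-pooling with stride 2 halves that to 13x13,
# so the flattened feature vector has 13 * 13 = 169 entries.
conv_side = 28 - 3 + 1       # 26
pool_side = conv_side // 2   # 13
assert pool_side * pool_side == 169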
# Let's check the architecture
model
model.cuda()
###Output
_____no_output_____
###Markdown
5.3. Criterion & Optimizer
###Code
from nets.optim import SGD
from nets.nn import CrossEntropyLoss, MSELoss
from nets.nn.utils import one_hot
# How much do we update the parameters
LEARNING_RATE = 0.1
optimizer = SGD(model.parameters(), lr=LEARNING_RATE)
criterion = CrossEntropyLoss()
criterion.cuda()
###Output
_____no_output_____
###Markdown
5.4. Training
###Code
import time
from tqdm import tqdm
# Training iterations
EPOCHS = 10
#Display the time during training
start = time.time()
# Record the loss
history = []
# Run the simulation EPOCHS times
for epoch in range(EPOCHS):
# Keep track of the loss at each epoch
epoch_loss = 0.0
epoch_start = time.time()
# Number of batches
N = len(iterator)
# Predict for all batches
trange = tqdm(iterator, ascii=True, position=0, leave=True, total=len(iterator))
for idx, batch in enumerate(trange):
trange.set_description(f"Epoch {epoch:4d}/{EPOCHS}")
# Gradients accumulates, therefore we need to set them to zero at each iteration
model.zero_grad()
# Get the data from the batch
image, label = batch
# Run the forward pass on the model
predictions = model(image)
# Get how far are the predictions from the truth (aka gold labels)
label = one_hot(label, 10).astype(int)
loss = criterion(predictions, label)
# Compute the gradient
loss.backward()
# Update the parameters
optimizer.step()
# Record the loss for plotting
loss_avg = loss.item() / BATCH_SIZE # .item() is really important, it will free the memory from the computational graph
epoch_loss += loss_avg
# Let's plot a progress bar in the console
trange.set_postfix({f"train loss": f"{loss_avg:.6f}"})
# Update the history of all previous loss
history.append(epoch_loss / N)
# Check the mean loss for this loop
print(f"\nEpoch: {epoch:4d}/{EPOCHS} | loss: {epoch_loss / N:1.3E}")
# Duration of the total training loop
print('\ntime:', time.time() - start)
###Output
Epoch 0/10: : 938it [00:25, 36.64it/s, train loss=0.006496]
0%| | 0/937 [00:00<?, ?it/s]
###Markdown
```
Epoch:    0/10 | loss: 1.163E-02
Epoch:    1/10 | loss: 6.256E-03
```
###Code
plt.plot(history)
plt.title("Loss on MNIST dataset")
plt.xlabel("epochs")
plt.ylabel("loss")
# Configure the mosaic to display images
fig = plt.figure(figsize = (20, 10))
columns = 4
rows = 1
# Conv2d filter
fig.add_subplot(rows, columns, 1)
conv_filter = model._modules['conv'].weight
plt.imshow(conv_filter.numpy().reshape(3, 3))
plt.title('Conv2d filter')
plt.axis('off')
# Linear1 weight
fig.add_subplot(rows, columns, 2)
layer1_weight = model._modules['layer1'].weight
plt.imshow(layer1_weight.numpy())
plt.title('Layer1 weight')
plt.axis('off')
# Linear2 weight
fig.add_subplot(rows, columns, 3)
layer2_weight = model._modules['layer2'].weight
plt.imshow(layer2_weight.numpy())
plt.title('Layer2 weight')
plt.axis('off')
# Linear3 weight
fig.add_subplot(rows, columns, 4)
layer3_weight = model._modules['layer3'].weight
plt.imshow(layer3_weight.numpy())
plt.title('Layer3 weight')
plt.axis('off')
# Render
plt.show()
###Output
_____no_output_____
|
Colab_notebooks/CycleGAN_ZeroCostDL4Mic.ipynb
|
###Markdown
**CycleGAN**

---

CycleGAN is a method that can capture the characteristics of one image domain and learn how these characteristics can be translated into another image domain, all in the absence of any paired training examples. It was first published by [Zhu *et al.* in 2017](https://arxiv.org/abs/1703.10593). Unlike pix2pix, the image transformation performed does not require paired images for training (unsupervised learning) and is made possible here by using a set of two Generative Adversarial Networks (GANs) that learn to transform images both from the first domain to the second and vice-versa.

**This particular notebook enables unpaired image-to-image translation. If your dataset is paired, you should also consider using the pix2pix notebook.**

---

*Disclaimer*:

This notebook is part of the *Zero-Cost Deep-Learning to Enhance Microscopy* project (https://github.com/HenriquesLab/DeepLearning_Collab/wiki). Jointly developed by the Jacquemet (https://cellmig.org/) and Henriques (https://henriqueslab.github.io/) laboratories.

This notebook is based on the following paper: **Unpaired Image-to-Image Translation using Cycle-Consistent Adversarial Networks** from Zhu *et al.*, published on arXiv in 2017 (https://arxiv.org/abs/1703.10593).

The source code of the CycleGAN PyTorch implementation can be found at: https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix

**Please also cite this original paper when using or developing this notebook.**

**License**

---
###Code
#@markdown ##Double click to see the license information
#------------------------- LICENSE FOR ZeroCostDL4Mic------------------------------------
#This ZeroCostDL4Mic notebook is distributed under the MIT licence
#------------------------- LICENSE FOR CycleGAN ------------------------------------
#Copyright (c) 2017, Jun-Yan Zhu and Taesung Park
#All rights reserved.
#Redistribution and use in source and binary forms, with or without
#modification, are permitted provided that the following conditions are met:
#* Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#* Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
#AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
#IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
#DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
#FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
#DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
#SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
#CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
#OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
#OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#--------------------------- LICENSE FOR pix2pix --------------------------------
#BSD License
#For pix2pix software
#Copyright (c) 2016, Phillip Isola and Jun-Yan Zhu
#All rights reserved.
#Redistribution and use in source and binary forms, with or without
#modification, are permitted provided that the following conditions are met:
#* Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#* Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#----------------------------- LICENSE FOR DCGAN --------------------------------
#BSD License
#For dcgan.torch software
#Copyright (c) 2015, Facebook, Inc. All rights reserved.
#Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
#Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
#Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
#Neither the name Facebook nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
#THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
###Output
_____no_output_____
###Markdown
**0. Before getting started**

---

To train CycleGAN, **you only need two folders containing PNG images**. The images do not need to be paired.

While you do not need paired images to train CycleGAN, if possible, **we strongly recommend that you generate a paired dataset. This means that the same image needs to be acquired in the two conditions. These images can be used to assess the quality of your trained model (Quality control dataset)**. The quality control assessment can be done directly in this notebook. Please note that you currently can **only use .png files!**

Here's a common data structure that can work:

* Experiment A
  - **Training dataset (non-matching images)**
    - Training_source
      - img_1.png, img_2.png, ...
    - Training_target
      - img_1.png, img_2.png, ...
  - **Quality control dataset (matching images)**
    - Training_source
      - img_1.png, img_2.png
    - Training_target
      - img_1.png, img_2.png
  - **Data to be predicted**
  - **Results**

---

**Important note**

- If you wish to **Train a network from scratch** using your own dataset (and we encourage everyone to do that), you will need to run **sections 1 - 4**, then use **section 5** to assess the quality of your model and **section 6** to run predictions using the model that you trained.
- If you wish to **Evaluate your model** using a model previously generated and saved on your Google Drive, you will only need to run **sections 1 and 2** to set up the notebook, then use **section 5** to assess the quality of your model.
- If you only wish to **run predictions** using a model previously generated and saved on your Google Drive, you will only need to run **sections 1 and 2** to set up the notebook, then use **section 6** to run the predictions on the desired model.

---

**1. Install CycleGAN and dependencies**

---
###Code
Notebook_version = '1.13'
Network = 'CycleGAN'
from builtins import any as b_any
def get_requirements_path():
# Store requirements file in 'contents' directory
current_dir = os.getcwd()
dir_count = current_dir.count('/') - 1
path = '../' * (dir_count) + 'requirements.txt'
return path
def filter_files(file_list, filter_list):
filtered_list = []
for fname in file_list:
if b_any(fname.split('==')[0] in s for s in filter_list):
filtered_list.append(fname)
return filtered_list
def build_requirements_file(before, after):
path = get_requirements_path()
# Exporting requirements.txt for local run
!pip freeze > $path
# Get minimum requirements file
df = pd.read_csv(path, delimiter = "\n")
mod_list = [m.split('.')[0] for m in after if not m in before]
req_list_temp = df.values.tolist()
req_list = [x[0] for x in req_list_temp]
# Replace with package name and handle cases where import name is different to module name
mod_name_list = [['sklearn', 'scikit-learn'], ['skimage', 'scikit-image']]
mod_replace_list = [[x[1] for x in mod_name_list] if s in [x[0] for x in mod_name_list] else s for s in mod_list]
filtered_list = filter_files(req_list, mod_replace_list)
file=open(path,'w')
for item in filtered_list:
file.writelines(item + '\n')
file.close()
import sys
before = [str(m) for m in sys.modules]
#@markdown ##Install CycleGAN and dependencies
#------- Code from the cycleGAN demo notebook starts here -------
#Here, we install libraries which are not already included in Colab.
!git clone https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix
import os
os.chdir('pytorch-CycleGAN-and-pix2pix/')
!pip install -r requirements.txt
!pip install fpdf
import imageio
from skimage import data
from skimage import exposure
from skimage.exposure import match_histograms
from skimage.util import img_as_int
# ------- Common variable to all ZeroCostDL4Mic notebooks -------
import numpy as np
from matplotlib import pyplot as plt
import urllib
import os, random
import shutil
import zipfile
from tifffile import imread, imsave
import time
import sys
from pathlib import Path
import pandas as pd
import csv
from glob import glob
from scipy import signal
from scipy import ndimage
from skimage import io
from sklearn.linear_model import LinearRegression
from skimage.util import img_as_uint
import matplotlib as mpl
from skimage.metrics import structural_similarity
from skimage.metrics import peak_signal_noise_ratio as psnr
from astropy.visualization import simple_norm
from skimage import img_as_float32
from skimage.util import img_as_ubyte
from tqdm import tqdm
from fpdf import FPDF, HTMLMixin
from datetime import datetime
from pip._internal.operations.freeze import freeze
import subprocess
import torch
# Colors for the warning messages
class bcolors:
WARNING = '\033[31m'
#Disable some of the tensorflow warnings
import warnings
warnings.filterwarnings("ignore")
print("Libraries installed")
# Check if this is the latest version of the notebook
All_notebook_versions = pd.read_csv("https://raw.githubusercontent.com/HenriquesLab/ZeroCostDL4Mic/master/Colab_notebooks/Latest_Notebook_versions.csv", dtype=str)
print('Notebook version: '+Notebook_version)
Latest_Notebook_version = All_notebook_versions[All_notebook_versions["Notebook"] == Network]['Version'].iloc[0]
print('Latest notebook version: '+Latest_Notebook_version)
if Notebook_version == Latest_Notebook_version:
print("This notebook is up-to-date.")
else:
print(bcolors.WARNING +"A new version of this notebook has been released. We recommend that you download it at https://github.com/HenriquesLab/ZeroCostDL4Mic/wiki")
def pdf_export(trained = False, augmentation = False, pretrained_model = False):
class MyFPDF(FPDF, HTMLMixin):
pass
pdf = MyFPDF()
pdf.add_page()
pdf.set_right_margin(-1)
pdf.set_font("Arial", size = 11, style='B')
day = datetime.now()
datetime_str = str(day)[0:10]
Header = 'Training report for '+Network+' model ('+model_name+')\nDate: '+datetime_str
pdf.multi_cell(180, 5, txt = Header, align = 'L')
# add another cell
if trained:
training_time = "Training time: "+str(hour)+ "hour(s) "+str(mins)+"min(s) "+str(round(sec))+"sec(s)"
pdf.cell(190, 5, txt = training_time, ln = 1, align='L')
pdf.ln(1)
Header_2 = 'Information for your materials and method:'
pdf.cell(190, 5, txt=Header_2, ln=1, align='L')
all_packages = ''
for requirement in freeze(local_only=True):
all_packages = all_packages+requirement+', '
# print(all_packages)
#Main Packages
main_packages = ''
version_numbers = []
for name in ['tensorflow','numpy','torch']:
find_name=all_packages.find(name)
main_packages = main_packages+all_packages[find_name:all_packages.find(',',find_name)]+', '
#Version numbers only here:
version_numbers.append(all_packages[find_name+len(name)+2:all_packages.find(',',find_name)])
cuda_version = subprocess.run('nvcc --version',stdout=subprocess.PIPE, shell=True)
cuda_version = cuda_version.stdout.decode('utf-8')
cuda_version = cuda_version[cuda_version.find(', V')+3:-1]
gpu_name = subprocess.run('nvidia-smi',stdout=subprocess.PIPE, shell=True)
gpu_name = gpu_name.stdout.decode('utf-8')
gpu_name = gpu_name[gpu_name.find('Tesla'):gpu_name.find('Tesla')+10]
#print(cuda_version[cuda_version.find(', V')+3:-1])
#print(gpu_name)
shape = io.imread(Training_source+'/'+os.listdir(Training_source)[1]).shape
dataset_size = len(os.listdir(Training_source))
text = 'The '+Network+' model was trained from scratch for '+str(number_of_epochs)+' epochs on '+str(dataset_size)+' paired image patches (image dimensions: '+str(shape)+', patch size: ('+str(patch_size)+','+str(patch_size)+')) with a batch size of '+str(batch_size)+' and a least-square GAN loss function, using the '+Network+' ZeroCostDL4Mic notebook (v '+Notebook_version[0]+') (von Chamier & Laine et al., 2020). Key python packages used include tensorflow (v '+version_numbers[0]+'), numpy (v '+version_numbers[1]+'), torch (v '+version_numbers[2]+'), cuda (v '+cuda_version+'). The training was accelerated using a '+gpu_name+'GPU.'
if pretrained_model:
text = 'The '+Network+' model was trained for '+str(number_of_epochs)+' epochs on '+str(dataset_size)+' paired image patches (image dimensions: '+str(shape)+', patch size: ('+str(patch_size)+','+str(patch_size)+')) with a batch size of '+str(batch_size)+' and an least-square GAN loss function, using the '+Network+' ZeroCostDL4Mic notebook (v '+Notebook_version[0]+') (von Chamier & Laine et al., 2020). The model was retrained from a pretrained model. Key python packages used include tensorflow (v '+version_numbers[0]+'), numpy (v '+version_numbers[1]+'), torch (v '+version_numbers[2]+'), cuda (v '+cuda_version+'). The training was accelerated using a '+gpu_name+'GPU.'
pdf.set_font('')
pdf.set_font_size(10.)
pdf.multi_cell(190, 5, txt = text, align='L')
pdf.set_font('')
pdf.set_font('Arial', size = 10, style = 'B')
pdf.ln(1)
pdf.cell(28, 5, txt='Augmentation: ', ln=0)
pdf.set_font('')
if augmentation:
aug_text = 'The dataset was augmented by default'
else:
aug_text = 'No augmentation was used for training.'
pdf.multi_cell(190, 5, txt=aug_text, align='L')
pdf.set_font('Arial', size = 11, style = 'B')
pdf.ln(1)
pdf.cell(180, 5, txt = 'Parameters', align='L', ln=1)
pdf.set_font('')
pdf.set_font_size(10.)
if Use_Default_Advanced_Parameters:
pdf.cell(200, 5, txt='Default Advanced Parameters were enabled')
pdf.cell(200, 5, txt='The following parameters were used for training:')
pdf.ln(1)
html = """
<table width=40% style="margin-left:0px;">
<tr>
<th width = 50% align="left">Parameter</th>
<th width = 50% align="left">Value</th>
</tr>
<tr>
<td width = 50%>number_of_epochs</td>
<td width = 50%>{0}</td>
</tr>
<tr>
<td width = 50%>patch_size</td>
<td width = 50%>{1}</td>
</tr>
<tr>
<td width = 50%>batch_size</td>
<td width = 50%>{2}</td>
</tr>
<tr>
<td width = 50%>initial_learning_rate</td>
<td width = 50%>{3}</td>
</tr>
</table>
""".format(number_of_epochs,str(patch_size)+'x'+str(patch_size),batch_size,initial_learning_rate)
pdf.write_html(html)
#pdf.multi_cell(190, 5, txt = text_2, align='L')
pdf.set_font("Arial", size = 11, style='B')
pdf.ln(1)
pdf.cell(190, 5, txt = 'Training Dataset', align='L', ln=1)
pdf.set_font('')
pdf.set_font('Arial', size = 10, style = 'B')
pdf.cell(30, 5, txt= 'Training_source:', align = 'L', ln=0)
pdf.set_font('')
pdf.multi_cell(170, 5, txt = Training_source, align = 'L')
pdf.set_font('')
pdf.set_font('Arial', size = 10, style = 'B')
pdf.cell(29, 5, txt= 'Training_target:', align = 'L', ln=0)
pdf.set_font('')
pdf.multi_cell(170, 5, txt = Training_target, align = 'L')
#pdf.cell(190, 5, txt=aug_text, align='L', ln=1)
pdf.ln(1)
pdf.set_font('')
pdf.set_font('Arial', size = 10, style = 'B')
pdf.cell(22, 5, txt= 'Model Path:', align = 'L', ln=0)
pdf.set_font('')
pdf.multi_cell(170, 5, txt = model_path+'/'+model_name, align = 'L')
pdf.ln(1)
pdf.cell(60, 5, txt = 'Example Training pair', ln=1)
pdf.ln(1)
exp_size = io.imread('/content/TrainingDataExample_cycleGAN.png').shape
pdf.image('/content/TrainingDataExample_cycleGAN.png', x = 11, y = None, w = round(exp_size[1]/8), h = round(exp_size[0]/8))
pdf.ln(1)
ref_1 = 'References:\n - ZeroCostDL4Mic: von Chamier, Lucas & Laine, Romain, et al. "Democratising deep learning for microscopy with ZeroCostDL4Mic." Nature Communications (2021).'
pdf.multi_cell(190, 5, txt = ref_1, align='L')
ref_2 = '- cycleGAN: Zhu, Jun-Yan, et al. "Unpaired image-to-image translation using cycle-consistent adversarial networks." Proceedings of the IEEE international conference on computer vision. 2017.'
pdf.multi_cell(190, 5, txt = ref_2, align='L')
# if Use_Data_augmentation:
# ref_3 = '- Augmentor: Bloice, Marcus D., Christof Stocker, and Andreas Holzinger. "Augmentor: an image augmentation library for machine learning." arXiv preprint arXiv:1708.04680 (2017).'
# pdf.multi_cell(190, 5, txt = ref_3, align='L')
pdf.ln(3)
reminder = 'Important:\nRemember to perform the quality control step on all newly trained models\nPlease consider depositing your training dataset on Zenodo'
pdf.set_font('Arial', size = 11, style='B')
pdf.multi_cell(190, 5, txt=reminder, align='C')
pdf.output(model_path+'/'+model_name+'/'+model_name+"_training_report.pdf")
def qc_pdf_export():
class MyFPDF(FPDF, HTMLMixin):
pass
pdf = MyFPDF()
pdf.add_page()
pdf.set_right_margin(-1)
pdf.set_font("Arial", size = 11, style='B')
Network = 'cycleGAN'
day = datetime.now()
datetime_str = str(day)[0:10]
Header = 'Quality Control report for '+Network+' model ('+QC_model_name+')\nDate: '+datetime_str
pdf.multi_cell(180, 5, txt = Header, align = 'L')
all_packages = ''
for requirement in freeze(local_only=True):
all_packages = all_packages+requirement+', '
pdf.set_font('')
pdf.set_font('Arial', size = 11, style = 'B')
pdf.ln(2)
pdf.cell(190, 5, txt = 'Development of Training Losses', ln=1, align='L')
pdf.ln(1)
exp_size = io.imread(full_QC_model_path+'Quality Control/SSIMvsCheckpoint_data.png').shape
pdf.image(full_QC_model_path+'Quality Control/SSIMvsCheckpoint_data.png', x = 11, y = None, w = round(exp_size[1]/8), h = round(exp_size[0]/8))
pdf.ln(2)
pdf.set_font('')
pdf.set_font('Arial', size = 10, style = 'B')
pdf.ln(3)
pdf.cell(80, 5, txt = 'Example Quality Control Visualisation', ln=1)
pdf.ln(1)
exp_size = io.imread(full_QC_model_path+'Quality Control/QC_example_data.png').shape
if Image_type == 'RGB':
pdf.image(full_QC_model_path+'Quality Control/QC_example_data.png', x = 16, y = None, w = round(exp_size[1]/5), h = round(exp_size[0]/5))
if Image_type == 'Grayscale':
pdf.image(full_QC_model_path+'Quality Control/QC_example_data.png', x = 16, y = None, w = round(exp_size[1]/8), h = round(exp_size[0]/8))
pdf.ln(1)
pdf.set_font('')
pdf.set_font('Arial', size = 11, style = 'B')
pdf.ln(1)
pdf.cell(180, 5, txt = 'Quality Control Metrics', align='L', ln=1)
pdf.set_font('')
pdf.set_font_size(10.)
pdf.ln(1)
for checkpoint in os.listdir(full_QC_model_path+'Quality Control'):
if os.path.isdir(os.path.join(full_QC_model_path,'Quality Control',checkpoint)):
pdf.set_font('')
pdf.set_font('Arial', size = 10, style = 'B')
pdf.cell(70, 5, txt = 'Metrics for checkpoint: '+ str(checkpoint), align='L', ln=1)
html = """
<body>
<font size="8" face="Courier New" >
<table width=95% style="margin-left:0px;">"""
with open(full_QC_model_path+'Quality Control/'+str(checkpoint)+'/QC_metrics_'+QC_model_name+str(checkpoint)+'.csv', 'r') as csvfile:
metrics = csv.reader(csvfile)
header = next(metrics)
image = header[0]
mSSIM_PvsGT = header[1]
mSSIM_SvsGT = header[2]
header = """
<tr>
<th width = 60% align="left">{0}</th>
<th width = 20% align="center">{1}</th>
<th width = 20% align="center">{2}</th>
</tr>""".format(image,mSSIM_PvsGT,mSSIM_SvsGT)
html = html+header
for row in metrics:
image = row[0]
mSSIM_PvsGT = row[1]
mSSIM_SvsGT = row[2]
cells = """
<tr>
<td width = 60% align="left">{0}</td>
<td width = 20% align="center">{1}</td>
<td width = 20% align="center">{2}</td>
</tr>""".format(image,str(round(float(mSSIM_PvsGT),3)),str(round(float(mSSIM_SvsGT),3)))
html = html+cells
html = html+"""</body></table>"""
pdf.write_html(html)
pdf.ln(2)
else:
continue
pdf.ln(1)
pdf.set_font('')
pdf.set_font_size(10.)
ref_1 = 'References:\n - ZeroCostDL4Mic: von Chamier, Lucas & Laine, Romain, et al. "Democratising deep learning for microscopy with ZeroCostDL4Mic." Nature Communications (2021).'
pdf.multi_cell(190, 5, txt = ref_1, align='L')
ref_2 = '- cycleGAN: Zhu, Jun-Yan, et al. "Unpaired image-to-image translation using cycle-consistent adversarial networks." Proceedings of the IEEE international conference on computer vision. 2017.'
pdf.multi_cell(190, 5, txt = ref_2, align='L')
pdf.ln(3)
reminder = 'To find the parameters and other information about how this model was trained, go to the training_report.pdf of this model which should be in the folder of the same name.'
pdf.set_font('Arial', size = 11, style='B')
pdf.multi_cell(190, 5, txt=reminder, align='C')
pdf.output(full_QC_model_path+'Quality Control/'+QC_model_name+'_QC_report.pdf')
# Build requirements file for local run
after = [str(m) for m in sys.modules]
build_requirements_file(before, after)
###Output
_____no_output_____
###Markdown
**2. Initialise the Colab session**

---

**2.1. Check for GPU access**

---

By default, the session should be using Python 3 and GPU acceleration, but it is possible to ensure that these are set properly by doing the following:

Go to **Runtime -> Change the Runtime type**

**Runtime type: Python 3** *(Python 3 is the programming language in which this program is written)*

**Accelerator: GPU** *(Graphics processing unit)*
###Code
#@markdown ##Run this cell to check if you have GPU access
import tensorflow as tf
if tf.test.gpu_device_name()=='':
print('You do not have GPU access.')
print('Did you change your runtime?')
print('If the runtime setting is correct then Google did not allocate a GPU for your session')
print('Expect slow performance. To access a GPU, try reconnecting later')
else:
print('You have GPU access')
!nvidia-smi
###Output
_____no_output_____
###Markdown
**2.2. Mount your Google Drive**--- To use this notebook on the data present in your Google Drive, you need to mount your Google Drive to this notebook. Play the cell below to mount your Google Drive and follow the link. In the new browser window, select your drive and select 'Allow', copy the code, paste it into the cell and press enter. This will give Colab access to the data on the drive. Once this is done, your data are available in the **Files** tab on the top left of the notebook.
###Code
#@markdown ##Play the cell to connect your Google Drive to Colab
#@markdown * Click on the URL.
#@markdown * Sign in to your Google Account.
#@markdown * Copy the authorization code.
#@markdown * Enter the authorization code.
#@markdown * Click on "Files" site on the right. Refresh the site. Your Google Drive folder should now be available here as "drive".
# mount user's Google Drive to Google Colab.
from google.colab import drive
drive.mount('/content/gdrive')
###Output
_____no_output_____
###Markdown
**3. Select your parameters and paths**--- **3.1. Setting main training parameters**--- **Paths for training, predictions and results****`Training_source`, `Training_target`:** These are the paths to your folders containing the Training_source and Training_target training data respectively. To find the paths of the folders containing the respective datasets, go to your Files on the left of the notebook, navigate to the folder containing your files and copy the path by right-clicking on the folder, **Copy path** and pasting it into the right box below.**`model_name`:** Use only my_model -style, not my-model (use "_" not "-"). Do not use spaces in the name. Avoid using the name of an existing model (saved in the same folder) as it will be overwritten.**`model_path`**: Enter the path where your model will be saved once trained (for instance your result folder).**Training Parameters****`number_of_epochs`:** Input how many epochs (rounds) the network will be trained for. Preliminary results can already be observed after a few (10) epochs, but a full training should run for 200 epochs or more. Evaluate the performance after training (see 5). **Default value: 200****Advanced Parameters - experienced users only****`patch_size`:** CycleGAN divides the image into patches for training. Input the size of the patches (length of a side). The value should be smaller than the dimensions of the image and divisible by 4. **Default value: 512****When choosing the patch_size, the value should be i) large enough that it will enclose many instances, ii) small enough that the resulting patches fit into the RAM.** **`batch_size`:** This parameter defines the number of patches seen in each training step. Reducing or increasing the **batch size** may slow or speed up your training, respectively, and can influence network performance. **Default value: 1****`initial_learning_rate`:** Input the initial value to be used as the learning rate. **Default value: 0.0002**
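For intuition only, here is a minimal sketch of the two `patch_size` constraints described above (staying within the image and being divisible by 4); `my_image.png` is a placeholder path, and the actual checks are performed for you in the cell below.

```python
import imageio

img = imageio.imread("my_image.png")                       # placeholder path to one training image
patch_size = 512
patch_size = min(patch_size, img.shape[0], img.shape[1])   # cannot exceed the smallest image side
patch_size -= patch_size % 4                               # round down to a multiple of 4
print("Effective patch_size:", patch_size)
```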
###Code
#@markdown ###Path to training images:
Training_source = "" #@param {type:"string"}
InputFile = Training_source+"/*.png"
Training_target = "" #@param {type:"string"}
OutputFile = Training_target+"/*.png"
#Define where the patch file will be saved
base = "/content"
# model name and path
#@markdown ###Name of the model and path to model folder:
model_name = "" #@param {type:"string"}
model_path = "" #@param {type:"string"}
# other parameters for training.
#@markdown ###Training Parameters
#@markdown Number of epochs:
number_of_epochs = 200#@param {type:"number"}
#@markdown ###Advanced Parameters
Use_Default_Advanced_Parameters = True #@param {type:"boolean"}
#@markdown ###If not, please input:
patch_size = 512#@param {type:"number"} # in pixels
batch_size = 2#@param {type:"number"}
initial_learning_rate = 0.0002 #@param {type:"number"}
if (Use_Default_Advanced_Parameters):
print("Default advanced parameters enabled")
batch_size = 1
patch_size = 512
initial_learning_rate = 0.0002
#here we check that no model with the same name already exist, if so delete
if os.path.exists(model_path+'/'+model_name):
print(bcolors.WARNING +"!! WARNING: "+model_name+" already exists and will be deleted in the following cell !!")
print(bcolors.WARNING +"To continue training "+model_name+", choose a new model_name here, and load "+model_name+" in section 3.3")
#To use Cyclegan we need to organise the data in a way the model can understand
Saving_path= "/content/"+model_name
#Saving_path= model_path+"/"+model_name
if os.path.exists(Saving_path):
shutil.rmtree(Saving_path)
os.makedirs(Saving_path)
TrainA_Folder = Saving_path+"/trainA"
if os.path.exists(TrainA_Folder):
shutil.rmtree(TrainA_Folder)
os.makedirs(TrainA_Folder)
TrainB_Folder = Saving_path+"/trainB"
if os.path.exists(TrainB_Folder):
shutil.rmtree(TrainB_Folder)
os.makedirs(TrainB_Folder)
# Here we disable the pre-trained model by default (in case the corresponding cell is not run)
Use_pretrained_model = False
# Here we enable data augmentation by default (in case the corresponding cell is not run)
Use_Data_augmentation = True
# This will display a randomly chosen dataset input and output
random_choice = random.choice(os.listdir(Training_source))
x = imageio.imread(Training_source+"/"+random_choice)
#Find image XY dimension
Image_Y = x.shape[0]
Image_X = x.shape[1]
Image_min_dim = min(Image_Y, Image_X)
#Hyperparameters failsafes
if patch_size > min(Image_Y, Image_X):
patch_size = min(Image_Y, Image_X)
print (bcolors.WARNING + " Your chosen patch_size is bigger than the xy dimension of your image; therefore the patch_size chosen is now:",patch_size)
# Here we check that patch_size is divisible by 4
if not patch_size % 4 == 0:
patch_size = ((int(patch_size / 4)-1) * 4)
print (bcolors.WARNING + " Your chosen patch_size is not divisible by 4; therefore the patch_size chosen is now:",patch_size)
random_choice_2 = random.choice(os.listdir(Training_target))
y = imageio.imread(Training_target+"/"+random_choice_2)
f=plt.figure(figsize=(16,8))
plt.subplot(1,2,1)
plt.imshow(x, interpolation='nearest')
plt.title('Training source')
plt.axis('off');
plt.subplot(1,2,2)
plt.imshow(y, interpolation='nearest')
plt.title('Training target')
plt.axis('off');
plt.savefig('/content/TrainingDataExample_cycleGAN.png',bbox_inches='tight',pad_inches=0)
###Output
_____no_output_____
###Markdown
**3.2. Data augmentation**--- Data augmentation can improve training progress by amplifying differences in the dataset. This can be useful if the available dataset is small since, in this case, it is possible that a network could quickly learn every example in the dataset (overfitting), without augmentation. Augmentation is not necessary for training and if your training dataset is large you should disable it.Data augmentation is performed here by flipping the patches. By default data augmentation is enabled.
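As a rough illustration of the flip-based augmentation mentioned above (a sketch only; during training the flipping is handled internally by the CycleGAN training script, controlled via the `--no_flip` flag), `example_patch.png` below is a placeholder path:

```python
import numpy as np
import imageio

patch = imageio.imread("example_patch.png")  # placeholder path to one training patch
flipped_lr = np.fliplr(patch)                # horizontal flip
flipped_ud = np.flipud(patch)                # vertical flip
```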
###Code
#Data augmentation
#@markdown ##Play this cell to enable or disable data augmentation:
Use_Data_augmentation = True #@param {type:"boolean"}
if Use_Data_augmentation:
print("Data augmentation enabled")
if not Use_Data_augmentation:
print("Data augmentation disabled")
###Output
_____no_output_____
###Markdown
**3.3. Using weights from a pre-trained model as initial weights**--- Here, you can set the path to a pre-trained model from which the weights can be extracted and used as a starting point for this training session. **This pre-trained model needs to be a CycleGAN model**. This option allows you to perform training over multiple Colab runtimes or to do transfer learning using models trained outside of ZeroCostDL4Mic. **You do not need to run this section if you want to train a network from scratch**. In order to continue training from the point where the pre-trained model left off, it is advisable to also **load the learning rate** that was used when the training ended. This is automatically saved for models trained with ZeroCostDL4Mic and will be loaded here. If no learning rate can be found in the model folder provided, the default learning rate will be used.
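As a quick sanity check, a pre-trained CycleGAN model folder should contain both generator checkpoints. A minimal sketch under the folder layout used by this notebook (the path below is a placeholder; the cell below performs the same check automatically):

```python
import os

pretrained_model_path = "/content/gdrive/MyDrive/my_pretrained_model"  # placeholder path
expected = ["latest_net_G_A.pth", "latest_net_G_B.pth"]
missing = [f for f in expected if not os.path.exists(os.path.join(pretrained_model_path, f))]
if missing:
    print("Not a usable CycleGAN model folder, missing: " + ", ".join(missing))
else:
    print("Both generator checkpoints found.")
```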
###Code
# @markdown ##Loading weights from a pre-trained network
Use_pretrained_model = False #@param {type:"boolean"}
#@markdown ###If not, please provide the path to the model folder:
pretrained_model_path = "" #@param {type:"string"}
# --------------------- Check if we load a previously trained model ------------------------
if Use_pretrained_model:
h5_file_path_A = os.path.join(pretrained_model_path, "latest_net_G_A.pth")
h5_file_path_B = os.path.join(pretrained_model_path, "latest_net_G_B.pth")
# --------------------- Check the model exist ------------------------
if not (os.path.exists(h5_file_path_A) and os.path.exists(h5_file_path_B)):
print(bcolors.WARNING+'WARNING: Pretrained model does not exist')
Use_pretrained_model = False
print(bcolors.WARNING+'No pretrained network will be used.')
if os.path.exists(h5_file_path_A) and os.path.exists(h5_file_path_B):
print("Pretrained model "+os.path.basename(pretrained_model_path)+" was found and will be loaded prior to training.")
else:
print(bcolors.WARNING+'No pretrained network will be used.')
###Output
_____no_output_____
###Markdown
**4. Train the network**--- **4.1. Prepare the training data for training**---Here, we use the information from 3. to prepare the training data into a suitable format for training.
###Code
#@markdown ##Prepare the data for training
print("Data preparation in progress")
if os.path.exists(model_path+'/'+model_name):
shutil.rmtree(model_path+'/'+model_name)
os.makedirs(model_path+'/'+model_name)
#--------------- Here we move the files to trainA and train B ---------
for f in os.listdir(Training_source):
shutil.copyfile(Training_source+"/"+f, TrainA_Folder+"/"+f)
for files in os.listdir(Training_target):
shutil.copyfile(Training_target+"/"+files, TrainB_Folder+"/"+files)
#---------------------------------------------------------------------
# CycleGAN uses a number of epochs without lr decay and a number of epochs with lr decay
number_of_epochs_lr_stable = int(number_of_epochs/2)
number_of_epochs_lr_decay = int(number_of_epochs/2)
if Use_pretrained_model :
for f in os.listdir(pretrained_model_path):
if (f.startswith("latest_net_")):
shutil.copyfile(pretrained_model_path+"/"+f, model_path+'/'+model_name+"/"+f)
pdf_export(augmentation = Use_Data_augmentation, pretrained_model = Use_pretrained_model)
print("Data ready for training")
###Output
_____no_output_____
###Markdown
**4.2. Start Training**---When playing the cell below you should see updates after each epoch (round). Network training can take some time.* **CRITICAL NOTE:** Google Colab has a time limit for processing (to prevent using GPU power for datamining). Training time must be less than 12 hours! If training takes longer than 12 hours, please decrease the number of epochs or the number of patches, or continue the training in a second Colab session. Once training is complete, the trained model is automatically saved on your Google Drive, in the **model_path** folder that was selected in Section 3. It is however wise to download the folder from Google Drive, as all data can be erased at the next training if the same folder is used.
###Code
#@markdown ##Start training
start = time.time()
os.chdir("/content")
#--------------------------------- Command line inputs to change CycleGAN parameters------------
# basic parameters
#('--dataroot', required=True, help='path to images (should have subfolders trainA, trainB, valA, valB, etc)')
#('--name', type=str, default='experiment_name', help='name of the experiment. It decides where to store samples and models')
#('--gpu_ids', type=str, default='0', help='gpu ids: e.g. 0 0,1,2, 0,2. use -1 for CPU')
#('--checkpoints_dir', type=str, default='./checkpoints', help='models are saved here')
# model parameters
#('--model', type=str, default='cycle_gan', help='chooses which model to use. [cycle_gan | pix2pix | test | colorization]')
#('--input_nc', type=int, default=3, help='# of input image channels: 3 for RGB and 1 for grayscale')
#('--output_nc', type=int, default=3, help='# of output image channels: 3 for RGB and 1 for grayscale')
#('--ngf', type=int, default=64, help='# of gen filters in the last conv layer')
#('--ndf', type=int, default=64, help='# of discrim filters in the first conv layer')
#('--netD', type=str, default='basic', help='specify discriminator architecture [basic | n_layers | pixel]. The basic model is a 70x70 PatchGAN. n_layers allows you to specify the layers in the discriminator')
#('--netG', type=str, default='resnet_9blocks', help='specify generator architecture [resnet_9blocks | resnet_6blocks | unet_256 | unet_128]')
#('--n_layers_D', type=int, default=3, help='only used if netD==n_layers')
#('--norm', type=str, default='instance', help='instance normalization or batch normalization [instance | batch | none]')
#('--init_type', type=str, default='normal', help='network initialization [normal | xavier | kaiming | orthogonal]')
#('--init_gain', type=float, default=0.02, help='scaling factor for normal, xavier and orthogonal.')
#('--no_dropout', action='store_true', help='no dropout for the generator')
# dataset parameters
#('--dataset_mode', type=str, default='unaligned', help='chooses how datasets are loaded. [unaligned | aligned | single | colorization]')
#('--direction', type=str, default='AtoB', help='AtoB or BtoA')
#('--serial_batches', action='store_true', help='if true, takes images in order to make batches, otherwise takes them randomly')
#('--num_threads', default=4, type=int, help='# threads for loading data')
#('--batch_size', type=int, default=1, help='input batch size')
#('--load_size', type=int, default=286, help='scale images to this size')
#('--crop_size', type=int, default=256, help='then crop to this size')
#('--max_dataset_size', type=int, default=float("inf"), help='Maximum number of samples allowed per dataset. If the dataset directory contains more than max_dataset_size, only a subset is loaded.')
#('--preprocess', type=str, default='resize_and_crop', help='scaling and cropping of images at load time [resize_and_crop | crop | scale_width | scale_width_and_crop | none]')
#('--no_flip', action='store_true', help='if specified, do not flip the images for data augmentation')
#('--display_winsize', type=int, default=256, help='display window size for both visdom and HTML')
# additional parameters
#('--epoch', type=str, default='latest', help='which epoch to load? set to latest to use latest cached model')
#('--load_iter', type=int, default='0', help='which iteration to load? if load_iter > 0, the code will load models by iter_[load_iter]; otherwise, the code will load models by [epoch]')
#('--verbose', action='store_true', help='if specified, print more debugging information')
#('--suffix', default='', type=str, help='customized suffix: opt.name = opt.name + suffix: e.g., {model}_{netG}_size{load_size}')
# visdom and HTML visualization parameters
#('--display_freq', type=int, default=400, help='frequency of showing training results on screen')
#('--display_ncols', type=int, default=4, help='if positive, display all images in a single visdom web panel with certain number of images per row.')
#('--display_id', type=int, default=1, help='window id of the web display')
#('--display_server', type=str, default="http://localhost", help='visdom server of the web display')
#('--display_env', type=str, default='main', help='visdom display environment name (default is "main")')
#('--display_port', type=int, default=8097, help='visdom port of the web display')
#('--update_html_freq', type=int, default=1000, help='frequency of saving training results to html')
#('--print_freq', type=int, default=100, help='frequency of showing training results on console')
#('--no_html', action='store_true', help='do not save intermediate training results to [opt.checkpoints_dir]/[opt.name]/web/')
# network saving and loading parameters
#('--save_latest_freq', type=int, default=5000, help='frequency of saving the latest results')
#('--save_epoch_freq', type=int, default=5, help='frequency of saving checkpoints at the end of epochs')
#('--save_by_iter', action='store_true', help='whether saves model by iteration')
#('--continue_train', action='store_true', help='continue training: load the latest model')
#('--epoch_count', type=int, default=1, help='the starting epoch count, we save the model by <epoch_count>, <epoch_count>+<save_latest_freq>, ...')
#('--phase', type=str, default='train', help='train, val, test, etc')
# training parameters
#('--n_epochs', type=int, default=100, help='number of epochs with the initial learning rate')
#('--n_epochs_decay', type=int, default=100, help='number of epochs to linearly decay learning rate to zero')
#('--beta1', type=float, default=0.5, help='momentum term of adam')
#('--lr', type=float, default=0.0002, help='initial learning rate for adam')
#('--gan_mode', type=str, default='lsgan', help='the type of GAN objective. [vanilla| lsgan | wgangp]. vanilla GAN loss is the cross-entropy objective used in the original GAN paper.')
#('--pool_size', type=int, default=50, help='the size of image buffer that stores previously generated images')
#('--lr_policy', type=str, default='linear', help='learning rate policy. [linear | step | plateau | cosine]')
#('--lr_decay_iters', type=int, default=50, help='multiply by a gamma every lr_decay_iters iterations')
#---------------------------------------------------------
#----- Start the training ------------------------------------
if not Use_pretrained_model:
if Use_Data_augmentation:
!python pytorch-CycleGAN-and-pix2pix/train.py --dataroot "$Saving_path" --input_nc 3 --name $model_name --model cycle_gan --batch_size $batch_size --preprocess scale_width_and_crop --load_size $Image_min_dim --crop_size $patch_size --checkpoints_dir "$model_path" --no_html --n_epochs $number_of_epochs_lr_stable --n_epochs_decay $number_of_epochs_lr_decay --lr $initial_learning_rate --display_id 0 --save_epoch_freq 5
if not Use_Data_augmentation:
!python pytorch-CycleGAN-and-pix2pix/train.py --dataroot "$Saving_path" --input_nc 3 --name $model_name --model cycle_gan --batch_size $batch_size --preprocess scale_width_and_crop --load_size $Image_min_dim --crop_size $patch_size --checkpoints_dir "$model_path" --no_html --n_epochs $number_of_epochs_lr_stable --n_epochs_decay $number_of_epochs_lr_decay --lr $initial_learning_rate --display_id 0 --save_epoch_freq 5 --no_flip
if Use_pretrained_model:
if Use_Data_augmentation:
!python pytorch-CycleGAN-and-pix2pix/train.py --dataroot "$Saving_path" --input_nc 3 --name $model_name --model cycle_gan --batch_size $batch_size --preprocess scale_width_and_crop --load_size $Image_min_dim --crop_size $patch_size --checkpoints_dir "$model_path" --no_html --n_epochs $number_of_epochs_lr_stable --n_epochs_decay $number_of_epochs_lr_decay --lr $initial_learning_rate --display_id 0 --save_epoch_freq 5 --continue_train
if not Use_Data_augmentation:
!python pytorch-CycleGAN-and-pix2pix/train.py --dataroot "$Saving_path" --input_nc 3 --name $model_name --model cycle_gan --batch_size $batch_size --preprocess scale_width_and_crop --load_size $Image_min_dim --crop_size $patch_size --checkpoints_dir "$model_path" --no_html --n_epochs $number_of_epochs_lr_stable --n_epochs_decay $number_of_epochs_lr_decay --lr $initial_learning_rate --display_id 0 --save_epoch_freq 5 --continue_train --no_flip
#---------------------------------------------------------
print("Training, done.")
# Displaying the time elapsed for training
dt = time.time() - start
mins, sec = divmod(dt, 60)
hour, mins = divmod(mins, 60)
print("Time elapsed:",hour, "hour(s)",mins,"min(s)",round(sec),"sec(s)")
# Save training summary as pdf
pdf_export(trained = True, augmentation = Use_Data_augmentation, pretrained_model = Use_pretrained_model)
###Output
_____no_output_____
###Markdown
**5. Evaluate your model**---This section allows the user to perform important quality checks on the validity and generalisability of the trained model. **We highly recommend performing quality control on all newly trained models.** Unfortunately, loss function curves are not very informative for GAN networks. Therefore we perform the QC here using a test dataset. **5.1. Choose the model you want to assess**
###Code
# model name and path
#@markdown ###Do you want to assess the model you just trained ?
Use_the_current_trained_model = True #@param {type:"boolean"}
#@markdown ###If not, please provide the path to the model folder:
QC_model_folder = "" #@param {type:"string"}
#Here we define the loaded model name and path
QC_model_name = os.path.basename(QC_model_folder)
QC_model_path = os.path.dirname(QC_model_folder)
if (Use_the_current_trained_model):
QC_model_name = model_name
QC_model_path = model_path
full_QC_model_path = QC_model_path+'/'+QC_model_name+'/'
if os.path.exists(full_QC_model_path):
print("The "+QC_model_name+" network will be evaluated")
else:
W = '\033[0m' # white (normal)
R = '\033[31m' # red
print(R+'!! WARNING: The chosen model does not exist !!'+W)
print('Please make sure you provide a valid model path and model name before proceeding further.')
###Output
_____no_output_____
###Markdown
**5.2. Identify the best checkpoint to use to make predictions** CycleGAN saves model checkpoints every five epochs. Due to the stochastic nature of GAN networks, the last checkpoint is not always the best one to use. As a consequence, it can be challenging to choose the most suitable checkpoint to use to make predictions. This section allows you to perform predictions using all the saved checkpoints and to estimate the quality of these predictions by comparing them to the provided ground truth images. Metrics used include:**1. The SSIM (structural similarity) map** The SSIM metric is used to evaluate whether two images contain the same structures. It is a normalized metric and an SSIM of 1 indicates a perfect similarity between two images. Therefore for SSIM, the closer to 1, the better. The SSIM maps are constructed by calculating the SSIM metric in each pixel by considering the surrounding structural similarity in the neighbourhood of that pixel (currently defined as a window of 11 pixels and with Gaussian weighting of 1.5 pixel standard deviation, see our Wiki for more info). **mSSIM** is the SSIM value calculated across the entire window of both images.**The output below shows the SSIM maps with the mSSIM****2. The RSE (Root Squared Error) map** This is a display of the root of the squared difference between the normalized prediction and the target, or between the source and the target. In this case, a smaller RSE is better. A perfect agreement between target and prediction will lead to an RSE map showing zeros everywhere (dark).**NRMSE (normalised root mean squared error)** gives the average difference between all pixels in the images compared to each other. Good agreement yields low NRMSE scores.**PSNR (Peak signal-to-noise ratio)** is a metric that gives the difference between the ground truth and prediction (or source input) in decibels, using the peak pixel values of the prediction and the MSE between the images. The higher the score, the better the agreement.**The output below shows the RSE maps with the NRMSE and PSNR values.**
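For reference, a minimal sketch of how the metrics described above can be computed for a single pair of images normalised to [0, 1] (illustration only; the cell below performs these computations for every checkpoint using the notebook's own normalisation, and its NRMSE differs slightly from the plain RMSE used here):

```python
import numpy as np
from skimage.metrics import structural_similarity, peak_signal_noise_ratio

def qc_metrics(gt, pred):
    # mSSIM and the per-pixel SSIM map
    mssim, ssim_map = structural_similarity(gt, pred, data_range=1.0, full=True)
    # Root Squared Error map and a simple RMSE summary
    rse_map = np.sqrt(np.square(gt - pred))
    rmse = np.sqrt(np.mean(np.square(gt - pred)))
    # Peak signal-to-noise ratio in decibels
    psnr_val = peak_signal_noise_ratio(gt, pred, data_range=1.0)
    return mssim, ssim_map, rse_map, rmse, psnr_val
```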
###Code
#@markdown ##Choose the folders that contain your Quality Control dataset
Source_QC_folder = "" #@param{type:"string"}
Target_QC_folder = "" #@param{type:"string"}
Image_type = "Grayscale" #@param ["Grayscale", "RGB"]
# average function
def Average(lst):
return sum(lst) / len(lst)
# Create a quality control folder
if os.path.exists(QC_model_path+"/"+QC_model_name+"/Quality Control"):
shutil.rmtree(QC_model_path+"/"+QC_model_name+"/Quality Control")
os.makedirs(QC_model_path+"/"+QC_model_name+"/Quality Control")
# List images in Source_QC_folder
# This will find the image dimension of a randomly chosen image in Source_QC_folder
random_choice = random.choice(os.listdir(Source_QC_folder))
x = imageio.imread(Source_QC_folder+"/"+random_choice)
#Find image XY dimension
Image_Y = x.shape[0]
Image_X = x.shape[1]
Image_min_dim = min(Image_Y, Image_X)
# Here we need to move the data to be analysed so that cycleGAN can find them
Saving_path_QC= "/content/"+QC_model_name
if os.path.exists(Saving_path_QC):
shutil.rmtree(Saving_path_QC)
os.makedirs(Saving_path_QC)
Saving_path_QC_folder = Saving_path_QC+"_images"
if os.path.exists(Saving_path_QC_folder):
shutil.rmtree(Saving_path_QC_folder)
os.makedirs(Saving_path_QC_folder)
#Here we copy and rename all the checkpoints to be analysed
for f in os.listdir(full_QC_model_path):
shortname = f[:-6]
shortname = shortname + ".pth"
if f.endswith("net_G_A.pth"):
shutil.copyfile(full_QC_model_path+f, Saving_path_QC+"/"+shortname)
for files in os.listdir(Source_QC_folder):
shutil.copyfile(Source_QC_folder+"/"+files, Saving_path_QC_folder+"/"+files)
# This will find the image dimension of a randomly chosen image in Source_QC_folder
random_choice = random.choice(os.listdir(Source_QC_folder))
x = imageio.imread(Source_QC_folder+"/"+random_choice)
#Find image XY dimension
Image_Y = x.shape[0]
Image_X = x.shape[1]
Image_min_dim = int(min(Image_Y, Image_X))
Nb_Checkpoint = len(os.listdir(Saving_path_QC))
print(Nb_Checkpoint)
## Initiate list
Checkpoint_list = []
Average_ssim_score_list = []
for j in range(1, len(os.listdir(Saving_path_QC))+1):
checkpoints = j*5
if checkpoints == Nb_Checkpoint*5:
checkpoints = "latest"
print("The checkpoint currently analysed is ="+str(checkpoints))
Checkpoint_list.append(checkpoints)
# Create a quality control/Prediction Folder
QC_prediction_results = QC_model_path+"/"+QC_model_name+"/Quality Control/"+str(checkpoints)
if os.path.exists(QC_prediction_results):
shutil.rmtree(QC_prediction_results)
os.makedirs(QC_prediction_results)
#---------------------------- Predictions are performed here ----------------------
os.chdir("/content")
!python pytorch-CycleGAN-and-pix2pix/test.py --dataroot "$Saving_path_QC_folder" --name "$QC_model_name" --model test --epoch $checkpoints --no_dropout --preprocess scale_width --load_size $Image_min_dim --crop_size $Image_min_dim --results_dir "$QC_prediction_results" --checkpoints_dir "/content/"
#-----------------------------------------------------------------------------------
#Here we need to move the data again and remove all the unnecessary folders
Checkpoint_name = "test_"+str(checkpoints)
QC_results_images = QC_prediction_results+"/"+QC_model_name+"/"+Checkpoint_name+"/images"
QC_results_images_files = os.listdir(QC_results_images)
for f in QC_results_images_files:
shutil.copyfile(QC_results_images+"/"+f, QC_prediction_results+"/"+f)
os.chdir("/content")
#Here we clean up the extra files
shutil.rmtree(QC_prediction_results+"/"+QC_model_name)
#-------------------------------- QC for RGB ------------------------------------
if Image_type == "RGB":
# List images in Source_QC_folder
# This will find the image dimension of a randomly chosen image in Source_QC_folder
random_choice = random.choice(os.listdir(Source_QC_folder))
x = imageio.imread(Source_QC_folder+"/"+random_choice)
def ssim(img1, img2):
return structural_similarity(img1,img2,data_range=1.,full=True, multichannel=True)
# Open and create the csv file that will contain all the QC metrics
with open(QC_model_path+"/"+QC_model_name+"/Quality Control/"+str(checkpoints)+"/"+"QC_metrics_"+QC_model_name+str(checkpoints)+".csv", "w", newline='') as file:
writer = csv.writer(file)
# Write the header in the csv file
writer.writerow(["image #","Prediction v. GT mSSIM","Input v. GT mSSIM"])
# Initiate list
ssim_score_list = []
# Let's loop through the provided dataset in the QC folders
for i in os.listdir(Source_QC_folder):
if not os.path.isdir(os.path.join(Source_QC_folder,i)):
print('Running QC on: '+i)
shortname_no_PNG = i[:-4]
# -------------------------------- Target test data (Ground truth) --------------------------------
test_GT = imageio.imread(os.path.join(Target_QC_folder, i), as_gray=False, pilmode="RGB")
# -------------------------------- Source test data --------------------------------
test_source = imageio.imread(os.path.join(QC_model_path+"/"+QC_model_name+"/Quality Control/"+str(checkpoints),shortname_no_PNG+"_real.png"))
# -------------------------------- Prediction --------------------------------
test_prediction = imageio.imread(os.path.join(QC_model_path+"/"+QC_model_name+"/Quality Control/"+str(checkpoints),shortname_no_PNG+"_fake.png"))
#--------------------------- Here we normalise using histograms matching--------------------------------
test_prediction_matched = match_histograms(test_prediction, test_GT, multichannel=True)
test_source_matched = match_histograms(test_source, test_GT, multichannel=True)
# -------------------------------- Calculate the metric maps and save them --------------------------------
# Calculate the SSIM maps
index_SSIM_GTvsPrediction, img_SSIM_GTvsPrediction = ssim(test_GT, test_prediction_matched)
index_SSIM_GTvsSource, img_SSIM_GTvsSource = ssim(test_GT, test_source_matched)
ssim_score_list.append(index_SSIM_GTvsPrediction)
#Save ssim_maps
img_SSIM_GTvsPrediction_8bit = (img_SSIM_GTvsPrediction* 255).astype("uint8")
io.imsave(QC_model_path+'/'+QC_model_name+"/Quality Control/"+str(checkpoints)+"/SSIM_GTvsPrediction_"+shortname_no_PNG+'.tif',img_SSIM_GTvsPrediction_8bit)
img_SSIM_GTvsSource_8bit = (img_SSIM_GTvsSource* 255).astype("uint8")
io.imsave(QC_model_path+'/'+QC_model_name+"/Quality Control/"+str(checkpoints)+"/SSIM_GTvsSource_"+shortname_no_PNG+'.tif',img_SSIM_GTvsSource_8bit)
writer.writerow([i,str(index_SSIM_GTvsPrediction),str(index_SSIM_GTvsSource)])
#Here we calculate the average ssim over all QC images for this checkpoint
Average_SSIM_checkpoint = Average(ssim_score_list)
Average_ssim_score_list.append(Average_SSIM_checkpoint)
#------------------------------------------- QC for Grayscale ----------------------------------------------
if Image_type == "Grayscale":
def ssim(img1, img2):
return structural_similarity(img1,img2,data_range=1.,full=True, gaussian_weights=True, use_sample_covariance=False, sigma=1.5)
def normalize(x, pmin=3, pmax=99.8, axis=None, clip=False, eps=1e-20, dtype=np.float32):
mi = np.percentile(x,pmin,axis=axis,keepdims=True)
ma = np.percentile(x,pmax,axis=axis,keepdims=True)
return normalize_mi_ma(x, mi, ma, clip=clip, eps=eps, dtype=dtype)
def normalize_mi_ma(x, mi, ma, clip=False, eps=1e-20, dtype=np.float32):#dtype=np.float32
if dtype is not None:
x = x.astype(dtype,copy=False)
mi = dtype(mi) if np.isscalar(mi) else mi.astype(dtype,copy=False)
ma = dtype(ma) if np.isscalar(ma) else ma.astype(dtype,copy=False)
eps = dtype(eps)
try:
import numexpr
x = numexpr.evaluate("(x - mi) / ( ma - mi + eps )")
except ImportError:
x = (x - mi) / ( ma - mi + eps )
if clip:
x = np.clip(x,0,1)
return x
def norm_minmse(gt, x, normalize_gt=True):
if normalize_gt:
gt = normalize(gt, 0.1, 99.9, clip=False).astype(np.float32, copy = False)
x = x.astype(np.float32, copy=False) - np.mean(x)
#x = x - np.mean(x)
gt = gt.astype(np.float32, copy=False) - np.mean(gt)
#gt = gt - np.mean(gt)
scale = np.cov(x.flatten(), gt.flatten())[0, 1] / np.var(x.flatten())
return gt, scale * x
# Open and create the csv file that will contain all the QC metrics
with open(QC_model_path+"/"+QC_model_name+"/Quality Control/"+str(checkpoints)+"/"+"QC_metrics_"+QC_model_name+str(checkpoints)+".csv", "w", newline='') as file:
writer = csv.writer(file)
# Write the header in the csv file
writer.writerow(["image #","Prediction v. GT mSSIM","Input v. GT mSSIM", "Prediction v. GT NRMSE", "Input v. GT NRMSE", "Prediction v. GT PSNR", "Input v. GT PSNR"])
# Initiate list before the loop so the average covers all QC images for this checkpoint
ssim_score_list = []
# Let's loop through the provided dataset in the QC folders
for i in os.listdir(Source_QC_folder):
if not os.path.isdir(os.path.join(Source_QC_folder,i)):
print('Running QC on: '+i)
shortname_no_PNG = i[:-4]
# -------------------------------- Target test data (Ground truth) --------------------------------
test_GT_raw = imageio.imread(os.path.join(Target_QC_folder, i), as_gray=False, pilmode="RGB")
test_GT = test_GT_raw[:,:,2]
# -------------------------------- Source test data --------------------------------
test_source_raw = imageio.imread(os.path.join(QC_model_path+"/"+QC_model_name+"/Quality Control/"+str(checkpoints),shortname_no_PNG+"_real.png"))
test_source = test_source_raw[:,:,2]
# Normalize the images wrt each other by minimizing the MSE between GT and Source image
test_GT_norm,test_source_norm = norm_minmse(test_GT, test_source, normalize_gt=True)
# -------------------------------- Prediction --------------------------------
test_prediction_raw = imageio.imread(os.path.join(QC_model_path+"/"+QC_model_name+"/Quality Control/"+str(checkpoints),shortname_no_PNG+"_fake.png"))
test_prediction = test_prediction_raw[:,:,2]
# Normalize the images wrt each other by minimizing the MSE between GT and prediction
test_GT_norm,test_prediction_norm = norm_minmse(test_GT, test_prediction, normalize_gt=True)
# -------------------------------- Calculate the metric maps and save them --------------------------------
# Calculate the SSIM maps
index_SSIM_GTvsPrediction, img_SSIM_GTvsPrediction = ssim(test_GT_norm, test_prediction_norm)
index_SSIM_GTvsSource, img_SSIM_GTvsSource = ssim(test_GT_norm, test_source_norm)
ssim_score_list.append(index_SSIM_GTvsPrediction)
#Save ssim_maps
img_SSIM_GTvsPrediction_8bit = (img_SSIM_GTvsPrediction* 255).astype("uint8")
io.imsave(QC_model_path+'/'+QC_model_name+"/Quality Control/"+str(checkpoints)+"/SSIM_GTvsPrediction_"+shortname_no_PNG+'.tif',img_SSIM_GTvsPrediction_8bit)
img_SSIM_GTvsSource_8bit = (img_SSIM_GTvsSource* 255).astype("uint8")
io.imsave(QC_model_path+'/'+QC_model_name+"/Quality Control/"+str(checkpoints)+"/SSIM_GTvsSource_"+shortname_no_PNG+'.tif',img_SSIM_GTvsSource_8bit)
# Calculate the Root Squared Error (RSE) maps
img_RSE_GTvsPrediction = np.sqrt(np.square(test_GT_norm - test_prediction_norm))
img_RSE_GTvsSource = np.sqrt(np.square(test_GT_norm - test_source_norm))
# Save SE maps
img_RSE_GTvsPrediction_8bit = (img_RSE_GTvsPrediction* 255).astype("uint8")
io.imsave(QC_model_path+'/'+QC_model_name+"/Quality Control/"+str(checkpoints)+"/RSE_GTvsPrediction_"+shortname_no_PNG+'.tif',img_RSE_GTvsPrediction_8bit)
img_RSE_GTvsSource_8bit = (img_RSE_GTvsSource* 255).astype("uint8")
io.imsave(QC_model_path+'/'+QC_model_name+"/Quality Control/"+str(checkpoints)+"/RSE_GTvsSource_"+shortname_no_PNG+'.tif',img_RSE_GTvsSource_8bit)
# -------------------------------- Calculate the RSE metrics and save them --------------------------------
# Normalised Root Mean Squared Error (here it's valid to take the mean of the image)
NRMSE_GTvsPrediction = np.sqrt(np.mean(img_RSE_GTvsPrediction))
NRMSE_GTvsSource = np.sqrt(np.mean(img_RSE_GTvsSource))
# We can also measure the peak signal to noise ratio between the images
PSNR_GTvsPrediction = psnr(test_GT_norm,test_prediction_norm,data_range=1.0)
PSNR_GTvsSource = psnr(test_GT_norm,test_source_norm,data_range=1.0)
writer.writerow([i,str(index_SSIM_GTvsPrediction),str(index_SSIM_GTvsSource),str(NRMSE_GTvsPrediction),str(NRMSE_GTvsSource),str(PSNR_GTvsPrediction),str(PSNR_GTvsSource)])
#Here we calculate the average ssim over all QC images for this checkpoint
Average_SSIM_checkpoint = Average(ssim_score_list)
Average_ssim_score_list.append(Average_SSIM_checkpoint)
# All data is now processed and saved
# -------------------------------- Display --------------------------------
# Display the SSIM vs Checkpoint plot
plt.figure(figsize=(20,5))
plt.plot(Checkpoint_list, Average_ssim_score_list, label="SSIM")
plt.title('Checkpoints vs. SSIM')
plt.ylabel('SSIM')
plt.xlabel('Checkpoints')
plt.legend()
plt.savefig(full_QC_model_path+'Quality Control/SSIMvsCheckpoint_data.png',bbox_inches='tight',pad_inches=0)
plt.show()
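# (Added sketch) Report the checkpoint with the highest average mSSIM as a hint for the
# `checkpoint` parameter used for predictions in section 6.1.
best_checkpoint = Checkpoint_list[Average_ssim_score_list.index(max(Average_ssim_score_list))]
print("Checkpoint with the highest average mSSIM: "+str(best_checkpoint))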
# -------------------------------- Display RGB --------------------------------
from ipywidgets import interact
import ipywidgets as widgets
if Image_type == "RGB":
random_choice_shortname_no_PNG = shortname_no_PNG
@interact
def show_results(file=os.listdir(Source_QC_folder), checkpoints=Checkpoint_list):
random_choice_shortname_no_PNG = file[:-4]
df1 = pd.read_csv(QC_model_path+"/"+QC_model_name+"/Quality Control/"+str(checkpoints)+"/"+"QC_metrics_"+QC_model_name+str(checkpoints)+".csv", header=0)
df2 = df1.set_index("image #", drop = False)
index_SSIM_GTvsPrediction = df2.loc[file, "Prediction v. GT mSSIM"]
index_SSIM_GTvsSource = df2.loc[file, "Input v. GT mSSIM"]
#Setting up colours
cmap = None
plt.figure(figsize=(10,10))
# Target (Ground-truth)
plt.subplot(3,3,1)
plt.axis('off')
img_GT = imageio.imread(os.path.join(Target_QC_folder, file), as_gray=False, pilmode="RGB")
plt.imshow(img_GT, cmap = cmap)
plt.title('Target',fontsize=15)
# Source
plt.subplot(3,3,2)
plt.axis('off')
img_Source = imageio.imread(os.path.join(Source_QC_folder, file), as_gray=False, pilmode="RGB")
plt.imshow(img_Source, cmap = cmap)
plt.title('Source',fontsize=15)
#Prediction
plt.subplot(3,3,3)
plt.axis('off')
img_Prediction = io.imread(os.path.join(QC_model_path+"/"+QC_model_name+"/Quality Control/"+str(checkpoints), random_choice_shortname_no_PNG+"_fake.png"))
plt.imshow(img_Prediction, cmap = cmap)
plt.title('Prediction',fontsize=15)
#SSIM between GT and Source
plt.subplot(3,3,5)
#plt.axis('off')
plt.tick_params(
axis='both', # changes apply to the x-axis and y-axis
which='both', # both major and minor ticks are affected
bottom=False, # ticks along the bottom edge are off
top=False, # ticks along the top edge are off
left=False, # ticks along the left edge are off
right=False, # ticks along the right edge are off
labelbottom=False,
labelleft=False)
img_SSIM_GTvsSource = imageio.imread(os.path.join(QC_model_path+"/"+QC_model_name+"/Quality Control/"+str(checkpoints), "SSIM_GTvsSource_"+random_choice_shortname_no_PNG+".tif"))
imSSIM_GTvsSource = plt.imshow(img_SSIM_GTvsSource, cmap = cmap, vmin=0, vmax=1)
#plt.colorbar(imSSIM_GTvsSource,fraction=0.046, pad=0.04)
plt.title('Target vs. Source',fontsize=15)
plt.xlabel('mSSIM: '+str(round(index_SSIM_GTvsSource,3)),fontsize=14)
plt.ylabel('SSIM maps',fontsize=20, rotation=0, labelpad=75)
#SSIM between GT and Prediction
plt.subplot(3,3,6)
#plt.axis('off')
plt.tick_params(
axis='both', # changes apply to the x-axis and y-axis
which='both', # both major and minor ticks are affected
bottom=False, # ticks along the bottom edge are off
top=False, # ticks along the top edge are off
left=False, # ticks along the left edge are off
right=False, # ticks along the right edge are off
labelbottom=False,
labelleft=False)
img_SSIM_GTvsPrediction = imageio.imread(os.path.join(QC_model_path+"/"+QC_model_name+"/Quality Control/"+str(checkpoints), "SSIM_GTvsPrediction_"+random_choice_shortname_no_PNG+".tif"))
imSSIM_GTvsPrediction = plt.imshow(img_SSIM_GTvsPrediction, cmap = cmap, vmin=0,vmax=1)
#plt.colorbar(imSSIM_GTvsPrediction,fraction=0.046, pad=0.04)
plt.title('Target vs. Prediction',fontsize=15)
plt.xlabel('mSSIM: '+str(round(index_SSIM_GTvsPrediction,3)),fontsize=14)
plt.savefig(full_QC_model_path+'Quality Control/QC_example_data.png',bbox_inches='tight',pad_inches=0)
# -------------------------------- Display Grayscale --------------------------------
if Image_type == "Grayscale":
random_choice_shortname_no_PNG = shortname_no_PNG
@interact
def show_results(file=os.listdir(Source_QC_folder), checkpoints=Checkpoint_list):
random_choice_shortname_no_PNG = file[:-4]
df1 = pd.read_csv(QC_model_path+"/"+QC_model_name+"/Quality Control/"+str(checkpoints)+"/"+"QC_metrics_"+QC_model_name+str(checkpoints)+".csv", header=0)
df2 = df1.set_index("image #", drop = False)
index_SSIM_GTvsPrediction = df2.loc[file, "Prediction v. GT mSSIM"]
index_SSIM_GTvsSource = df2.loc[file, "Input v. GT mSSIM"]
NRMSE_GTvsPrediction = df2.loc[file, "Prediction v. GT NRMSE"]
NRMSE_GTvsSource = df2.loc[file, "Input v. GT NRMSE"]
PSNR_GTvsSource = df2.loc[file, "Input v. GT PSNR"]
PSNR_GTvsPrediction = df2.loc[file, "Prediction v. GT PSNR"]
plt.figure(figsize=(15,15))
cmap = None
# Target (Ground-truth)
plt.subplot(3,3,1)
plt.axis('off')
img_GT = imageio.imread(os.path.join(Target_QC_folder, file), as_gray=True, pilmode="RGB")
plt.imshow(img_GT, norm=simple_norm(img_GT, percent = 99), cmap = 'gray')
plt.title('Target',fontsize=15)
# Source
plt.subplot(3,3,2)
plt.axis('off')
img_Source = imageio.imread(os.path.join(QC_model_path+"/"+QC_model_name+"/Quality Control/"+str(checkpoints), random_choice_shortname_no_PNG+"_real.png"))
plt.imshow(img_Source, norm=simple_norm(img_Source, percent = 99))
plt.title('Source',fontsize=15)
#Prediction
plt.subplot(3,3,3)
plt.axis('off')
img_Prediction = io.imread(os.path.join(QC_model_path+"/"+QC_model_name+"/Quality Control/"+str(checkpoints), random_choice_shortname_no_PNG+"_fake.png"))
plt.imshow(img_Prediction, norm=simple_norm(img_Prediction, percent = 99))
plt.title('Prediction',fontsize=15)
#Setting up colours
cmap = plt.cm.CMRmap
#SSIM between GT and Source
plt.subplot(3,3,5)
#plt.axis('off')
plt.tick_params(
axis='both', # changes apply to the x-axis and y-axis
which='both', # both major and minor ticks are affected
bottom=False, # ticks along the bottom edge are off
top=False, # ticks along the top edge are off
left=False, # ticks along the left edge are off
right=False, # ticks along the right edge are off
labelbottom=False,
labelleft=False)
img_SSIM_GTvsSource = imageio.imread(os.path.join(QC_model_path+"/"+QC_model_name+"/Quality Control/"+str(checkpoints), "SSIM_GTvsSource_"+random_choice_shortname_no_PNG+".tif"))
img_SSIM_GTvsSource = img_SSIM_GTvsSource / 255
imSSIM_GTvsSource = plt.imshow(img_SSIM_GTvsSource, cmap = cmap, vmin=0, vmax=1)
plt.colorbar(imSSIM_GTvsSource,fraction=0.046, pad=0.04)
plt.title('Target vs. Source',fontsize=15)
plt.xlabel('mSSIM: '+str(round(index_SSIM_GTvsSource,3)),fontsize=14)
plt.ylabel('SSIM maps',fontsize=20, rotation=0, labelpad=75)
#SSIM between GT and Prediction
plt.subplot(3,3,6)
#plt.axis('off')
plt.tick_params(
axis='both', # changes apply to the x-axis and y-axis
which='both', # both major and minor ticks are affected
bottom=False, # ticks along the bottom edge are off
top=False, # ticks along the top edge are off
left=False, # ticks along the left edge are off
right=False, # ticks along the right edge are off
labelbottom=False,
labelleft=False)
img_SSIM_GTvsPrediction = imageio.imread(os.path.join(QC_model_path+"/"+QC_model_name+"/Quality Control/"+str(checkpoints), "SSIM_GTvsPrediction_"+random_choice_shortname_no_PNG+".tif"))
img_SSIM_GTvsPrediction = img_SSIM_GTvsPrediction / 255
imSSIM_GTvsPrediction = plt.imshow(img_SSIM_GTvsPrediction, cmap = cmap, vmin=0,vmax=1)
plt.colorbar(imSSIM_GTvsPrediction,fraction=0.046, pad=0.04)
plt.title('Target vs. Prediction',fontsize=15)
plt.xlabel('mSSIM: '+str(round(index_SSIM_GTvsPrediction,3)),fontsize=14)
#Root Squared Error between GT and Source
plt.subplot(3,3,8)
#plt.axis('off')
plt.tick_params(
axis='both', # changes apply to the x-axis and y-axis
which='both', # both major and minor ticks are affected
bottom=False, # ticks along the bottom edge are off
top=False, # ticks along the top edge are off
left=False, # ticks along the left edge are off
right=False, # ticks along the right edge are off
labelbottom=False,
labelleft=False)
img_RSE_GTvsSource = imageio.imread(os.path.join(QC_model_path+"/"+QC_model_name+"/Quality Control/"+str(checkpoints), "RSE_GTvsSource_"+random_choice_shortname_no_PNG+".tif"))
img_RSE_GTvsSource = img_RSE_GTvsSource / 255
imRSE_GTvsSource = plt.imshow(img_RSE_GTvsSource, cmap = cmap, vmin=0, vmax = 1)
plt.colorbar(imRSE_GTvsSource,fraction=0.046,pad=0.04)
plt.title('Target vs. Source',fontsize=15)
plt.xlabel('NRMSE: '+str(round(NRMSE_GTvsSource,3))+', PSNR: '+str(round(PSNR_GTvsSource,3)),fontsize=14)
#plt.title('Target vs. Source PSNR: '+str(round(PSNR_GTvsSource,3)))
plt.ylabel('RSE maps',fontsize=20, rotation=0, labelpad=75)
#Root Squared Error between GT and Prediction
plt.subplot(3,3,9)
#plt.axis('off')
plt.tick_params(
axis='both', # changes apply to the x-axis and y-axis
which='both', # both major and minor ticks are affected
bottom=False, # ticks along the bottom edge are off
top=False, # ticks along the top edge are off
left=False, # ticks along the left edge are off
right=False, # ticks along the right edge are off
labelbottom=False,
labelleft=False)
img_RSE_GTvsPrediction = imageio.imread(os.path.join(QC_model_path+"/"+QC_model_name+"/Quality Control/"+str(checkpoints), "RSE_GTvsPrediction_"+random_choice_shortname_no_PNG+".tif"))
img_RSE_GTvsPrediction = img_RSE_GTvsPrediction / 255
imRSE_GTvsPrediction = plt.imshow(img_RSE_GTvsPrediction, cmap = cmap, vmin=0, vmax=1)
plt.colorbar(imRSE_GTvsPrediction,fraction=0.046,pad=0.04)
plt.title('Target vs. Prediction',fontsize=15)
plt.xlabel('NRMSE: '+str(round(NRMSE_GTvsPrediction,3))+', PSNR: '+str(round(PSNR_GTvsPrediction,3)),fontsize=14)
plt.savefig(full_QC_model_path+'/Quality Control/QC_example_data.png',bbox_inches='tight',pad_inches=0)
#Make a pdf summary of the QC results
qc_pdf_export()
###Output
_____no_output_____
###Markdown
**6. Using the trained model**---In this section the unseen data is processed using the trained model (in section 4). First, your unseen images are uploaded and prepared for prediction. After that, your trained model from section 4 is used to process the images, and the predictions are saved into your Google Drive. **6.1. Generate prediction(s) from unseen dataset**---The current trained model (from section 4.2) can now be used to process images. If you want to use an older model, untick the **Use_the_current_trained_model** box and enter the name and path of the model to use. Predicted output images are saved in your **Result_folder** folder as PNG images.**`Data_folder`:** This folder should contain the images that you want to use your trained network on for processing.**`Result_folder`:** This folder will contain the predicted output images.**`checkpoint`:** Choose the checkpoint number you would like to use to perform predictions. To use the "latest" checkpoint, input "latest".
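To see which values are valid for the `checkpoint` parameter, here is a small sketch (assuming the model folder layout used by this notebook; the path below is a placeholder) that lists the saved generator checkpoints:

```python
import glob
import os

model_folder = "/content/gdrive/MyDrive/my_model"  # placeholder path to a trained model folder
ckpts = sorted(glob.glob(os.path.join(model_folder, "*_net_G_A.pth")))
print("Available checkpoints:")
for c in ckpts:
    # file names look like '5_net_G_A.pth', '10_net_G_A.pth', ..., 'latest_net_G_A.pth'
    print(" -", os.path.basename(c).split("_net_G_A.pth")[0])
```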
###Code
#@markdown ### Provide the path to your dataset and to the folder where the predictions are saved, then play the cell to predict outputs from your unseen images.
import glob
import os.path
latest = "latest"
Data_folder = "" #@param {type:"string"}
Result_folder = "" #@param {type:"string"}
# model name and path
#@markdown ###Do you want to use the current trained model?
Use_the_current_trained_model = True #@param {type:"boolean"}
#@markdown ###If not, please provide the path to the model folder:
Prediction_model_folder = "" #@param {type:"string"}
#@markdown ###What model checkpoint would you like to use?
checkpoint = latest#@param {type:"raw"}
#Here we find the loaded model name and parent path
Prediction_model_name = os.path.basename(Prediction_model_folder)
Prediction_model_path = os.path.dirname(Prediction_model_folder)
#here we check if we use the newly trained network or not
if (Use_the_current_trained_model):
print("Using current trained network")
Prediction_model_name = model_name
Prediction_model_path = model_path
#here we check if the model exists
full_Prediction_model_path = Prediction_model_path+'/'+Prediction_model_name+'/'
if os.path.exists(full_Prediction_model_path):
print("The "+Prediction_model_name+" network will be used.")
else:
W = '\033[0m' # white (normal)
R = '\033[31m' # red
print(R+'!! WARNING: The chosen model does not exist !!'+W)
print('Please make sure you provide a valid model path and model name before proceeding further.')
# Here we check that checkpoint exist, if not the closest one will be chosen
Nb_Checkpoint = len(glob.glob(os.path.join(full_Prediction_model_path, '*G_A.pth')))
print(Nb_Checkpoint)
if not checkpoint == "latest":
if checkpoint < 10:
checkpoint = 5
if not checkpoint % 5 == 0:
checkpoint = ((int(checkpoint / 5)-1) * 5)
print (bcolors.WARNING + " Your chosen checkpoints is not divisible by 5; therefore the checkpoints chosen is now:",checkpoints)
if checkpoint > Nb_Checkpoint*5:
checkpoint = "latest"
if checkpoint == Nb_Checkpoint*5:
checkpoint = "latest"
# Here we need to move the data to be analysed so that cycleGAN can find them
Saving_path_prediction= "/content/"+Prediction_model_name
if os.path.exists(Saving_path_prediction):
shutil.rmtree(Saving_path_prediction)
os.makedirs(Saving_path_prediction)
Saving_path_Data_folder = Saving_path_prediction+"/testA"
if os.path.exists(Saving_path_Data_folder):
shutil.rmtree(Saving_path_Data_folder)
os.makedirs(Saving_path_Data_folder)
for files in os.listdir(Data_folder):
shutil.copyfile(Data_folder+"/"+files, Saving_path_Data_folder+"/"+files)
Nb_files_Data_folder = len(os.listdir(Data_folder)) +10
#Here we copy and rename the checkpoint to be used
shutil.copyfile(full_Prediction_model_path+"/"+str(checkpoint)+"_net_G_A.pth", full_Prediction_model_path+"/"+str(checkpoint)+"_net_G.pth")
# This will find the image dimension of a randomly chosen image in Data_folder
random_choice = random.choice(os.listdir(Data_folder))
x = imageio.imread(Data_folder+"/"+random_choice)
#Find image XY dimension
Image_Y = x.shape[0]
Image_X = x.shape[1]
Image_min_dim = min(Image_Y, Image_X)
print(Image_min_dim)
#-------------------------------- Perform predictions -----------------------------
#-------------------------------- Options that can be used to perform predictions -----------------------------
# basic parameters
#('--dataroot', required=True, help='path to images (should have subfolders trainA, trainB, valA, valB, etc)')
#('--name', type=str, default='experiment_name', help='name of the experiment. It decides where to store samples and models')
#('--gpu_ids', type=str, default='0', help='gpu ids: e.g. 0 0,1,2, 0,2. use -1 for CPU')
#('--checkpoints_dir', type=str, default='./checkpoints', help='models are saved here')
# model parameters
#('--model', type=str, default='cycle_gan', help='chooses which model to use. [cycle_gan | pix2pix | test | colorization]')
#('--input_nc', type=int, default=3, help='# of input image channels: 3 for RGB and 1 for grayscale')
#('--output_nc', type=int, default=3, help='# of output image channels: 3 for RGB and 1 for grayscale')
#('--ngf', type=int, default=64, help='# of gen filters in the last conv layer')
#('--ndf', type=int, default=64, help='# of discrim filters in the first conv layer')
#('--netD', type=str, default='basic', help='specify discriminator architecture [basic | n_layers | pixel]. The basic model is a 70x70 PatchGAN. n_layers allows you to specify the layers in the discriminator')
#('--netG', type=str, default='resnet_9blocks', help='specify generator architecture [resnet_9blocks | resnet_6blocks | unet_256 | unet_128]')
#('--n_layers_D', type=int, default=3, help='only used if netD==n_layers')
#('--norm', type=str, default='instance', help='instance normalization or batch normalization [instance | batch | none]')
#('--init_type', type=str, default='normal', help='network initialization [normal | xavier | kaiming | orthogonal]')
#('--init_gain', type=float, default=0.02, help='scaling factor for normal, xavier and orthogonal.')
#('--no_dropout', action='store_true', help='no dropout for the generator')
# dataset parameters
#('--dataset_mode', type=str, default='unaligned', help='chooses how datasets are loaded. [unaligned | aligned | single | colorization]')
#('--direction', type=str, default='AtoB', help='AtoB or BtoA')
#('--serial_batches', action='store_true', help='if true, takes images in order to make batches, otherwise takes them randomly')
#('--num_threads', default=4, type=int, help='# threads for loading data')
#('--batch_size', type=int, default=1, help='input batch size')
#('--load_size', type=int, default=286, help='scale images to this size')
#('--crop_size', type=int, default=256, help='then crop to this size')
#('--max_dataset_size', type=int, default=float("inf"), help='Maximum number of samples allowed per dataset. If the dataset directory contains more than max_dataset_size, only a subset is loaded.')
#('--preprocess', type=str, default='resize_and_crop', help='scaling and cropping of images at load time [resize_and_crop | crop | scale_width | scale_width_and_crop | none]')
#('--no_flip', action='store_true', help='if specified, do not flip the images for data augmentation')
#('--display_winsize', type=int, default=256, help='display window size for both visdom and HTML')
# additional parameters
#('--epoch', type=str, default='latest', help='which epoch to load? set to latest to use latest cached model')
#('--load_iter', type=int, default='0', help='which iteration to load? if load_iter > 0, the code will load models by iter_[load_iter]; otherwise, the code will load models by [epoch]')
#('--verbose', action='store_true', help='if specified, print more debugging information')
#('--suffix', default='', type=str, help='customized suffix: opt.name = opt.name + suffix: e.g., {model}_{netG}_size{load_size}')
#('--ntest', type=int, default=float("inf"), help='# of test examples.')
#('--results_dir', type=str, default='./results/', help='saves results here.')
#('--aspect_ratio', type=float, default=1.0, help='aspect ratio of result images')
#('--phase', type=str, default='test', help='train, val, test, etc')
# Dropout and Batchnorm have different behaviour during training and test.
#('--eval', action='store_true', help='use eval mode during test time.')
#('--num_test', type=int, default=50, help='how many test images to run')
# rewrite default values
# To avoid cropping, the load_size should be the same as crop_size
#parser.set_defaults(load_size=parser.get_default('crop_size'))
#------------------------------------------------------------------------
#---------------------------- Predictions are performed here ----------------------
os.chdir("/content")
!python pytorch-CycleGAN-and-pix2pix/test.py --dataroot "$Saving_path_Data_folder" --name "$Prediction_model_name" --model test --no_dropout --preprocess scale_width --load_size $Image_min_dim --crop_size $Image_min_dim --results_dir "$Result_folder" --checkpoints_dir "$Prediction_model_path" --num_test $Nb_files_Data_folder --epoch $checkpoint
#-----------------------------------------------------------------------------------
###Output
_____no_output_____
###Markdown
**6.2. Inspect the predicted output**---
###Code
# @markdown ##Run this cell to display a randomly chosen input and its corresponding predicted output.
import os
# This will display a randomly chosen dataset input and predicted output
random_choice = random.choice(os.listdir(Data_folder))
random_choice_no_extension = os.path.splitext(random_choice)
x = imageio.imread(Result_folder+"/"+Prediction_model_name+"/test_"+str(checkpoint)+"/images/"+random_choice_no_extension[0]+"_real.png")
y = imageio.imread(Result_folder+"/"+Prediction_model_name+"/test_"+str(checkpoint)+"/images/"+random_choice_no_extension[0]+"_fake.png")
f=plt.figure(figsize=(16,8))
plt.subplot(1,2,1)
plt.imshow(x, interpolation='nearest')
plt.title('Input')
plt.axis('off');
plt.subplot(1,2,2)
plt.imshow(y, interpolation='nearest')
plt.title('Prediction')
plt.axis('off');
###Output
_____no_output_____
###Markdown
**CycleGAN**---CycleGAN is a method that can capture the characteristics of one image domain and learn how these characteristics can be translated into another image domain, all in the absence of any paired training examples. It was first published by [Zhu *et al.* in 2017](https://arxiv.org/abs/1703.10593). Unlike pix2pix, the image transformation performed does not require paired images for training (unsupervised learning) and is made possible here by using a set of two Generative Adversarial Networks (GANs) that learn to transform images both from the first domain to the second and vice-versa. **This particular notebook enables unpaired image-to-image translation. If your dataset is paired, you should also consider using the pix2pix notebook.**---*Disclaimer*:This notebook is part of the *Zero-Cost Deep-Learning to Enhance Microscopy* project (https://github.com/HenriquesLab/DeepLearning_Collab/wiki). Jointly developed by the Jacquemet (https://cellmig.org/) and Henriques (https://henriqueslab.github.io/) laboratories. This notebook is based on the following paper: **Unpaired Image-to-Image Translation using Cycle-Consistent Adversarial Networks** from Zhu *et al.*, published on arXiv in 2017 (https://arxiv.org/abs/1703.10593). The source code of the CycleGAN PyTorch implementation can be found at: https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix **Please also cite this original paper when using or developing this notebook.** **License**---
###Code
#@markdown ##Double click to see the license information
#------------------------- LICENSE FOR ZeroCostDL4Mic------------------------------------
#This ZeroCostDL4Mic notebook is distributed under the MIT licence
#------------------------- LICENSE FOR CycleGAN ------------------------------------
#Copyright (c) 2017, Jun-Yan Zhu and Taesung Park
#All rights reserved.
#Redistribution and use in source and binary forms, with or without
#modification, are permitted provided that the following conditions are met:
#* Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#* Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
#AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
#IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
#DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
#FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
#DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
#SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
#CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
#OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
#OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#--------------------------- LICENSE FOR pix2pix --------------------------------
#BSD License
#For pix2pix software
#Copyright (c) 2016, Phillip Isola and Jun-Yan Zhu
#All rights reserved.
#Redistribution and use in source and binary forms, with or without
#modification, are permitted provided that the following conditions are met:
#* Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#* Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#----------------------------- LICENSE FOR DCGAN --------------------------------
#BSD License
#For dcgan.torch software
#Copyright (c) 2015, Facebook, Inc. All rights reserved.
#Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
#Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
#Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
#Neither the name Facebook nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
#THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
###Output
_____no_output_____
###Markdown
**0. Before getting started**--- To train CycleGAN, **you only need two folders containing PNG images**. The images do not need to be paired. While you do not need paired images to train CycleGAN, if possible, **we strongly recommend that you generate a paired dataset. This means that the same image needs to be acquired in the two conditions. These images can be used to assess the quality of your trained model (Quality control dataset)**. The quality control assessment can be done directly in this notebook. Please note that you currently can **only use .png files!** Here's a common data structure that can work: * Experiment A - **Training dataset (non-matching images)** - Training_source - img_1.png, img_2.png, ... - Training_target - img_1.png, img_2.png, ... - **Quality control dataset (matching images)** - Training_source - img_1.png, img_2.png - Training_target - img_1.png, img_2.png - **Data to be predicted** - **Results**---**Important note**- If you wish to **Train a network from scratch** using your own dataset (and we encourage everyone to do that), you will need to run **sections 1 - 4**, then use **section 5** to assess the quality of your model and **section 6** to run predictions using the model that you trained.- If you wish to **Evaluate your model** using a model previously generated and saved on your Google Drive, you will only need to run **sections 1 and 2** to set up the notebook, then use **section 5** to assess the quality of your model.- If you only wish to **run predictions** using a model previously generated and saved on your Google Drive, you will only need to run **sections 1 and 2** to set up the notebook, then use **section 6** to run the predictions on the desired model.--- **1. Initialise the Colab session**--- **1.1. Check for GPU access**---By default, the session should be using Python 3 and GPU acceleration, but it is possible to ensure that these are set properly by doing the following: Go to **Runtime -> Change the Runtime type****Runtime type: Python 3** *(Python 3 is the programming language in which this program is written)***Accelerator: GPU** *(Graphics processing unit)*
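Since only .png files are supported, a quick way to sanity-check a dataset folder is sketched below (the path is a hypothetical example; adapt it to your own Drive):
```python
# Minimal sketch: verify that a dataset folder only contains .png files
# (the folder path below is a hypothetical example)
import os

folder = "/content/gdrive/MyDrive/Experiment_A/Training_source"
non_png = [f for f in os.listdir(folder) if not f.lower().endswith(".png")]
if non_png:
    print("Non-PNG files found:", non_png)
else:
    print("All files are .png")
```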
###Code
#@markdown ##Run this cell to check if you have GPU access
import tensorflow as tf
if tf.test.gpu_device_name()=='':
print('You do not have GPU access.')
print('Did you change your runtime?')
print('If the runtime setting is correct, then Google did not allocate a GPU for your session.')
print('Expect slow performance. To access a GPU, try reconnecting later.')
else:
print('You have GPU access')
!nvidia-smi
###Output
_____no_output_____
###Markdown
**1.2. Mount your Google Drive**--- To use this notebook on the data present in your Google Drive, you need to mount your Google Drive to this notebook. Play the cell below to mount your Google Drive and follow the link. In the new browser window, select your drive and select 'Allow', copy the code, paste it into the cell and press enter. This will give Colab access to the data on the drive. Once this is done, your data are available in the **Files** tab on the top left of the notebook.
###Code
#@markdown ##Play the cell to connect your Google Drive to Colab
#@markdown * Click on the URL.
#@markdown * Sign in your Google Account.
#@markdown * Copy the authorization code.
#@markdown * Enter the authorization code.
#@markdown * Click on "Files" site on the right. Refresh the site. Your Google Drive folder should now be available here as "drive".
# mount user's Google Drive to Google Colab.
from google.colab import drive
drive.mount('/content/gdrive')
###Output
_____no_output_____
###Markdown
**2. Install CycleGAN and dependencies**---
###Code
Notebook_version = ['1.12']
#@markdown ##Install CycleGAN and dependencies
#------- Code from the cycleGAN demo notebook starts here -------
#Here, we install libraries which are not already included in Colab.
import sys
before = [str(m) for m in sys.modules]
!git clone https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix
import os
os.chdir('pytorch-CycleGAN-and-pix2pix/')
!pip install -r requirements.txt
!pip install fpdf
import imageio
from skimage import data
from skimage import exposure
from skimage.exposure import match_histograms
from skimage.util import img_as_int
# ------- Common variable to all ZeroCostDL4Mic notebooks -------
import numpy as np
from matplotlib import pyplot as plt
import urllib
import os, random
import shutil
import zipfile
from tifffile import imread, imsave
import time
import sys
from pathlib import Path
import pandas as pd
import csv
from glob import glob
from scipy import signal
from scipy import ndimage
from skimage import io
from sklearn.linear_model import LinearRegression
from skimage.util import img_as_uint
import matplotlib as mpl
from skimage.metrics import structural_similarity
from skimage.metrics import peak_signal_noise_ratio as psnr
from astropy.visualization import simple_norm
from skimage import img_as_float32
from skimage.util import img_as_ubyte
from tqdm import tqdm
from fpdf import FPDF, HTMLMixin
from datetime import datetime
from pip._internal.operations.freeze import freeze
import subprocess
# Colors for the warning messages
class bcolors:
WARNING = '\033[31m'
#Disable some of the tensorflow warnings
import warnings
warnings.filterwarnings("ignore")
print("Libraries installed")
# Check if this is the latest version of the notebook
Latest_notebook_version = pd.read_csv("https://raw.githubusercontent.com/HenriquesLab/ZeroCostDL4Mic/master/Colab_notebooks/Latest_ZeroCostDL4Mic_Release.csv")
if Notebook_version == list(Latest_notebook_version.columns):
print("This notebook is up-to-date.")
if not Notebook_version == list(Latest_notebook_version.columns):
print(bcolors.WARNING +"A new version of this notebook has been released. We recommend that you download it at https://github.com/HenriquesLab/ZeroCostDL4Mic/wiki")
def pdf_export(trained = False, augmentation = False, pretrained_model = False):
class MyFPDF(FPDF, HTMLMixin):
pass
pdf = MyFPDF()
pdf.add_page()
pdf.set_right_margin(-1)
pdf.set_font("Arial", size = 11, style='B')
Network = 'cycleGAN'
day = datetime.now()
datetime_str = str(day)[0:10]
Header = 'Training report for '+Network+' model ('+model_name+')\nDate: '+datetime_str
pdf.multi_cell(180, 5, txt = Header, align = 'L')
# add another cell
if trained:
training_time = "Training time: "+str(hour)+ "hour(s) "+str(mins)+"min(s) "+str(round(sec))+"sec(s)"
pdf.cell(190, 5, txt = training_time, ln = 1, align='L')
pdf.ln(1)
Header_2 = 'Information for your materials and method:'
pdf.cell(190, 5, txt=Header_2, ln=1, align='L')
all_packages = ''
for requirement in freeze(local_only=True):
all_packages = all_packages+requirement+', '
#print(all_packages)
#Main Packages
main_packages = ''
version_numbers = []
for name in ['tensorflow','numpy','torch']:
find_name=all_packages.find(name)
main_packages = main_packages+all_packages[find_name:all_packages.find(',',find_name)]+', '
#Version numbers only here:
version_numbers.append(all_packages[find_name+len(name)+2:all_packages.find(',',find_name)])
cuda_version = subprocess.run('nvcc --version',stdout=subprocess.PIPE, shell=True)
cuda_version = cuda_version.stdout.decode('utf-8')
cuda_version = cuda_version[cuda_version.find(', V')+3:-1]
gpu_name = subprocess.run('nvidia-smi',stdout=subprocess.PIPE, shell=True)
gpu_name = gpu_name.stdout.decode('utf-8')
gpu_name = gpu_name[gpu_name.find('Tesla'):gpu_name.find('Tesla')+10]
#print(cuda_version[cuda_version.find(', V')+3:-1])
#print(gpu_name)
shape = io.imread(Training_source+'/'+os.listdir(Training_source)[1]).shape
dataset_size = len(os.listdir(Training_source))
text = 'The '+Network+' model was trained from scratch for '+str(number_of_epochs)+' epochs on '+str(dataset_size)+' paired image patches (image dimensions: '+str(shape)+', patch size: ('+str(patch_size)+','+str(patch_size)+')) with a batch size of '+str(batch_size)+' and a least-square GAN loss function, using the '+Network+' ZeroCostDL4Mic notebook (v '+Notebook_version[0]+') (von Chamier & Laine et al., 2020). Key python packages used include tensorflow (v '+version_numbers[0]+'), numpy (v '+version_numbers[1]+'), torch (v '+version_numbers[2]+'), cuda (v '+cuda_version+'). The training was accelerated using a '+gpu_name+' GPU.'
if pretrained_model:
text = 'The '+Network+' model was trained for '+str(number_of_epochs)+' epochs on '+str(dataset_size)+' paired image patches (image dimensions: '+str(shape)+', patch size: ('+str(patch_size)+','+str(patch_size)+')) with a batch size of '+str(batch_size)+' and a least-square GAN loss function, using the '+Network+' ZeroCostDL4Mic notebook (v '+Notebook_version[0]+') (von Chamier & Laine et al., 2020). The model was retrained from a pretrained model. Key python packages used include tensorflow (v '+version_numbers[0]+'), numpy (v '+version_numbers[1]+'), torch (v '+version_numbers[2]+'), cuda (v '+cuda_version+'). The training was accelerated using a '+gpu_name+' GPU.'
pdf.set_font('')
pdf.set_font_size(10.)
pdf.multi_cell(190, 5, txt = text, align='L')
pdf.set_font('')
pdf.set_font('Arial', size = 10, style = 'B')
pdf.ln(1)
pdf.cell(28, 5, txt='Augmentation: ', ln=0)
pdf.set_font('')
if augmentation:
aug_text = 'The dataset was augmented by default'
else:
aug_text = 'No augmentation was used for training.'
pdf.multi_cell(190, 5, txt=aug_text, align='L')
pdf.set_font('Arial', size = 11, style = 'B')
pdf.ln(1)
pdf.cell(180, 5, txt = 'Parameters', align='L', ln=1)
pdf.set_font('')
pdf.set_font_size(10.)
if Use_Default_Advanced_Parameters:
pdf.cell(200, 5, txt='Default Advanced Parameters were enabled')
pdf.cell(200, 5, txt='The following parameters were used for training:')
pdf.ln(1)
html = """
<table width=40% style="margin-left:0px;">
<tr>
<th width = 50% align="left">Parameter</th>
<th width = 50% align="left">Value</th>
</tr>
<tr>
<td width = 50%>number_of_epochs</td>
<td width = 50%>{0}</td>
</tr>
<tr>
<td width = 50%>patch_size</td>
<td width = 50%>{1}</td>
</tr>
<tr>
<td width = 50%>batch_size</td>
<td width = 50%>{2}</td>
</tr>
<tr>
<td width = 50%>initial_learning_rate</td>
<td width = 50%>{3}</td>
</tr>
</table>
""".format(number_of_epochs,str(patch_size)+'x'+str(patch_size),batch_size,initial_learning_rate)
pdf.write_html(html)
#pdf.multi_cell(190, 5, txt = text_2, align='L')
pdf.set_font("Arial", size = 11, style='B')
pdf.ln(1)
pdf.cell(190, 5, txt = 'Training Dataset', align='L', ln=1)
pdf.set_font('')
pdf.set_font('Arial', size = 10, style = 'B')
pdf.cell(30, 5, txt= 'Training_source:', align = 'L', ln=0)
pdf.set_font('')
pdf.multi_cell(170, 5, txt = Training_source, align = 'L')
pdf.set_font('')
pdf.set_font('Arial', size = 10, style = 'B')
pdf.cell(29, 5, txt= 'Training_target:', align = 'L', ln=0)
pdf.set_font('')
pdf.multi_cell(170, 5, txt = Training_target, align = 'L')
#pdf.cell(190, 5, txt=aug_text, align='L', ln=1)
pdf.ln(1)
pdf.set_font('')
pdf.set_font('Arial', size = 10, style = 'B')
pdf.cell(22, 5, txt= 'Model Path:', align = 'L', ln=0)
pdf.set_font('')
pdf.multi_cell(170, 5, txt = model_path+'/'+model_name, align = 'L')
pdf.ln(1)
pdf.cell(60, 5, txt = 'Example Training pair', ln=1)
pdf.ln(1)
exp_size = io.imread('/content/TrainingDataExample_cycleGAN.png').shape
pdf.image('/content/TrainingDataExample_cycleGAN.png', x = 11, y = None, w = round(exp_size[1]/8), h = round(exp_size[0]/8))
pdf.ln(1)
ref_1 = 'References:\n - ZeroCostDL4Mic: von Chamier, Lucas & Laine, Romain, et al. "ZeroCostDL4Mic: an open platform to simplify access and use of Deep-Learning in Microscopy." BioRxiv (2020).'
pdf.multi_cell(190, 5, txt = ref_1, align='L')
ref_2 = '- cycleGAN: Zhu, Jun-Yan, et al. "Unpaired image-to-image translation using cycle-consistent adversarial networks." Proceedings of the IEEE international conference on computer vision. 2017.'
pdf.multi_cell(190, 5, txt = ref_2, align='L')
# if Use_Data_augmentation:
# ref_3 = '- Augmentor: Bloice, Marcus D., Christof Stocker, and Andreas Holzinger. "Augmentor: an image augmentation library for machine learning." arXiv preprint arXiv:1708.04680 (2017).'
# pdf.multi_cell(190, 5, txt = ref_3, align='L')
pdf.ln(3)
reminder = 'Important:\nRemember to perform the quality control step on all newly trained models\nPlease consider depositing your training dataset on Zenodo'
pdf.set_font('Arial', size = 11, style='B')
pdf.multi_cell(190, 5, txt=reminder, align='C')
pdf.output(model_path+'/'+model_name+'/'+model_name+"_training_report.pdf")
def qc_pdf_export():
class MyFPDF(FPDF, HTMLMixin):
pass
pdf = MyFPDF()
pdf.add_page()
pdf.set_right_margin(-1)
pdf.set_font("Arial", size = 11, style='B')
Network = 'cycleGAN'
day = datetime.now()
datetime_str = str(day)[0:10]
Header = 'Quality Control report for '+Network+' model ('+QC_model_name+')\nDate: '+datetime_str
pdf.multi_cell(180, 5, txt = Header, align = 'L')
all_packages = ''
for requirement in freeze(local_only=True):
all_packages = all_packages+requirement+', '
pdf.set_font('')
pdf.set_font('Arial', size = 11, style = 'B')
pdf.ln(2)
pdf.cell(190, 5, txt = 'Development of Training Losses', ln=1, align='L')
pdf.ln(1)
exp_size = io.imread(full_QC_model_path+'Quality Control/SSIMvsCheckpoint_data.png').shape
pdf.image(full_QC_model_path+'Quality Control/SSIMvsCheckpoint_data.png', x = 11, y = None, w = round(exp_size[1]/8), h = round(exp_size[0]/8))
pdf.ln(2)
pdf.set_font('')
pdf.set_font('Arial', size = 10, style = 'B')
pdf.ln(3)
pdf.cell(80, 5, txt = 'Example Quality Control Visualisation', ln=1)
pdf.ln(1)
exp_size = io.imread(full_QC_model_path+'Quality Control/QC_example_data.png').shape
if Image_type == 'RGB':
pdf.image(full_QC_model_path+'Quality Control/QC_example_data.png', x = 16, y = None, w = round(exp_size[1]/5), h = round(exp_size[0]/5))
if Image_type == 'Grayscale':
pdf.image(full_QC_model_path+'Quality Control/QC_example_data.png', x = 16, y = None, w = round(exp_size[1]/8), h = round(exp_size[0]/8))
pdf.ln(1)
pdf.set_font('')
pdf.set_font('Arial', size = 11, style = 'B')
pdf.ln(1)
pdf.cell(180, 5, txt = 'Quality Control Metrics', align='L', ln=1)
pdf.set_font('')
pdf.set_font_size(10.)
pdf.ln(1)
for checkpoint in os.listdir(full_QC_model_path+'Quality Control'):
if os.path.isdir(os.path.join(full_QC_model_path,'Quality Control',checkpoint)):
pdf.set_font('')
pdf.set_font('Arial', size = 10, style = 'B')
pdf.cell(70, 5, txt = 'Metrics for checkpoint: '+ str(checkpoint), align='L', ln=1)
html = """
<body>
<font size="8" face="Courier New" >
<table width=95% style="margin-left:0px;">"""
with open(full_QC_model_path+'Quality Control/'+str(checkpoint)+'/QC_metrics_'+QC_model_name+str(checkpoint)+'.csv', 'r') as csvfile:
metrics = csv.reader(csvfile)
header = next(metrics)
image = header[0]
mSSIM_PvsGT = header[1]
mSSIM_SvsGT = header[2]
header = """
<tr>
<th width = 60% align="left">{0}</th>
<th width = 20% align="center">{1}</th>
<th width = 20% align="center">{2}</th>
</tr>""".format(image,mSSIM_PvsGT,mSSIM_SvsGT)
html = html+header
for row in metrics:
image = row[0]
mSSIM_PvsGT = row[1]
mSSIM_SvsGT = row[2]
cells = """
<tr>
<td width = 60% align="left">{0}</td>
<td width = 20% align="center">{1}</td>
<td width = 20% align="center">{2}</td>
</tr>""".format(image,str(round(float(mSSIM_PvsGT),3)),str(round(float(mSSIM_SvsGT),3)))
html = html+cells
html = html+"""</body></table>"""
pdf.write_html(html)
pdf.ln(2)
else:
continue
pdf.ln(1)
pdf.set_font('')
pdf.set_font_size(10.)
ref_1 = 'References:\n - ZeroCostDL4Mic: von Chamier, Lucas & Laine, Romain, et al. "ZeroCostDL4Mic: an open platform to simplify access and use of Deep-Learning in Microscopy." BioRxiv (2020).'
pdf.multi_cell(190, 5, txt = ref_1, align='L')
ref_2 = '- cycleGAN: Zhu, Jun-Yan, et al. "Unpaired image-to-image translation using cycle-consistent adversarial networks." Proceedings of the IEEE international conference on computer vision. 2017.'
pdf.multi_cell(190, 5, txt = ref_2, align='L')
pdf.ln(3)
reminder = 'To find the parameters and other information about how this model was trained, go to the training_report.pdf of this model which should be in the folder of the same name.'
pdf.set_font('Arial', size = 11, style='B')
pdf.multi_cell(190, 5, txt=reminder, align='C')
pdf.output(full_QC_model_path+'Quality Control/'+QC_model_name+'_QC_report.pdf')
# Exporting requirements.txt for local run
!pip freeze > ../requirements.txt
after = [str(m) for m in sys.modules]
# Get minimum requirements file
#Add the following lines before all imports:
# import sys
# before = [str(m) for m in sys.modules]
#Add the following line after the imports:
# after = [str(m) for m in sys.modules]
from builtins import any as b_any
def filter_files(file_list, filter_list):
filtered_list = []
for fname in file_list:
if b_any(fname.split('==')[0] in s for s in filter_list):
filtered_list.append(fname)
return filtered_list
df = pd.read_csv('../requirements.txt', delimiter = "\n")
mod_list = [m.split('.')[0] for m in after if not m in before]
req_list_temp = df.values.tolist()
req_list = [x[0] for x in req_list_temp]
# Replace with package name
mod_name_list = [['sklearn', 'scikit-learn'], ['skimage', 'scikit-image']]
mod_replace_list = [[x[1] for x in mod_name_list] if s in [x[0] for x in mod_name_list] else s for s in mod_list]
filtered_list = filter_files(req_list, mod_replace_list)
file=open('../CycleGAN_requirements_simple.txt','w')
for item in filtered_list:
file.writelines(item + '\n')
file.close()
###Output
_____no_output_____
###Markdown
**3. Select your parameters and paths**--- **3.1. Setting main training parameters**--- **Paths for training, predictions and results****`Training_source`, `Training_target`:** These are the paths to your folders containing the Training_source and Training_target training data respectively. To find the paths of the folders containing the respective datasets, go to your Files on the left of the notebook, navigate to the folder containing your files and copy the path by right-clicking on the folder, **Copy path** and pasting it into the right box below.**`model_name`:** Use only my_model-style names, not my-model (Use "_" not "-"). Do not use spaces in the name. Avoid using the name of an existing model (saved in the same folder) as it will be overwritten.**`model_path`**: Enter the path where your model will be saved once trained (for instance your result folder).**Training Parameters****`number_of_epochs`:** Input how many epochs (rounds) the network will be trained. Preliminary results can already be observed after a few (10) epochs, but a full training should run for 200 epochs or more. Evaluate the performance after training (see section 5). **Default value: 200****Advanced Parameters - experienced users only****`patch_size`:** CycleGAN divides the image into patches for training. Input the size of the patches (length of a side). The value should be smaller than the dimensions of the image and divisible by 4. **Default value: 512****When choosing the patch_size, the value should be i) large enough that it will enclose many instances, ii) small enough that the resulting patches fit into the RAM.** **`batch_size`:** This parameter defines the number of patches seen in each training step. Reducing or increasing the **batch size** may slow or speed up your training, respectively, and can influence network performance. **Default value: 1****`initial_learning_rate`:** Input the initial value to be used as the learning rate. **Default value: 0.0002**
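For illustration, the patch_size constraints described above can be checked as follows (a simplified sketch with hypothetical values; the cell below applies its own, slightly more conservative failsafes automatically):
```python
# Sketch: clamp a requested patch_size to the image and round it down to a multiple of 4
# (hypothetical values; the notebook cell below applies equivalent failsafes automatically)
requested_patch_size = 350
image_min_dim = 768                                    # smallest XY dimension of the images
patch_size = min(requested_patch_size, image_min_dim)  # must fit inside the image
patch_size = (patch_size // 4) * 4                     # must be divisible by 4
print(patch_size)                                      # -> 348
```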
###Code
#@markdown ###Path to training images:
Training_source = "" #@param {type:"string"}
InputFile = Training_source+"/*.png"
Training_target = "" #@param {type:"string"}
OutputFile = Training_target+"/*.png"
#Define where the patch file will be saved
base = "/content"
# model name and path
#@markdown ###Name of the model and path to model folder:
model_name = "" #@param {type:"string"}
model_path = "" #@param {type:"string"}
# other parameters for training.
#@markdown ###Training Parameters
#@markdown Number of epochs:
number_of_epochs = 200#@param {type:"number"}
#@markdown ###Advanced Parameters
Use_Default_Advanced_Parameters = True #@param {type:"boolean"}
#@markdown ###If not, please input:
patch_size = 512#@param {type:"number"} # in pixels
batch_size = 1#@param {type:"number"}
initial_learning_rate = 0.0002 #@param {type:"number"}
if (Use_Default_Advanced_Parameters):
print("Default advanced parameters enabled")
batch_size = 1
patch_size = 512
initial_learning_rate = 0.0002
#here we check that no model with the same name already exists; if it does, it will be deleted when training starts (section 4.1)
if os.path.exists(model_path+'/'+model_name):
print(bcolors.WARNING +"!! WARNING: "+model_name+" already exists and will be deleted in the following cell !!")
print(bcolors.WARNING +"To continue training "+model_name+", choose a new model_name here, and load "+model_name+" in section 3.3")
#To use Cyclegan we need to organise the data in a way the model can understand
Saving_path= "/content/"+model_name
#Saving_path= model_path+"/"+model_name
if os.path.exists(Saving_path):
shutil.rmtree(Saving_path)
os.makedirs(Saving_path)
TrainA_Folder = Saving_path+"/trainA"
if os.path.exists(TrainA_Folder):
shutil.rmtree(TrainA_Folder)
os.makedirs(TrainA_Folder)
TrainB_Folder = Saving_path+"/trainB"
if os.path.exists(TrainB_Folder):
shutil.rmtree(TrainB_Folder)
os.makedirs(TrainB_Folder)
# Here we disable the use of a pre-trained model by default (in case the cell is not run)
Use_pretrained_model = False
# Here we enable data augmentation by default (in case the cell is not run)
Use_Data_augmentation = True
# This will display a randomly chosen dataset input and output
random_choice = random.choice(os.listdir(Training_source))
x = imageio.imread(Training_source+"/"+random_choice)
#Find image XY dimension
Image_Y = x.shape[0]
Image_X = x.shape[1]
Image_min_dim = min(Image_Y, Image_X)
#Hyperparameters failsafes
if patch_size > min(Image_Y, Image_X):
patch_size = min(Image_Y, Image_X)
print (bcolors.WARNING + " Your chosen patch_size is bigger than the xy dimension of your image; therefore the patch_size chosen is now:",patch_size)
# Here we check that patch_size is divisible by 4
if not patch_size % 4 == 0:
patch_size = ((int(patch_size / 4)-1) * 4)
print (bcolors.WARNING + " Your chosen patch_size is not divisible by 4; therefore the patch_size chosen is now:",patch_size)
random_choice_2 = random.choice(os.listdir(Training_target))
y = imageio.imread(Training_target+"/"+random_choice_2)
f=plt.figure(figsize=(16,8))
plt.subplot(1,2,1)
plt.imshow(x, interpolation='nearest')
plt.title('Training source')
plt.axis('off');
plt.subplot(1,2,2)
plt.imshow(y, interpolation='nearest')
plt.title('Training target')
plt.axis('off');
plt.savefig('/content/TrainingDataExample_cycleGAN.png',bbox_inches='tight',pad_inches=0)
###Output
_____no_output_____
###Markdown
**3.2. Data augmentation**--- Data augmentation can improve training progress by amplifying differences in the dataset. This can be useful if the available dataset is small since, in this case, it is possible that a network could quickly learn every example in the dataset (overfitting), without augmentation. Augmentation is not necessary for training and if your training dataset is large you should disable it.Data augmentation is performed here by flipping the patches. By default data augmentation is enabled.
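To illustrate what this augmentation does (the flipping itself is handled inside the CycleGAN code during training and is switched off via the `--no_flip` option in section 4.2), here is a minimal numpy sketch:
```python
# Illustration only: flip-based augmentation of an image patch
# (in this notebook the flipping is applied internally by CycleGAN during training)
import numpy as np

patch = np.arange(12).reshape(3, 4)   # stand-in for an image patch
flipped_lr = np.fliplr(patch)         # horizontal (left-right) flip
flipped_ud = np.flipud(patch)         # vertical (up-down) flip
print(patch, flipped_lr, flipped_ud, sep="\n\n")
```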
###Code
#Data augmentation
#@markdown ##Play this cell to enable or disable data augmentation:
Use_Data_augmentation = True #@param {type:"boolean"}
if Use_Data_augmentation:
print("Data augmentation enabled")
if not Use_Data_augmentation:
print("Data augmentation disabled")
###Output
_____no_output_____
###Markdown
**3.3. Using weights from a pre-trained model as initial weights**--- Here, you can set the path to a pre-trained model from which the weights can be extracted and used as a starting point for this training session. **This pre-trained model needs to be a CycleGAN model**. This option allows you to perform training over multiple Colab runtimes or to do transfer learning using models trained outside of ZeroCostDL4Mic. **You do not need to run this section if you want to train a network from scratch**. In order to continue training from the point where the pre-trained model left off, it is advisable to also **load the learning rate** that was used when the training ended. This is automatically saved for models trained with ZeroCostDL4Mic and will be loaded here. If no learning rate can be found in the model folder provided, the default learning rate will be used.
###Code
# @markdown ##Loading weights from a pre-trained network
Use_pretrained_model = False #@param {type:"boolean"}
#@markdown ###If not, please provide the path to the model folder:
pretrained_model_path = "" #@param {type:"string"}
# --------------------- Check if we load a previously trained model ------------------------
if Use_pretrained_model:
h5_file_path_A = os.path.join(pretrained_model_path, "latest_net_G_A.pth")
h5_file_path_B = os.path.join(pretrained_model_path, "latest_net_G_B.pth")
# --------------------- Check that the model exists ------------------------
if not (os.path.exists(h5_file_path_A) and os.path.exists(h5_file_path_B)):
print(bcolors.WARNING+'WARNING: Pretrained model does not exist')
Use_pretrained_model = False
print(bcolors.WARNING+'No pretrained network will be used.')
if os.path.exists(h5_file_path_A) and os.path.exists(h5_file_path_B):
print("Pretrained model "+os.path.basename(pretrained_model_path)+" was found and will be loaded prior to training.")
else:
print(bcolors.WARNING+'No pretrained network will be used.')
###Output
_____no_output_____
###Markdown
**4. Train the network**--- **4.1. Prepare the training data for training**---Here, we use the information from section 3 to prepare the training data into a suitable format for training.
###Code
#@markdown ##Prepare the data for training
print("Data preparation in progress")
if os.path.exists(model_path+'/'+model_name):
shutil.rmtree(model_path+'/'+model_name)
os.makedirs(model_path+'/'+model_name)
#--------------- Here we move the files to trainA and trainB ---------
for f in os.listdir(Training_source):
shutil.copyfile(Training_source+"/"+f, TrainA_Folder+"/"+f)
for files in os.listdir(Training_target):
shutil.copyfile(Training_target+"/"+files, TrainB_Folder+"/"+files)
#---------------------------------------------------------------------
# CycleGAN uses a number of epochs without lr decay and a number of epochs with lr decay
number_of_epochs_lr_stable = int(number_of_epochs/2)
number_of_epochs_lr_decay = int(number_of_epochs/2)
if Use_pretrained_model :
for f in os.listdir(pretrained_model_path):
if (f.startswith("latest_net_")):
shutil.copyfile(pretrained_model_path+"/"+f, model_path+'/'+model_name+"/"+f)
pdf_export(augmentation = Use_Data_augmentation, pretrained_model = Use_pretrained_model)
print("Data ready for training")
###Output
_____no_output_____
###Markdown
**4.2. Start Training**---When playing the cell below you should see updates after each epoch (round). Network training can take some time.* **CRITICAL NOTE:** Google Colab has a time limit for processing (to prevent using GPU power for datamining). Training time must be less than 12 hours! If training takes longer than 12 hours, please decrease the number of epochs or number of patches or continue the training in a second Colab session.Once training is complete, the trained model is automatically saved on your Google Drive, in the **model_path** folder that was selected in Section 3. It is however wise to download the folder from Google Drive as all data can be erased at the next training if using the same folder.
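As a reminder, `number_of_epochs` (section 3.1) is split evenly by the notebook between CycleGAN's constant-learning-rate phase (`--n_epochs`) and its linear learning-rate-decay phase (`--n_epochs_decay`), as done in section 4.1; a minimal sketch of that split:
```python
# Sketch: how number_of_epochs maps onto CycleGAN's two training phases
# (mirrors the split performed in section 4.1; the value below is hypothetical)
number_of_epochs = 200
n_epochs = number_of_epochs // 2          # epochs at the initial learning rate
n_epochs_decay = number_of_epochs // 2    # epochs with linearly decaying learning rate
print(n_epochs, n_epochs_decay)           # -> 100 100
```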
###Code
#@markdown ##Start training
start = time.time()
os.chdir("/content")
#--------------------------------- Command line inputs to change CycleGAN parameters------------
# basic parameters
#('--dataroot', required=True, help='path to images (should have subfolders trainA, trainB, valA, valB, etc)')
#('--name', type=str, default='experiment_name', help='name of the experiment. It decides where to store samples and models')
#('--gpu_ids', type=str, default='0', help='gpu ids: e.g. 0 0,1,2, 0,2. use -1 for CPU')
#('--checkpoints_dir', type=str, default='./checkpoints', help='models are saved here')
# model parameters
#('--model', type=str, default='cycle_gan', help='chooses which model to use. [cycle_gan | pix2pix | test | colorization]')
#('--input_nc', type=int, default=3, help='# of input image channels: 3 for RGB and 1 for grayscale')
#('--output_nc', type=int, default=3, help='# of output image channels: 3 for RGB and 1 for grayscale')
#('--ngf', type=int, default=64, help='# of gen filters in the last conv layer')
#('--ndf', type=int, default=64, help='# of discrim filters in the first conv layer')
#('--netD', type=str, default='basic', help='specify discriminator architecture [basic | n_layers | pixel]. The basic model is a 70x70 PatchGAN. n_layers allows you to specify the layers in the discriminator')
#('--netG', type=str, default='resnet_9blocks', help='specify generator architecture [resnet_9blocks | resnet_6blocks | unet_256 | unet_128]')
#('--n_layers_D', type=int, default=3, help='only used if netD==n_layers')
#('--norm', type=str, default='instance', help='instance normalization or batch normalization [instance | batch | none]')
#('--init_type', type=str, default='normal', help='network initialization [normal | xavier | kaiming | orthogonal]')
#('--init_gain', type=float, default=0.02, help='scaling factor for normal, xavier and orthogonal.')
#('--no_dropout', action='store_true', help='no dropout for the generator')
# dataset parameters
#('--dataset_mode', type=str, default='unaligned', help='chooses how datasets are loaded. [unaligned | aligned | single | colorization]')
#('--direction', type=str, default='AtoB', help='AtoB or BtoA')
#('--serial_batches', action='store_true', help='if true, takes images in order to make batches, otherwise takes them randomly')
#('--num_threads', default=4, type=int, help='# threads for loading data')
#('--batch_size', type=int, default=1, help='input batch size')
#('--load_size', type=int, default=286, help='scale images to this size')
#('--crop_size', type=int, default=256, help='then crop to this size')
#('--max_dataset_size', type=int, default=float("inf"), help='Maximum number of samples allowed per dataset. If the dataset directory contains more than max_dataset_size, only a subset is loaded.')
#('--preprocess', type=str, default='resize_and_crop', help='scaling and cropping of images at load time [resize_and_crop | crop | scale_width | scale_width_and_crop | none]')
#('--no_flip', action='store_true', help='if specified, do not flip the images for data augmentation')
#('--display_winsize', type=int, default=256, help='display window size for both visdom and HTML')
# additional parameters
#('--epoch', type=str, default='latest', help='which epoch to load? set to latest to use latest cached model')
#('--load_iter', type=int, default='0', help='which iteration to load? if load_iter > 0, the code will load models by iter_[load_iter]; otherwise, the code will load models by [epoch]')
#('--verbose', action='store_true', help='if specified, print more debugging information')
#('--suffix', default='', type=str, help='customized suffix: opt.name = opt.name + suffix: e.g., {model}_{netG}_size{load_size}')
# visdom and HTML visualization parameters
#('--display_freq', type=int, default=400, help='frequency of showing training results on screen')
#('--display_ncols', type=int, default=4, help='if positive, display all images in a single visdom web panel with certain number of images per row.')
#('--display_id', type=int, default=1, help='window id of the web display')
#('--display_server', type=str, default="http://localhost", help='visdom server of the web display')
#('--display_env', type=str, default='main', help='visdom display environment name (default is "main")')
#('--display_port', type=int, default=8097, help='visdom port of the web display')
#('--update_html_freq', type=int, default=1000, help='frequency of saving training results to html')
#('--print_freq', type=int, default=100, help='frequency of showing training results on console')
#('--no_html', action='store_true', help='do not save intermediate training results to [opt.checkpoints_dir]/[opt.name]/web/')
# network saving and loading parameters
#('--save_latest_freq', type=int, default=5000, help='frequency of saving the latest results')
#('--save_epoch_freq', type=int, default=5, help='frequency of saving checkpoints at the end of epochs')
#('--save_by_iter', action='store_true', help='whether saves model by iteration')
#('--continue_train', action='store_true', help='continue training: load the latest model')
#('--epoch_count', type=int, default=1, help='the starting epoch count, we save the model by <epoch_count>, <epoch_count>+<save_latest_freq>, ...')
#('--phase', type=str, default='train', help='train, val, test, etc')
# training parameters
#('--n_epochs', type=int, default=100, help='number of epochs with the initial learning rate')
#('--n_epochs_decay', type=int, default=100, help='number of epochs to linearly decay learning rate to zero')
#('--beta1', type=float, default=0.5, help='momentum term of adam')
#('--lr', type=float, default=0.0002, help='initial learning rate for adam')
#('--gan_mode', type=str, default='lsgan', help='the type of GAN objective. [vanilla| lsgan | wgangp]. vanilla GAN loss is the cross-entropy objective used in the original GAN paper.')
#('--pool_size', type=int, default=50, help='the size of image buffer that stores previously generated images')
#('--lr_policy', type=str, default='linear', help='learning rate policy. [linear | step | plateau | cosine]')
#('--lr_decay_iters', type=int, default=50, help='multiply by a gamma every lr_decay_iters iterations')
#---------------------------------------------------------
#----- Start the training ------------------------------------
if not Use_pretrained_model:
if Use_Data_augmentation:
!python pytorch-CycleGAN-and-pix2pix/train.py --dataroot "$Saving_path" --input_nc 3 --name $model_name --model cycle_gan --batch_size $batch_size --preprocess scale_width_and_crop --load_size $Image_min_dim --crop_size $patch_size --checkpoints_dir "$model_path" --no_html --n_epochs $number_of_epochs_lr_stable --n_epochs_decay $number_of_epochs_lr_decay --lr $initial_learning_rate --display_id 0 --save_epoch_freq 5
if not Use_Data_augmentation:
!python pytorch-CycleGAN-and-pix2pix/train.py --dataroot "$Saving_path" --input_nc 3 --name $model_name --model cycle_gan --batch_size $batch_size --preprocess scale_width_and_crop --load_size $Image_min_dim --crop_size $patch_size --checkpoints_dir "$model_path" --no_html --n_epochs $number_of_epochs_lr_stable --n_epochs_decay $number_of_epochs_lr_decay --lr $initial_learning_rate --display_id 0 --save_epoch_freq 5 --no_flip
if Use_pretrained_model:
if Use_Data_augmentation:
!python pytorch-CycleGAN-and-pix2pix/train.py --dataroot "$Saving_path" --input_nc 3 --name $model_name --model cycle_gan --batch_size $batch_size --preprocess scale_width_and_crop --load_size $Image_min_dim --crop_size $patch_size --checkpoints_dir "$model_path" --no_html --n_epochs $number_of_epochs_lr_stable --n_epochs_decay $number_of_epochs_lr_decay --lr $initial_learning_rate --display_id 0 --save_epoch_freq 5 --continue_train
if not Use_Data_augmentation:
!python pytorch-CycleGAN-and-pix2pix/train.py --dataroot "$Saving_path" --input_nc 3 --name $model_name --model cycle_gan --batch_size $batch_size --preprocess scale_width_and_crop --load_size $Image_min_dim --crop_size $patch_size --checkpoints_dir "$model_path" --no_html --n_epochs $number_of_epochs_lr_stable --n_epochs_decay $number_of_epochs_lr_decay --lr $initial_learning_rate --display_id 0 --save_epoch_freq 5 --continue_train --no_flip
#---------------------------------------------------------
print("Training, done.")
# Displaying the time elapsed for training
dt = time.time() - start
mins, sec = divmod(dt, 60)
hour, mins = divmod(mins, 60)
print("Time elapsed:",hour, "hour(s)",mins,"min(s)",round(sec),"sec(s)")
# Save training summary as pdf
pdf_export(trained = True, augmentation = Use_Data_augmentation, pretrained_model = Use_pretrained_model)
###Output
_____no_output_____
###Markdown
**5. Evaluate your model**---This section allows the user to perform important quality checks on the validity and generalisability of the trained model. **We highly recommend performing quality control on all newly trained models.** Unfortunately, loss curves are not very informative for GAN networks. Therefore, we perform the QC here using a test dataset. **5.1. Choose the model you want to assess**
###Code
# model name and path
#@markdown ###Do you want to assess the model you just trained ?
Use_the_current_trained_model = True #@param {type:"boolean"}
#@markdown ###If not, please provide the path to the model folder:
QC_model_folder = "" #@param {type:"string"}
#Here we define the loaded model name and path
QC_model_name = os.path.basename(QC_model_folder)
QC_model_path = os.path.dirname(QC_model_folder)
if (Use_the_current_trained_model):
QC_model_name = model_name
QC_model_path = model_path
full_QC_model_path = QC_model_path+'/'+QC_model_name+'/'
if os.path.exists(full_QC_model_path):
print("The "+QC_model_name+" network will be evaluated")
else:
W = '\033[0m' # white (normal)
R = '\033[31m' # red
print(R+'!! WARNING: The chosen model does not exist !!'+W)
print('Please make sure you provide a valid model path and model name before proceeding further.')
###Output
_____no_output_____
###Markdown
**5.2. Identify the best checkpoint to use to make predictions** CycleGAN saves model checkpoints every five epochs. Due to the stochastic nature of GAN networks, the last checkpoint is not always the best one to use. As a consequence, it can be challenging to choose the most suitable checkpoint to use to make predictions. This section allows you to perform predictions using all the saved checkpoints and to estimate the quality of these predictions by comparing them to the provided ground-truth images. Metrics used include:**1. The SSIM (structural similarity) map** The SSIM metric is used to evaluate whether two images contain the same structures. It is a normalized metric and an SSIM of 1 indicates a perfect similarity between two images. Therefore for SSIM, the closer to 1, the better. The SSIM maps are constructed by calculating the SSIM metric in each pixel by considering the surrounding structural similarity in the neighbourhood of that pixel (currently defined as a window of 11 pixels and with Gaussian weighting of 1.5 pixel standard deviation, see our Wiki for more info). **mSSIM** is the SSIM value calculated across the entire image.**The output below shows the SSIM maps with the mSSIM.****2. The RSE (Root Squared Error) map** This is a display of the root of the squared difference between the normalized predicted and target or the source and the target. In this case, a smaller RSE is better. A perfect agreement between target and prediction will lead to an RSE map showing zeros everywhere (dark).**NRMSE (normalised root mean squared error)** gives the average difference between all pixels in the images compared to each other. Good agreement yields low NRMSE scores.**PSNR (Peak signal-to-noise ratio)** is a metric that gives the difference between the ground truth and prediction (or source input) in decibels, using the peak pixel values of the prediction and the MSE between the images. The higher the score, the better the agreement.**The output below shows the RSE maps with the NRMSE and PSNR values.**
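For reference, the standard definitions of these metrics are given below; note that the notebook normalises the images before computing them, so the absolute values it reports may differ slightly from those obtained on raw pixel values. Here $L$ denotes the data range (the maximum possible pixel value):

$$\mathrm{MSE}(x,y)=\frac{1}{N}\sum_{i=1}^{N}\left(x_i-y_i\right)^{2},\qquad \mathrm{PSNR}(x,y)=10\,\log_{10}\!\left(\frac{L^{2}}{\mathrm{MSE}(x,y)}\right)$$

$$\mathrm{SSIM}(x,y)=\frac{\left(2\mu_x\mu_y+c_1\right)\left(2\sigma_{xy}+c_2\right)}{\left(\mu_x^{2}+\mu_y^{2}+c_1\right)\left(\sigma_x^{2}+\sigma_y^{2}+c_2\right)}$$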
###Code
#@markdown ##Choose the folders that contain your Quality Control dataset
Source_QC_folder = "" #@param{type:"string"}
Target_QC_folder = "" #@param{type:"string"}
Image_type = "Grayscale" #@param ["Grayscale", "RGB"]
# average function
def Average(lst):
return sum(lst) / len(lst)
# Create a quality control folder
if os.path.exists(QC_model_path+"/"+QC_model_name+"/Quality Control"):
shutil.rmtree(QC_model_path+"/"+QC_model_name+"/Quality Control")
os.makedirs(QC_model_path+"/"+QC_model_name+"/Quality Control")
# List images in Source_QC_folder
# This will find the image dimension of a randomly chosen image in Source_QC_folder
random_choice = random.choice(os.listdir(Source_QC_folder))
x = imageio.imread(Source_QC_folder+"/"+random_choice)
#Find image XY dimension
Image_Y = x.shape[0]
Image_X = x.shape[1]
Image_min_dim = min(Image_Y, Image_X)
# Here we need to move the data to be analysed so that cycleGAN can find them
Saving_path_QC= "/content/"+QC_model_name
if os.path.exists(Saving_path_QC):
shutil.rmtree(Saving_path_QC)
os.makedirs(Saving_path_QC)
Saving_path_QC_folder = Saving_path_QC+"_images"
if os.path.exists(Saving_path_QC_folder):
shutil.rmtree(Saving_path_QC_folder)
os.makedirs(Saving_path_QC_folder)
#Here we copy and rename all the generator checkpoints (*_net_G_A.pth -> *_net_G.pth) so that each one can be loaded and analysed
for f in os.listdir(full_QC_model_path):
shortname = f[:-6]
shortname = shortname + ".pth"
if f.endswith("net_G_A.pth"):
shutil.copyfile(full_QC_model_path+f, Saving_path_QC+"/"+shortname)
for files in os.listdir(Source_QC_folder):
shutil.copyfile(Source_QC_folder+"/"+files, Saving_path_QC_folder+"/"+files)
# This will find the image dimension of a randomly chosen image in Source_QC_folder
random_choice = random.choice(os.listdir(Source_QC_folder))
x = imageio.imread(Source_QC_folder+"/"+random_choice)
#Find image XY dimension
Image_Y = x.shape[0]
Image_X = x.shape[1]
Image_min_dim = int(min(Image_Y, Image_X))
Nb_Checkpoint = len(os.listdir(Saving_path_QC))
print(Nb_Checkpoint)
## Initiate list
Checkpoint_list = []
Average_ssim_score_list = []
for j in range(1, len(os.listdir(Saving_path_QC))+1):
checkpoints = j*5
if checkpoints == Nb_Checkpoint*5:
checkpoints = "latest"
print("The checkpoint currently analysed is ="+str(checkpoints))
Checkpoint_list.append(checkpoints)
# Create a quality control/Prediction Folder
QC_prediction_results = QC_model_path+"/"+QC_model_name+"/Quality Control/"+str(checkpoints)
if os.path.exists(QC_prediction_results):
shutil.rmtree(QC_prediction_results)
os.makedirs(QC_prediction_results)
#---------------------------- Predictions are performed here ----------------------
os.chdir("/content")
!python pytorch-CycleGAN-and-pix2pix/test.py --dataroot "$Saving_path_QC_folder" --name "$QC_model_name" --model test --epoch $checkpoints --no_dropout --preprocess scale_width --load_size $Image_min_dim --crop_size $Image_min_dim --results_dir "$QC_prediction_results" --checkpoints_dir "/content/"
#-----------------------------------------------------------------------------------
#Here we need to move the data again and remove all the unnecessary folders
Checkpoint_name = "test_"+str(checkpoints)
QC_results_images = QC_prediction_results+"/"+QC_model_name+"/"+Checkpoint_name+"/images"
QC_results_images_files = os.listdir(QC_results_images)
for f in QC_results_images_files:
shutil.copyfile(QC_results_images+"/"+f, QC_prediction_results+"/"+f)
os.chdir("/content")
#Here we clean up the extra files
shutil.rmtree(QC_prediction_results+"/"+QC_model_name)
#-------------------------------- QC for RGB ------------------------------------
if Image_type == "RGB":
# List images in Source_QC_folder
# This will find the image dimension of a randomly chosen image in Source_QC_folder
random_choice = random.choice(os.listdir(Source_QC_folder))
x = imageio.imread(Source_QC_folder+"/"+random_choice)
def ssim(img1, img2):
return structural_similarity(img1,img2,data_range=1.,full=True, multichannel=True)
# Open and create the csv file that will contain all the QC metrics
with open(QC_model_path+"/"+QC_model_name+"/Quality Control/"+str(checkpoints)+"/"+"QC_metrics_"+QC_model_name+str(checkpoints)+".csv", "w", newline='') as file:
writer = csv.writer(file)
# Write the header in the csv file
writer.writerow(["image #","Prediction v. GT mSSIM","Input v. GT mSSIM"])
# Initiate list
ssim_score_list = []
# Let's loop through the provided dataset in the QC folders
for i in os.listdir(Source_QC_folder):
if not os.path.isdir(os.path.join(Source_QC_folder,i)):
print('Running QC on: '+i)
shortname_no_PNG = i[:-4]
# -------------------------------- Target test data (Ground truth) --------------------------------
test_GT = imageio.imread(os.path.join(Target_QC_folder, i), as_gray=False, pilmode="RGB")
# -------------------------------- Source test data --------------------------------
test_source = imageio.imread(os.path.join(QC_model_path+"/"+QC_model_name+"/Quality Control/"+str(checkpoints),shortname_no_PNG+"_real.png"))
# -------------------------------- Prediction --------------------------------
test_prediction = imageio.imread(os.path.join(QC_model_path+"/"+QC_model_name+"/Quality Control/"+str(checkpoints),shortname_no_PNG+"_fake.png"))
#--------------------------- Here we normalise using histogram matching --------------------------------
test_prediction_matched = match_histograms(test_prediction, test_GT, multichannel=True)
test_source_matched = match_histograms(test_source, test_GT, multichannel=True)
# -------------------------------- Calculate the metric maps and save them --------------------------------
# Calculate the SSIM maps
index_SSIM_GTvsPrediction, img_SSIM_GTvsPrediction = ssim(test_GT, test_prediction_matched)
index_SSIM_GTvsSource, img_SSIM_GTvsSource = ssim(test_GT, test_source_matched)
ssim_score_list.append(index_SSIM_GTvsPrediction)
#Save ssim_maps
img_SSIM_GTvsPrediction_8bit = (img_SSIM_GTvsPrediction* 255).astype("uint8")
io.imsave(QC_model_path+'/'+QC_model_name+"/Quality Control/"+str(checkpoints)+"/SSIM_GTvsPrediction_"+shortname_no_PNG+'.tif',img_SSIM_GTvsPrediction_8bit)
img_SSIM_GTvsSource_8bit = (img_SSIM_GTvsSource* 255).astype("uint8")
io.imsave(QC_model_path+'/'+QC_model_name+"/Quality Control/"+str(checkpoints)+"/SSIM_GTvsSource_"+shortname_no_PNG+'.tif',img_SSIM_GTvsSource_8bit)
writer.writerow([i,str(index_SSIM_GTvsPrediction),str(index_SSIM_GTvsSource)])
#Here we calculate the average SSIM over all QC images for this checkpoint
Average_SSIM_checkpoint = Average(ssim_score_list)
Average_ssim_score_list.append(Average_SSIM_checkpoint)
#------------------------------------------- QC for Grayscale ----------------------------------------------
if Image_type == "Grayscale":
def ssim(img1, img2):
return structural_similarity(img1,img2,data_range=1.,full=True, gaussian_weights=True, use_sample_covariance=False, sigma=1.5)
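# Helper functions below: percentile-based normalisation to bring ground truth, source and prediction
# onto a comparable intensity scale before the metrics are computed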
def normalize(x, pmin=3, pmax=99.8, axis=None, clip=False, eps=1e-20, dtype=np.float32):
mi = np.percentile(x,pmin,axis=axis,keepdims=True)
ma = np.percentile(x,pmax,axis=axis,keepdims=True)
return normalize_mi_ma(x, mi, ma, clip=clip, eps=eps, dtype=dtype)
def normalize_mi_ma(x, mi, ma, clip=False, eps=1e-20, dtype=np.float32):#dtype=np.float32
if dtype is not None:
x = x.astype(dtype,copy=False)
mi = dtype(mi) if np.isscalar(mi) else mi.astype(dtype,copy=False)
ma = dtype(ma) if np.isscalar(ma) else ma.astype(dtype,copy=False)
eps = dtype(eps)
try:
import numexpr
x = numexpr.evaluate("(x - mi) / ( ma - mi + eps )")
except ImportError:
x = (x - mi) / ( ma - mi + eps )
if clip:
x = np.clip(x,0,1)
return x
def norm_minmse(gt, x, normalize_gt=True):
if normalize_gt:
gt = normalize(gt, 0.1, 99.9, clip=False).astype(np.float32, copy = False)
x = x.astype(np.float32, copy=False) - np.mean(x)
#x = x - np.mean(x)
gt = gt.astype(np.float32, copy=False) - np.mean(gt)
#gt = gt - np.mean(gt)
scale = np.cov(x.flatten(), gt.flatten())[0, 1] / np.var(x.flatten())
return gt, scale * x
# Open and create the csv file that will contain all the QC metrics
with open(QC_model_path+"/"+QC_model_name+"/Quality Control/"+str(checkpoints)+"/"+"QC_metrics_"+QC_model_name+str(checkpoints)+".csv", "w", newline='') as file:
writer = csv.writer(file)
# Write the header in the csv file
writer.writerow(["image #","Prediction v. GT mSSIM","Input v. GT mSSIM", "Prediction v. GT NRMSE", "Input v. GT NRMSE", "Prediction v. GT PSNR", "Input v. GT PSNR"])
# Initiate list (collects the SSIM scores of all QC images for this checkpoint)
ssim_score_list = []
# Let's loop through the provided dataset in the QC folders
for i in os.listdir(Source_QC_folder):
if not os.path.isdir(os.path.join(Source_QC_folder,i)):
print('Running QC on: '+i)
shortname_no_PNG = i[:-4]
# -------------------------------- Target test data (Ground truth) --------------------------------
test_GT_raw = imageio.imread(os.path.join(Target_QC_folder, i), as_gray=False, pilmode="RGB")
test_GT = test_GT_raw[:,:,2]
# -------------------------------- Source test data --------------------------------
test_source_raw = imageio.imread(os.path.join(QC_model_path+"/"+QC_model_name+"/Quality Control/"+str(checkpoints),shortname_no_PNG+"_real.png"))
test_source = test_source_raw[:,:,2]
# Normalize the images wrt each other by minimizing the MSE between GT and Source image
test_GT_norm,test_source_norm = norm_minmse(test_GT, test_source, normalize_gt=True)
# -------------------------------- Prediction --------------------------------
test_prediction_raw = imageio.imread(os.path.join(QC_model_path+"/"+QC_model_name+"/Quality Control/"+str(checkpoints),shortname_no_PNG+"_fake.png"))
test_prediction = test_prediction_raw[:,:,2]
# Normalize the images wrt each other by minimizing the MSE between GT and prediction
test_GT_norm,test_prediction_norm = norm_minmse(test_GT, test_prediction, normalize_gt=True)
# -------------------------------- Calculate the metric maps and save them --------------------------------
# Calculate the SSIM maps
index_SSIM_GTvsPrediction, img_SSIM_GTvsPrediction = ssim(test_GT_norm, test_prediction_norm)
index_SSIM_GTvsSource, img_SSIM_GTvsSource = ssim(test_GT_norm, test_source_norm)
ssim_score_list.append(index_SSIM_GTvsPrediction)
#Save ssim_maps
img_SSIM_GTvsPrediction_8bit = (img_SSIM_GTvsPrediction* 255).astype("uint8")
io.imsave(QC_model_path+'/'+QC_model_name+"/Quality Control/"+str(checkpoints)+"/SSIM_GTvsPrediction_"+shortname_no_PNG+'.tif',img_SSIM_GTvsPrediction_8bit)
img_SSIM_GTvsSource_8bit = (img_SSIM_GTvsSource* 255).astype("uint8")
io.imsave(QC_model_path+'/'+QC_model_name+"/Quality Control/"+str(checkpoints)+"/SSIM_GTvsSource_"+shortname_no_PNG+'.tif',img_SSIM_GTvsSource_8bit)
# Calculate the Root Squared Error (RSE) maps
img_RSE_GTvsPrediction = np.sqrt(np.square(test_GT_norm - test_prediction_norm))
img_RSE_GTvsSource = np.sqrt(np.square(test_GT_norm - test_source_norm))
# Save SE maps
img_RSE_GTvsPrediction_8bit = (img_RSE_GTvsPrediction* 255).astype("uint8")
io.imsave(QC_model_path+'/'+QC_model_name+"/Quality Control/"+str(checkpoints)+"/RSE_GTvsPrediction_"+shortname_no_PNG+'.tif',img_RSE_GTvsPrediction_8bit)
img_RSE_GTvsSource_8bit = (img_RSE_GTvsSource* 255).astype("uint8")
io.imsave(QC_model_path+'/'+QC_model_name+"/Quality Control/"+str(checkpoints)+"/RSE_GTvsSource_"+shortname_no_PNG+'.tif',img_RSE_GTvsSource_8bit)
# -------------------------------- Calculate the RSE metrics and save them --------------------------------
# Normalised Root Mean Squared Error (here it's valid to take the mean of the image)
NRMSE_GTvsPrediction = np.sqrt(np.mean(img_RSE_GTvsPrediction))
NRMSE_GTvsSource = np.sqrt(np.mean(img_RSE_GTvsSource))
# We can also measure the peak signal to noise ratio between the images
PSNR_GTvsPrediction = psnr(test_GT_norm,test_prediction_norm,data_range=1.0)
PSNR_GTvsSource = psnr(test_GT_norm,test_source_norm,data_range=1.0)
writer.writerow([i,str(index_SSIM_GTvsPrediction),str(index_SSIM_GTvsSource),str(NRMSE_GTvsPrediction),str(NRMSE_GTvsSource),str(PSNR_GTvsPrediction),str(PSNR_GTvsSource)])
#Here we calculate the average SSIM over all QC images for this checkpoint
Average_SSIM_checkpoint = Average(ssim_score_list)
Average_ssim_score_list.append(Average_SSIM_checkpoint)
# All data is now processed and saved
# -------------------------------- Display --------------------------------
# Display the SSIM vs checkpoint plot
plt.figure(figsize=(20,5))
plt.plot(Checkpoint_list, Average_ssim_score_list, label="SSIM")
plt.title('Checkpoints vs. SSIM')
plt.ylabel('SSIM')
plt.xlabel('Checkpoints')
plt.legend()
plt.savefig(full_QC_model_path+'Quality Control/SSIMvsCheckpoint_data.png',bbox_inches='tight',pad_inches=0)
plt.show()
# -------------------------------- Display RGB --------------------------------
from ipywidgets import interact
import ipywidgets as widgets
if Image_type == "RGB":
random_choice_shortname_no_PNG = shortname_no_PNG
@interact
def show_results(file=os.listdir(Source_QC_folder), checkpoints=Checkpoint_list):
random_choice_shortname_no_PNG = file[:-4]
df1 = pd.read_csv(QC_model_path+"/"+QC_model_name+"/Quality Control/"+str(checkpoints)+"/"+"QC_metrics_"+QC_model_name+str(checkpoints)+".csv", header=0)
df2 = df1.set_index("image #", drop = False)
index_SSIM_GTvsPrediction = df2.loc[file, "Prediction v. GT mSSIM"]
index_SSIM_GTvsSource = df2.loc[file, "Input v. GT mSSIM"]
#Setting up colours
cmap = None
plt.figure(figsize=(10,10))
# Target (Ground-truth)
plt.subplot(3,3,1)
plt.axis('off')
img_GT = imageio.imread(os.path.join(Target_QC_folder, file), as_gray=False, pilmode="RGB")
plt.imshow(img_GT, cmap = cmap)
plt.title('Target',fontsize=15)
# Source
plt.subplot(3,3,2)
plt.axis('off')
img_Source = imageio.imread(os.path.join(Source_QC_folder, file), as_gray=False, pilmode="RGB")
plt.imshow(img_Source, cmap = cmap)
plt.title('Source',fontsize=15)
#Prediction
plt.subplot(3,3,3)
plt.axis('off')
img_Prediction = io.imread(os.path.join(QC_model_path+"/"+QC_model_name+"/Quality Control/"+str(checkpoints), random_choice_shortname_no_PNG+"_fake.png"))
plt.imshow(img_Prediction, cmap = cmap)
plt.title('Prediction',fontsize=15)
#SSIM between GT and Source
plt.subplot(3,3,5)
#plt.axis('off')
plt.tick_params(
axis='both', # changes apply to the x-axis and y-axis
which='both', # both major and minor ticks are affected
bottom=False, # ticks along the bottom edge are off
top=False, # ticks along the top edge are off
left=False, # ticks along the left edge are off
right=False, # ticks along the right edge are off
labelbottom=False,
labelleft=False)
img_SSIM_GTvsSource = imageio.imread(os.path.join(QC_model_path+"/"+QC_model_name+"/Quality Control/"+str(checkpoints), "SSIM_GTvsSource_"+random_choice_shortname_no_PNG+".tif"))
imSSIM_GTvsSource = plt.imshow(img_SSIM_GTvsSource, cmap = cmap, vmin=0, vmax=1)
#plt.colorbar(imSSIM_GTvsSource,fraction=0.046, pad=0.04)
plt.title('Target vs. Source',fontsize=15)
plt.xlabel('mSSIM: '+str(round(index_SSIM_GTvsSource,3)),fontsize=14)
plt.ylabel('SSIM maps',fontsize=20, rotation=0, labelpad=75)
#SSIM between GT and Prediction
plt.subplot(3,3,6)
#plt.axis('off')
plt.tick_params(
axis='both', # changes apply to the x-axis and y-axis
which='both', # both major and minor ticks are affected
bottom=False, # ticks along the bottom edge are off
top=False, # ticks along the top edge are off
left=False, # ticks along the left edge are off
right=False, # ticks along the right edge are off
labelbottom=False,
labelleft=False)
img_SSIM_GTvsPrediction = imageio.imread(os.path.join(QC_model_path+"/"+QC_model_name+"/Quality Control/"+str(checkpoints), "SSIM_GTvsPrediction_"+random_choice_shortname_no_PNG+".tif"))
imSSIM_GTvsPrediction = plt.imshow(img_SSIM_GTvsPrediction, cmap = cmap, vmin=0,vmax=1)
#plt.colorbar(imSSIM_GTvsPrediction,fraction=0.046, pad=0.04)
plt.title('Target vs. Prediction',fontsize=15)
plt.xlabel('mSSIM: '+str(round(index_SSIM_GTvsPrediction,3)),fontsize=14)
plt.savefig(full_QC_model_path+'Quality Control/QC_example_data.png',bbox_inches='tight',pad_inches=0)
# -------------------------------- Display Grayscale --------------------------------
if Image_type == "Grayscale":
random_choice_shortname_no_PNG = shortname_no_PNG
@interact
def show_results(file=os.listdir(Source_QC_folder), checkpoints=Checkpoint_list):
random_choice_shortname_no_PNG = file[:-4]
df1 = pd.read_csv(QC_model_path+"/"+QC_model_name+"/Quality Control/"+str(checkpoints)+"/"+"QC_metrics_"+QC_model_name+str(checkpoints)+".csv", header=0)
df2 = df1.set_index("image #", drop = False)
index_SSIM_GTvsPrediction = df2.loc[file, "Prediction v. GT mSSIM"]
index_SSIM_GTvsSource = df2.loc[file, "Input v. GT mSSIM"]
NRMSE_GTvsPrediction = df2.loc[file, "Prediction v. GT NRMSE"]
NRMSE_GTvsSource = df2.loc[file, "Input v. GT NRMSE"]
PSNR_GTvsSource = df2.loc[file, "Input v. GT PSNR"]
PSNR_GTvsPrediction = df2.loc[file, "Prediction v. GT PSNR"]
plt.figure(figsize=(15,15))
cmap = None
# Target (Ground-truth)
plt.subplot(3,3,1)
plt.axis('off')
img_GT = imageio.imread(os.path.join(Target_QC_folder, file), as_gray=True, pilmode="RGB")
plt.imshow(img_GT, norm=simple_norm(img_GT, percent = 99), cmap = 'gray')
plt.title('Target',fontsize=15)
# Source
plt.subplot(3,3,2)
plt.axis('off')
img_Source = imageio.imread(os.path.join(QC_model_path+"/"+QC_model_name+"/Quality Control/"+str(checkpoints), random_choice_shortname_no_PNG+"_real.png"))
plt.imshow(img_Source, norm=simple_norm(img_Source, percent = 99))
plt.title('Source',fontsize=15)
#Prediction
plt.subplot(3,3,3)
plt.axis('off')
img_Prediction = io.imread(os.path.join(QC_model_path+"/"+QC_model_name+"/Quality Control/"+str(checkpoints), random_choice_shortname_no_PNG+"_fake.png"))
plt.imshow(img_Prediction, norm=simple_norm(img_Prediction, percent = 99))
plt.title('Prediction',fontsize=15)
#Setting up colours
cmap = plt.cm.CMRmap
#SSIM between GT and Source
plt.subplot(3,3,5)
#plt.axis('off')
plt.tick_params(
axis='both', # changes apply to the x-axis and y-axis
which='both', # both major and minor ticks are affected
bottom=False, # ticks along the bottom edge are off
top=False, # ticks along the top edge are off
left=False, # ticks along the left edge are off
right=False, # ticks along the right edge are off
labelbottom=False,
labelleft=False)
img_SSIM_GTvsSource = imageio.imread(os.path.join(QC_model_path+"/"+QC_model_name+"/Quality Control/"+str(checkpoints), "SSIM_GTvsSource_"+random_choice_shortname_no_PNG+".tif"))
img_SSIM_GTvsSource = img_SSIM_GTvsSource / 255
imSSIM_GTvsSource = plt.imshow(img_SSIM_GTvsSource, cmap = cmap, vmin=0, vmax=1)
plt.colorbar(imSSIM_GTvsSource,fraction=0.046, pad=0.04)
plt.title('Target vs. Source',fontsize=15)
plt.xlabel('mSSIM: '+str(round(index_SSIM_GTvsSource,3)),fontsize=14)
plt.ylabel('SSIM maps',fontsize=20, rotation=0, labelpad=75)
#SSIM between GT and Prediction
plt.subplot(3,3,6)
#plt.axis('off')
plt.tick_params(
axis='both', # changes apply to the x-axis and y-axis
which='both', # both major and minor ticks are affected
bottom=False, # ticks along the bottom edge are off
top=False, # ticks along the top edge are off
left=False, # ticks along the left edge are off
right=False, # ticks along the right edge are off
labelbottom=False,
labelleft=False)
img_SSIM_GTvsPrediction = imageio.imread(os.path.join(QC_model_path+"/"+QC_model_name+"/Quality Control/"+str(checkpoints), "SSIM_GTvsPrediction_"+random_choice_shortname_no_PNG+".tif"))
img_SSIM_GTvsPrediction = img_SSIM_GTvsPrediction / 255
imSSIM_GTvsPrediction = plt.imshow(img_SSIM_GTvsPrediction, cmap = cmap, vmin=0,vmax=1)
plt.colorbar(imSSIM_GTvsPrediction,fraction=0.046, pad=0.04)
plt.title('Target vs. Prediction',fontsize=15)
plt.xlabel('mSSIM: '+str(round(index_SSIM_GTvsPrediction,3)),fontsize=14)
#Root Squared Error between GT and Source
plt.subplot(3,3,8)
#plt.axis('off')
plt.tick_params(
axis='both', # changes apply to the x-axis and y-axis
which='both', # both major and minor ticks are affected
bottom=False, # ticks along the bottom edge are off
top=False, # ticks along the top edge are off
left=False, # ticks along the left edge are off
right=False, # ticks along the right edge are off
labelbottom=False,
labelleft=False)
img_RSE_GTvsSource = imageio.imread(os.path.join(QC_model_path+"/"+QC_model_name+"/Quality Control/"+str(checkpoints), "RSE_GTvsSource_"+random_choice_shortname_no_PNG+".tif"))
img_RSE_GTvsSource = img_RSE_GTvsSource / 255
imRSE_GTvsSource = plt.imshow(img_RSE_GTvsSource, cmap = cmap, vmin=0, vmax = 1)
plt.colorbar(imRSE_GTvsSource,fraction=0.046,pad=0.04)
plt.title('Target vs. Source',fontsize=15)
plt.xlabel('NRMSE: '+str(round(NRMSE_GTvsSource,3))+', PSNR: '+str(round(PSNR_GTvsSource,3)),fontsize=14)
#plt.title('Target vs. Source PSNR: '+str(round(PSNR_GTvsSource,3)))
plt.ylabel('RSE maps',fontsize=20, rotation=0, labelpad=75)
#Root Squared Error between GT and Prediction
plt.subplot(3,3,9)
#plt.axis('off')
plt.tick_params(
axis='both', # changes apply to the x-axis and y-axis
which='both', # both major and minor ticks are affected
bottom=False, # ticks along the bottom edge are off
top=False, # ticks along the top edge are off
left=False, # ticks along the left edge are off
right=False, # ticks along the right edge are off
labelbottom=False,
labelleft=False)
img_RSE_GTvsPrediction = imageio.imread(os.path.join(QC_model_path+"/"+QC_model_name+"/Quality Control/"+str(checkpoints), "RSE_GTvsPrediction_"+random_choice_shortname_no_PNG+".tif"))
img_RSE_GTvsPrediction = img_RSE_GTvsPrediction / 255
imRSE_GTvsPrediction = plt.imshow(img_RSE_GTvsPrediction, cmap = cmap, vmin=0, vmax=1)
plt.colorbar(imRSE_GTvsPrediction,fraction=0.046,pad=0.04)
plt.title('Target vs. Prediction',fontsize=15)
plt.xlabel('NRMSE: '+str(round(NRMSE_GTvsPrediction,3))+', PSNR: '+str(round(PSNR_GTvsPrediction,3)),fontsize=14)
plt.savefig(full_QC_model_path+'/Quality Control/QC_example_data.png',bbox_inches='tight',pad_inches=0)
#Make a pdf summary of the QC results
qc_pdf_export()
###Output
_____no_output_____
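###Markdown
(Optional) The next cell is a small helper sketch that is not part of the original workflow: it assumes the quality-control cell above has just been run and simply reports which entry of `Checkpoint_list` obtained the highest average mSSIM, which can help when choosing the `checkpoint` value in section 6.
###Code
#@markdown ##(Optional) Print the checkpoint with the highest average mSSIM
# This sketch assumes Checkpoint_list and Average_ssim_score_list were filled by the cell above.
import numpy as np

best_index = int(np.argmax(Average_ssim_score_list))
best_checkpoint = Checkpoint_list[best_index]
print("Highest average mSSIM: "+str(round(Average_ssim_score_list[best_index], 3)))
print("Obtained with checkpoint: "+str(best_checkpoint))
###Output
_____no_output_____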
###Markdown
**6. Using the trained model**---In this section the unseen data is processed using the trained model (in section 4). First, your unseen images are uploaded and prepared for prediction. After that, your trained model from section 4 is activated and used to generate the predictions, which are saved into your Google Drive. **6.1. Generate prediction(s) from unseen dataset**---The current trained model (from section 4.2) can now be used to process images. If you want to use an older model, untick the **Use_the_current_trained_model** box and enter the name and path of the model to use. Predicted output images are saved in your **Result_folder** folder as PNG images.**`Data_folder`:** This folder should contain the images that you want to use your trained network on for processing.**`Result_folder`:** This folder will contain the predicted output images.**`checkpoint`:** Choose the checkpoint number you would like to use to perform predictions. To use the "latest" checkpoint, input "latest".
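As a quick orientation, the optional sketch below (not part of the original workflow) lists the checkpoints available in a model folder by looking for the `*_net_G_A.pth` files that CycleGAN saves; the folder path is left empty on purpose and should be replaced with your own model folder.
###Code
#@markdown ##(Optional) List the checkpoints available in a model folder
import glob
import os

#@markdown ###Path to the model folder to inspect (replace with your own):
model_folder_to_inspect = "" #@param {type:"string"}

checkpoint_files = sorted(glob.glob(os.path.join(model_folder_to_inspect, "*_net_G_A.pth")))
if not checkpoint_files:
  print("No '*_net_G_A.pth' checkpoint files found in: "+model_folder_to_inspect)
else:
  print("Available checkpoints:")
  for f in checkpoint_files:
    # File names look like '5_net_G_A.pth', '10_net_G_A.pth', ..., 'latest_net_G_A.pth'
    print(os.path.basename(f).replace("_net_G_A.pth", ""))
###Output
_____no_output_____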
###Code
#@markdown ### Provide the path to your dataset and to the folder where the predictions are saved, then play the cell to predict outputs from your unseen images.
import glob
import os.path
latest = "latest"
Data_folder = "" #@param {type:"string"}
Result_folder = "" #@param {type:"string"}
# model name and path
#@markdown ###Do you want to use the current trained model?
Use_the_current_trained_model = True #@param {type:"boolean"}
#@markdown ###If not, please provide the path to the model folder:
Prediction_model_folder = "" #@param {type:"string"}
#@markdown ###What model checkpoint would you like to use?
checkpoint = latest#@param {type:"raw"}
#Here we find the loaded model name and parent path
Prediction_model_name = os.path.basename(Prediction_model_folder)
Prediction_model_path = os.path.dirname(Prediction_model_folder)
#here we check if we use the newly trained network or not
if (Use_the_current_trained_model):
print("Using current trained network")
Prediction_model_name = model_name
Prediction_model_path = model_path
#here we check if the model exists
full_Prediction_model_path = Prediction_model_path+'/'+Prediction_model_name+'/'
if os.path.exists(full_Prediction_model_path):
print("The "+Prediction_model_name+" network will be used.")
else:
W = '\033[0m' # white (normal)
R = '\033[31m' # red
print(R+'!! WARNING: The chosen model does not exist !!'+W)
print('Please make sure you provide a valid model path and model name before proceeding further.')
# Here we check that checkpoint exist, if not the closest one will be chosen
Nb_Checkpoint = len(glob.glob(os.path.join(full_Prediction_model_path, '*G_A.pth')))
print(Nb_Checkpoint)
if not checkpoint == "latest":
if checkpoint < 10:
checkpoint = 5
if not checkpoint % 5 == 0:
checkpoint = ((int(checkpoint / 5)-1) * 5)
print (bcolors.WARNING + " Your chosen checkpoint is not divisible by 5; therefore the checkpoint chosen is now:",checkpoint)
if checkpoint > Nb_Checkpoint*5:
checkpoint = "latest"
if checkpoint == Nb_Checkpoint*5:
checkpoint = "latest"
# Here we need to move the data to be analysed so that cycleGAN can find them
Saving_path_prediction= "/content/"+Prediction_model_name
if os.path.exists(Saving_path_prediction):
shutil.rmtree(Saving_path_prediction)
os.makedirs(Saving_path_prediction)
Saving_path_Data_folder = Saving_path_prediction+"/testA"
if os.path.exists(Saving_path_Data_folder):
shutil.rmtree(Saving_path_Data_folder)
os.makedirs(Saving_path_Data_folder)
for files in os.listdir(Data_folder):
shutil.copyfile(Data_folder+"/"+files, Saving_path_Data_folder+"/"+files)
Nb_files_Data_folder = len(os.listdir(Data_folder)) +10
#Here we copy and rename the checkpoint to be used
shutil.copyfile(full_Prediction_model_path+"/"+str(checkpoint)+"_net_G_A.pth", full_Prediction_model_path+"/"+str(checkpoint)+"_net_G.pth")
# This will find the image dimension of a randomly chosen image in Data_folder
random_choice = random.choice(os.listdir(Data_folder))
x = imageio.imread(Data_folder+"/"+random_choice)
#Find image XY dimension
Image_Y = x.shape[0]
Image_X = x.shape[1]
Image_min_dim = min(Image_Y, Image_X)
print(Image_min_dim)
#-------------------------------- Perform predictions -----------------------------
#-------------------------------- Options that can be used to perform predictions -----------------------------
# basic parameters
#('--dataroot', required=True, help='path to images (should have subfolders trainA, trainB, valA, valB, etc)')
#('--name', type=str, default='experiment_name', help='name of the experiment. It decides where to store samples and models')
#('--gpu_ids', type=str, default='0', help='gpu ids: e.g. 0 0,1,2, 0,2. use -1 for CPU')
#('--checkpoints_dir', type=str, default='./checkpoints', help='models are saved here')
# model parameters
#('--model', type=str, default='cycle_gan', help='chooses which model to use. [cycle_gan | pix2pix | test | colorization]')
#('--input_nc', type=int, default=3, help='# of input image channels: 3 for RGB and 1 for grayscale')
#('--output_nc', type=int, default=3, help='# of output image channels: 3 for RGB and 1 for grayscale')
#('--ngf', type=int, default=64, help='# of gen filters in the last conv layer')
#('--ndf', type=int, default=64, help='# of discrim filters in the first conv layer')
#('--netD', type=str, default='basic', help='specify discriminator architecture [basic | n_layers | pixel]. The basic model is a 70x70 PatchGAN. n_layers allows you to specify the layers in the discriminator')
#('--netG', type=str, default='resnet_9blocks', help='specify generator architecture [resnet_9blocks | resnet_6blocks | unet_256 | unet_128]')
#('--n_layers_D', type=int, default=3, help='only used if netD==n_layers')
#('--norm', type=str, default='instance', help='instance normalization or batch normalization [instance | batch | none]')
#('--init_type', type=str, default='normal', help='network initialization [normal | xavier | kaiming | orthogonal]')
#('--init_gain', type=float, default=0.02, help='scaling factor for normal, xavier and orthogonal.')
#('--no_dropout', action='store_true', help='no dropout for the generator')
# dataset parameters
#('--dataset_mode', type=str, default='unaligned', help='chooses how datasets are loaded. [unaligned | aligned | single | colorization]')
#('--direction', type=str, default='AtoB', help='AtoB or BtoA')
#('--serial_batches', action='store_true', help='if true, takes images in order to make batches, otherwise takes them randomly')
#('--num_threads', default=4, type=int, help='# threads for loading data')
#('--batch_size', type=int, default=1, help='input batch size')
#('--load_size', type=int, default=286, help='scale images to this size')
#('--crop_size', type=int, default=256, help='then crop to this size')
#('--max_dataset_size', type=int, default=float("inf"), help='Maximum number of samples allowed per dataset. If the dataset directory contains more than max_dataset_size, only a subset is loaded.')
#('--preprocess', type=str, default='resize_and_crop', help='scaling and cropping of images at load time [resize_and_crop | crop | scale_width | scale_width_and_crop | none]')
#('--no_flip', action='store_true', help='if specified, do not flip the images for data augmentation')
#('--display_winsize', type=int, default=256, help='display window size for both visdom and HTML')
# additional parameters
#('--epoch', type=str, default='latest', help='which epoch to load? set to latest to use latest cached model')
#('--load_iter', type=int, default='0', help='which iteration to load? if load_iter > 0, the code will load models by iter_[load_iter]; otherwise, the code will load models by [epoch]')
#('--verbose', action='store_true', help='if specified, print more debugging information')
#('--suffix', default='', type=str, help='customized suffix: opt.name = opt.name + suffix: e.g., {model}_{netG}_size{load_size}')
#('--ntest', type=int, default=float("inf"), help='# of test examples.')
#('--results_dir', type=str, default='./results/', help='saves results here.')
#('--aspect_ratio', type=float, default=1.0, help='aspect ratio of result images')
#('--phase', type=str, default='test', help='train, val, test, etc')
# Dropout and Batchnorm has different behavioir during training and test.
#('--eval', action='store_true', help='use eval mode during test time.')
#('--num_test', type=int, default=50, help='how many test images to run')
# rewrite default values
# To avoid cropping, the load_size should be the same as crop_size
#parser.set_defaults(load_size=parser.get_default('crop_size'))
#------------------------------------------------------------------------
#---------------------------- Predictions are performed here ----------------------
os.chdir("/content")
!python pytorch-CycleGAN-and-pix2pix/test.py --dataroot "$Saving_path_Data_folder" --name "$Prediction_model_name" --model test --no_dropout --preprocess scale_width --load_size $Image_min_dim --crop_size $Image_min_dim --results_dir "$Result_folder" --checkpoints_dir "$Prediction_model_path" --num_test $Nb_files_Data_folder --epoch $checkpoint
#-----------------------------------------------------------------------------------
###Output
_____no_output_____
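###Markdown
(Optional) The sketch below assumes the prediction cell above has completed. It lists the `_fake.png` predictions that `test.py` wrote under `Result_folder/<model name>/test_<checkpoint>/images/` (the same layout used by the display cell in section 6.2), so you can quickly check that every input image produced an output.
###Code
#@markdown ##(Optional) Check that every input image received a prediction
import os

prediction_image_folder = Result_folder+"/"+Prediction_model_name+"/test_"+str(checkpoint)+"/images"
predicted_files = [f for f in os.listdir(prediction_image_folder) if f.endswith("_fake.png")]
input_files = [f for f in os.listdir(Data_folder) if not os.path.isdir(os.path.join(Data_folder, f))]

print(str(len(predicted_files))+" prediction(s) found for "+str(len(input_files))+" input image(s)")
for f in sorted(predicted_files):
  print(f)
###Output
_____no_output_____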
###Markdown
**6.2. Inspect the predicted output**---
###Code
# @markdown ##Run this cell to display a randomly chosen input and its corresponding predicted output.
import os
# This will display a randomly chosen dataset input and predicted output
random_choice = random.choice(os.listdir(Data_folder))
random_choice_no_extension = os.path.splitext(random_choice)
x = imageio.imread(Result_folder+"/"+Prediction_model_name+"/test_"+str(checkpoint)+"/images/"+random_choice_no_extension[0]+"_real.png")
y = imageio.imread(Result_folder+"/"+Prediction_model_name+"/test_"+str(checkpoint)+"/images/"+random_choice_no_extension[0]+"_fake.png")
f=plt.figure(figsize=(16,8))
plt.subplot(1,2,1)
plt.imshow(x, interpolation='nearest')
plt.title('Input')
plt.axis('off');
plt.subplot(1,2,2)
plt.imshow(y, interpolation='nearest')
plt.title('Prediction')
plt.axis('off');
###Output
_____no_output_____
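###Markdown
(Optional) If you would like to keep a record of the predictions, the sketch below saves a side-by-side input/prediction figure for every image in `Data_folder`, using the same results layout as the display cell above. It assumes the prediction cell in section 6.1 has been run and writes the figures into a new subfolder of `Result_folder`.
###Code
#@markdown ##(Optional) Save side-by-side input/prediction comparisons for all images
import os
import imageio
from matplotlib import pyplot as plt

comparison_folder = Result_folder+"/side_by_side_comparisons"
if not os.path.exists(comparison_folder):
  os.makedirs(comparison_folder)

prediction_image_folder = Result_folder+"/"+Prediction_model_name+"/test_"+str(checkpoint)+"/images"

for f in sorted(os.listdir(Data_folder)):
  name_no_extension = os.path.splitext(f)[0]
  real_path = prediction_image_folder+"/"+name_no_extension+"_real.png"
  fake_path = prediction_image_folder+"/"+name_no_extension+"_fake.png"
  if os.path.exists(real_path) and os.path.exists(fake_path):
    fig = plt.figure(figsize=(16,8))
    plt.subplot(1,2,1)
    plt.imshow(imageio.imread(real_path), interpolation='nearest')
    plt.title('Input')
    plt.axis('off')
    plt.subplot(1,2,2)
    plt.imshow(imageio.imread(fake_path), interpolation='nearest')
    plt.title('Prediction')
    plt.axis('off')
    plt.savefig(comparison_folder+"/"+name_no_extension+"_comparison.png", bbox_inches='tight', pad_inches=0)
    plt.close(fig)

print("Comparison images saved in: "+comparison_folder)
###Output
_____no_output_____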
###Markdown
**CycleGAN**---CycleGAN is a method that can capture the characteristics of one image domain and learn how these characteristics can be translated into another image domain, all in the absence of any paired training examples. It was first published by [Zhu *et al.* in 2017](https://arxiv.org/abs/1703.10593). Unlike pix2pix, the image transformation performed does not require paired images for training (unsupervised learning) and is made possible here by using a set of two Generative Adversarial Networks (GANs) that learn to transform images both from the first domain to the second and vice-versa. **This particular notebook enables unpaired image-to-image translation. If your dataset is paired, you should also consider using the pix2pix notebook.**---*Disclaimer*:This notebook is part of the *Zero-Cost Deep-Learning to Enhance Microscopy* project (https://github.com/HenriquesLab/DeepLearning_Collab/wiki). Jointly developed by the Jacquemet (link to https://cellmig.org/) and Henriques (https://henriqueslab.github.io/) laboratories.This notebook is based on the following paper: **Unpaired Image-to-Image Translation using Cycle-Consistent Adversarial Networks** from Zhu *et al.* published in arXiv in 2018 (https://arxiv.org/abs/1703.10593)The source code of the CycleGAN PyTorch implementation can be found in: https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix**Please also cite this original paper when using or developing this notebook.** **License**---
###Code
#@markdown ##Double click to see the license information
#------------------------- LICENSE FOR ZeroCostDL4Mic------------------------------------
#This ZeroCostDL4Mic notebook is distributed under the MIT licence
#------------------------- LICENSE FOR CycleGAN ------------------------------------
#Copyright (c) 2017, Jun-Yan Zhu and Taesung Park
#All rights reserved.
#Redistribution and use in source and binary forms, with or without
#modification, are permitted provided that the following conditions are met:
#* Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#* Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
#AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
#IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
#DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
#FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
#DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
#SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
#CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
#OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
#OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#--------------------------- LICENSE FOR pix2pix --------------------------------
#BSD License
#For pix2pix software
#Copyright (c) 2016, Phillip Isola and Jun-Yan Zhu
#All rights reserved.
#Redistribution and use in source and binary forms, with or without
#modification, are permitted provided that the following conditions are met:
#* Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#* Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#----------------------------- LICENSE FOR DCGAN --------------------------------
#BSD License
#For dcgan.torch software
#Copyright (c) 2015, Facebook, Inc. All rights reserved.
#Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
#Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
#Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
#Neither the name Facebook nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
#THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
###Output
_____no_output_____
###Markdown
**How to use this notebook?**---Videos describing how to use our notebooks are available on YouTube: - [**Video 1**](https://www.youtube.com/watch?v=GzD2gamVNHI&feature=youtu.be): Full run through of the workflow to obtain the notebooks and the provided test datasets as well as a common use of the notebook - [**Video 2**](https://www.youtube.com/watch?v=PUuQfP5SsqM&feature=youtu.be): Detailed description of the different sections of the notebook---**Structure of a notebook**The notebook contains two types of cell: **Text cells** provide information and can be modified by double-clicking the cell. You are currently reading a text cell. You can create a new text cell by clicking `+ Text`.**Code cells** contain code that can be modified by selecting the cell. To execute the cell, move your cursor to the `[ ]`-mark on the left side of the cell (a play button appears). Click to execute the cell. Once execution is done, the play button animation stops. You can create a new code cell by clicking `+ Code`.---**Table of contents, Code snippets** and **Files**On the top left side of the notebook you find three tabs which contain, from top to bottom:*Table of contents* = contains the structure of the notebook. Click the content to move quickly between sections.*Code snippets* = contains examples of how to code certain tasks. You can ignore this when using this notebook.*Files* = contains all available files. After mounting your Google Drive (see section 1.) you will find your files and folders here. **Remember that all uploaded files are purged after changing the runtime.** All files saved in Google Drive will remain. You do not need to use the Mount Drive-button; your Google Drive is connected in section 1.2.**Note:** The "sample data" in "Files" contains default files. Do not upload anything in here!---**Making changes to the notebook****You can make a copy** of the notebook and save it to your Google Drive. To do this, click File -> Save a copy in Drive.To **edit a cell**, double click on the text. This will show you either the source code (in code cells) or the source text (in text cells).You can use the `#`-mark in code cells to comment out parts of the code. This allows you to keep the original code piece in the cell as a comment. **0. Before getting started**--- To train CycleGAN, **you only need two folders containing PNG images**. The images do not need to be paired.While you do not need paired images to train CycleGAN, if possible, **we strongly recommend that you generate a paired dataset. This means that the same image needs to be acquired in the two conditions. These images can be used to assess the quality of your trained model (Quality control dataset)**. The quality control assessment can be done directly in this notebook. Please note that you currently can **only use .png files!**Here's a common data structure that can work:* Experiment A - **Training dataset (non-matching images)** - Training_source - img_1.png, img_2.png, ... - Training_target - img_1.png, img_2.png, ...
- **Quality control dataset (matching images)** - Training_source - img_1.png, img_2.png - Training_target - img_1.png, img_2.png - **Data to be predicted** - **Results**---**Important note**- If you wish to **Train a network from scratch** using your own dataset (and we encourage everyone to do that), you will need to run **sections 1 - 4**, then use **section 5** to assess the quality of your model and **section 6** to run predictions using the model that you trained.- If you wish to **Evaluate your model** using a model previously generated and saved on your Google Drive, you will only need to run **sections 1 and 2** to set up the notebook, then use **section 5** to assess the quality of your model.- If you only wish to **run predictions** using a model previously generated and saved on your Google Drive, you will only need to run **sections 1 and 2** to set up the notebook, then use **section 6** to run the predictions on the desired model.--- **1. Initialise the Colab session**--- **1.1. Check for GPU access**---By default, the session should be using Python 3 and GPU acceleration, but it is possible to ensure that these are set properly by doing the following:Go to **Runtime -> Change the Runtime type****Runtime type: Python 3** *(Python 3 is the programming language in which this program is written)***Accelerator: GPU** *(Graphics processing unit)*
###Code
#@markdown ##Run this cell to check if you have GPU access
import tensorflow as tf
if tf.test.gpu_device_name()=='':
print('You do not have GPU access.')
print('Did you change your runtime?')
print('If the runtime setting is correct then Google did not allocate a GPU for your session')
print('Expect slow performance. To access a GPU, try reconnecting later.')
else:
print('You have GPU access')
!nvidia-smi
###Output
_____no_output_____
###Markdown
**1.2. Mount your Google Drive**--- To use this notebook on the data present in your Google Drive, you need to mount your Google Drive to this notebook. Play the cell below to mount your Google Drive and follow the link. In the new browser window, select your drive and select 'Allow', copy the code, paste into the cell and press enter. This will give Colab access to the data on the drive. Once this is done, your data are available in the **Files** tab on the top left of notebook.
###Code
#@markdown ##Run this cell to connect your Google Drive to Colab
#@markdown * Click on the URL.
#@markdown * Sign in your Google Account.
#@markdown * Copy the authorization code.
#@markdown * Enter the authorization code.
#@markdown * Click on "Files" site on the right. Refresh the site. Your Google Drive folder should now be available here as "drive".
#mounts user's Google Drive to Google Colab.
from google.colab import drive
drive.mount('/content/gdrive')
###Output
_____no_output_____
###Markdown
**2. Install CycleGAN and dependencies**---
###Code
#@markdown ##Install CycleGAN and dependencies
#------- Code from the cycleGAN demo notebook starts here -------
#Here, we install libraries which are not already included in Colab.
!git clone https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix
import os
os.chdir('pytorch-CycleGAN-and-pix2pix/')
!pip install -r requirements.txt
import imageio
from skimage import data
from skimage import exposure
from skimage.exposure import match_histograms
from skimage.util import img_as_int
# ------- Common variable to all ZeroCostDL4Mic notebooks -------
import numpy as np
from matplotlib import pyplot as plt
import urllib
import os, random
import shutil
import zipfile
from tifffile import imread, imsave
import time
import sys
from pathlib import Path
import pandas as pd
import csv
from glob import glob
from scipy import signal
from scipy import ndimage
from skimage import io
from sklearn.linear_model import LinearRegression
from skimage.util import img_as_uint
import matplotlib as mpl
from skimage.metrics import structural_similarity
from skimage.metrics import peak_signal_noise_ratio as psnr
from astropy.visualization import simple_norm
from skimage import img_as_float32
from skimage.util import img_as_ubyte
from tqdm import tqdm
# Colors for the warning messages
class bcolors:
WARNING = '\033[31m'
#Disable some of the tensorflow warnings
import warnings
warnings.filterwarnings("ignore")
print("Libraries installed")
###Output
_____no_output_____
###Markdown
**3. Select your parameters and paths**--- **3.1. Setting main training parameters**--- **Paths for training, predictions and results****`Training_source`, `Training_target`:** These are the paths to your folders containing the Training_source and Training_target training data respectively. To find the paths of the folders containing the respective datasets, go to your Files on the left of the notebook, navigate to the folder containing your files and copy the path by right-clicking on the folder, **Copy path** and pasting it into the right box below.**`model_name`:** Use only my_model-style names, not my-model (use "_" not "-"). Do not use spaces in the name. Avoid using the name of an existing model (saved in the same folder) as it will be overwritten.**`model_path`**: Enter the path where your model will be saved once trained (for instance your result folder).**Training Parameters****`number_of_epochs`:** Input how many epochs (rounds) the network will be trained for. Preliminary results can already be observed after a few (10) epochs, but a full training should run for 200 epochs or more. Evaluate the performance after training (see section 5). **Default value: 200****Advanced Parameters - experienced users only****`patch_size`:** CycleGAN divides the image into patches for training. Input the size of the patches (length of a side). The value should be smaller than the dimensions of the image and divisible by 4. **Default value: 512****When choosing the patch_size, the value should be i) large enough that it will enclose many instances, ii) small enough that the resulting patches fit into the RAM.** **`batch_size`:** This parameter defines the number of patches seen in each training step. Reducing or increasing the **batch size** may slow or speed up your training, respectively, and can influence network performance. **Default value: 1****`initial_learning_rate`:** Input the initial value to be used as learning rate. **Default value: 0.0002**
###Code
#@markdown ###Path to training images:
Training_source = "" #@param {type:"string"}
InputFile = Training_source+"/*.png"
Training_target = "" #@param {type:"string"}
OutputFile = Training_target+"/*.png"
#Define where the patch file will be saved
base = "/content"
# model name and path
#@markdown ###Name of the model and path to model folder:
model_name = "" #@param {type:"string"}
model_path = "" #@param {type:"string"}
# other parameters for training.
#@markdown ###Training Parameters
#@markdown Number of epochs:
number_of_epochs = 200#@param {type:"number"}
#@markdown ###Advanced Parameters
Use_Default_Advanced_Parameters = True #@param {type:"boolean"}
#@markdown ###If not, please input:
patch_size = 512#@param {type:"number"} # in pixels
batch_size = 1#@param {type:"number"}
initial_learning_rate = 0.0002 #@param {type:"number"}
if (Use_Default_Advanced_Parameters):
print("Default advanced parameters enabled")
batch_size = 1
patch_size = 512
initial_learning_rate = 0.0002
#here we check that no model with the same name already exist, if so delete
if os.path.exists(model_path+'/'+model_name):
print(bcolors.WARNING +"!! WARNING: "+model_name+" already exists and will be deleted in the following cell !!")
print(bcolors.WARNING +"To continue training "+model_name+", choose a new model_name here, and load "+model_name+" in section 3.3")
#To use Cyclegan we need to organise the data in a way the model can understand
Saving_path= "/content/"+model_name
#Saving_path= model_path+"/"+model_name
if os.path.exists(Saving_path):
shutil.rmtree(Saving_path)
os.makedirs(Saving_path)
TrainA_Folder = Saving_path+"/trainA"
if os.path.exists(TrainA_Folder):
shutil.rmtree(TrainA_Folder)
os.makedirs(TrainA_Folder)
TrainB_Folder = Saving_path+"/trainB"
if os.path.exists(TrainB_Folder):
shutil.rmtree(TrainB_Folder)
os.makedirs(TrainB_Folder)
# Here we disable the pre-trained model by default (in case the corresponding cell is not run)
Use_pretrained_model = False
# Here we enable data augmentation by default (in case the corresponding cell is not run)
Use_Data_augmentation = True
# This will display a randomly chosen dataset input and output
random_choice = random.choice(os.listdir(Training_source))
x = imageio.imread(Training_source+"/"+random_choice)
#Find image XY dimension
Image_Y = x.shape[0]
Image_X = x.shape[1]
Image_min_dim = min(Image_Y, Image_X)
#Hyperparameters failsafes
if patch_size > min(Image_Y, Image_X):
patch_size = min(Image_Y, Image_X)
print (bcolors.WARNING + " Your chosen patch_size is bigger than the xy dimension of your image; therefore the patch_size chosen is now:",patch_size)
# Here we check that patch_size is divisible by 4
if not patch_size % 4 == 0:
patch_size = ((int(patch_size / 4)-1) * 4)
print (bcolors.WARNING + " Your chosen patch_size is not divisible by 4; therefore the patch_size chosen is now:",patch_size)
random_choice_2 = random.choice(os.listdir(Training_target))
y = imageio.imread(Training_target+"/"+random_choice_2)
f=plt.figure(figsize=(16,8))
plt.subplot(1,2,1)
plt.imshow(x, interpolation='nearest')
plt.title('Training source')
plt.axis('off');
plt.subplot(1,2,2)
plt.imshow(y, interpolation='nearest')
plt.title('Training target')
plt.axis('off');
###Output
_____no_output_____
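###Markdown
(Optional) Before moving on, the sketch below performs a quick sanity check on the two training folders defined above: it verifies that they only contain .png files (the only format currently supported by this notebook), counts the images, and reports the smallest XY dimension found, which is useful when choosing `patch_size`. It assumes the cell above has been run.
###Code
#@markdown ##(Optional) Run a quick sanity check on the training folders
import os
import imageio

def check_png_folder(folder_path):
  # Returns the number of .png files and the smallest XY dimension found in the folder
  png_files = [f for f in os.listdir(folder_path) if f.lower().endswith(".png")]
  other_entries = [f for f in os.listdir(folder_path) if not f.lower().endswith(".png")]
  if other_entries:
    print(bcolors.WARNING+"Non-PNG entries found in "+folder_path+": "+str(other_entries))
  min_dim = None
  for f in png_files:
    img = imageio.imread(os.path.join(folder_path, f))
    smallest_side = min(img.shape[0], img.shape[1])
    if min_dim is None or smallest_side < min_dim:
      min_dim = smallest_side
  return len(png_files), min_dim

for folder in [Training_source, Training_target]:
  n_png, min_dim = check_png_folder(folder)
  print(folder+": "+str(n_png)+" PNG image(s), smallest XY dimension = "+str(min_dim))
  if min_dim is not None and patch_size > min_dim:
    print(bcolors.WARNING+"patch_size ("+str(patch_size)+") is larger than the smallest image dimension in "+folder)
###Output
_____no_output_____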
###Markdown
**3.2. Data augmentation**--- Data augmentation can improve training progress by amplifying differences in the dataset. This can be useful if the available dataset is small since, in this case, it is possible that a network could quickly learn every example in the dataset (overfitting), without augmentation. Augmentation is not necessary for training and if your training dataset is large you should disable it.Data augmentation is performed here by flipping the patches. By default data augmentation is enabled.
###Code
#Data augmentation
#@markdown ##Play this cell to enable or disable data augmentation:
Use_Data_augmentation = True #@param {type:"boolean"}
if Use_Data_augmentation:
print("Data augmentation enabled")
if not Use_Data_augmentation:
print("Data augmentation disabled")
###Output
_____no_output_____
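###Markdown
(Optional) As a purely visual illustration of the augmentation described above, the sketch below applies a horizontal flip with NumPy to one randomly chosen training image. This is not the training-time augmentation itself, which is handled inside CycleGAN and controlled via the `--no_flip` flag used in section 4.2; it only shows what a flipped image looks like.
###Code
#@markdown ##(Optional) Visualise a horizontal flip on one training image
import os, random
import imageio
import numpy as np
from matplotlib import pyplot as plt

example_file = random.choice(os.listdir(Training_source))
example_image = imageio.imread(Training_source+"/"+example_file)
flipped_image = np.fliplr(example_image)  # horizontal (left-right) flip

plt.figure(figsize=(12,6))
plt.subplot(1,2,1)
plt.imshow(example_image, interpolation='nearest')
plt.title('Original image')
plt.axis('off')
plt.subplot(1,2,2)
plt.imshow(flipped_image, interpolation='nearest')
plt.title('Horizontally flipped')
plt.axis('off')
plt.show()
###Output
_____no_output_____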
###Markdown
**3.3. Using weights from a pre-trained model as initial weights**--- Here, you can set the path to a pre-trained model from which the weights can be extracted and used as a starting point for this training session. **This pre-trained model needs to be a CycleGAN model**. This option allows you to perform training over multiple Colab runtimes or to do transfer learning using models trained outside of ZeroCostDL4Mic. **You do not need to run this section if you want to train a network from scratch**. In order to continue training from the point where the pre-trained model left off, it is advisable to also **load the learning rate** that was used when the training ended. This is automatically saved for models trained with ZeroCostDL4Mic and will be loaded here. If no learning rate can be found in the model folder provided, the default learning rate will be used.
###Code
# @markdown ##Loading weights from a pre-trained network
Use_pretrained_model = False #@param {type:"boolean"}
#@markdown ###If not, please provide the path to the model folder:
pretrained_model_path = "" #@param {type:"string"}
# --------------------- Check if we load a previously trained model ------------------------
if Use_pretrained_model:
h5_file_path_A = os.path.join(pretrained_model_path, "latest_net_G_A.pth")
h5_file_path_B = os.path.join(pretrained_model_path, "latest_net_G_B.pth")
# --------------------- Check the model exist ------------------------
if not (os.path.exists(h5_file_path_A) and os.path.exists(h5_file_path_B)):
print(bcolors.WARNING+'WARNING: Pretrained model does not exist')
Use_pretrained_model = False
print(bcolors.WARNING+'No pretrained network will be used.')
if os.path.exists(h5_file_path_A) and os.path.exists(h5_file_path_B):
print("Pretrained model "+os.path.basename(pretrained_model_path)+" was found and will be loaded prior to training.")
else:
print(bcolors.WARNING+'No pretrained network will be used.')
###Output
_____no_output_____
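###Markdown
(Optional) The sketch below simply lists the `latest_net_*.pth` files found in the pre-trained model folder provided above; these are the files that section 4.1 copies into the new model folder before training continues. It assumes the cell above has been run.
###Code
#@markdown ##(Optional) List the 'latest_net_*.pth' files in the pre-trained model folder
import glob
import os

if Use_pretrained_model:
  pretrained_files = sorted(glob.glob(os.path.join(pretrained_model_path, "latest_net_*.pth")))
  if pretrained_files:
    for f in pretrained_files:
      print(os.path.basename(f))
  else:
    print("No 'latest_net_*.pth' files found in: "+pretrained_model_path)
else:
  print("Use_pretrained_model is disabled; nothing to list.")
###Output
_____no_output_____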
###Markdown
**4. Train the network**--- **4.1. Prepare the training data for training**---Here, we use the information from 3. to prepare the training data into a suitable format for training.
###Code
#@markdown ##Prepare the data for training
print("Data preparation in progress")
if os.path.exists(model_path+'/'+model_name):
shutil.rmtree(model_path+'/'+model_name)
os.makedirs(model_path+'/'+model_name)
#--------------- Here we move the files to trainA and train B ---------
for f in os.listdir(Training_source):
shutil.copyfile(Training_source+"/"+f, TrainA_Folder+"/"+f)
for files in os.listdir(Training_target):
shutil.copyfile(Training_target+"/"+files, TrainB_Folder+"/"+files)
#---------------------------------------------------------------------
# CycleGAN uses a number of epochs without lr decay followed by a number of epochs with lr decay
number_of_epochs_lr_stable = int(number_of_epochs/2)
number_of_epochs_lr_decay = int(number_of_epochs/2)
if Use_pretrained_model :
for f in os.listdir(pretrained_model_path):
if (f.startswith("latest_net_")):
shutil.copyfile(pretrained_model_path+"/"+f, model_path+'/'+model_name+"/"+f)
print("Data ready for training")
###Output
_____no_output_____
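###Markdown
(Optional) The epoch split computed above follows CycleGAN's default 'linear' learning-rate policy: the learning rate stays at `initial_learning_rate` for the first half of the epochs and then decays approximately linearly towards zero over the second half. The sketch below only plots this approximate schedule for the chosen number of epochs; the actual schedule is applied inside `train.py` during section 4.2.
###Code
#@markdown ##(Optional) Plot the approximate learning-rate schedule used during training
import numpy as np
from matplotlib import pyplot as plt

epochs = np.arange(1, number_of_epochs_lr_stable + number_of_epochs_lr_decay + 1)
lr_values = []
for epoch in epochs:
  if epoch <= number_of_epochs_lr_stable:
    lr_values.append(initial_learning_rate)
  else:
    # Approximate linear decay from initial_learning_rate towards zero over the decay epochs
    decay_fraction = (epoch - number_of_epochs_lr_stable) / float(number_of_epochs_lr_decay)
    lr_values.append(initial_learning_rate * max(0.0, 1.0 - decay_fraction))

plt.figure(figsize=(10,4))
plt.plot(epochs, lr_values)
plt.title('Approximate learning-rate schedule')
plt.xlabel('Epoch')
plt.ylabel('Learning rate')
plt.show()
###Output
_____no_output_____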
###Markdown
**4.2. Start Training**---When playing the cell below you should see updates after each epoch (round). Network training can take some time.* **CRITICAL NOTE:** Google Colab has a time limit for processing (to prevent using GPU power for data mining). Training time must be less than 12 hours! If training takes longer than 12 hours, please decrease the number of epochs or number of patches or continue the training in a second Colab session.
###Code
#@markdown ##Start training
start = time.time()
os.chdir("/content")
#--------------------------------- Command line inputs to change CycleGAN paramaters------------
# basic parameters
#('--dataroot', required=True, help='path to images (should have subfolders trainA, trainB, valA, valB, etc)')
#('--name', type=str, default='experiment_name', help='name of the experiment. It decides where to store samples and models')
#('--gpu_ids', type=str, default='0', help='gpu ids: e.g. 0 0,1,2, 0,2. use -1 for CPU')
#('--checkpoints_dir', type=str, default='./checkpoints', help='models are saved here')
# model parameters
#('--model', type=str, default='cycle_gan', help='chooses which model to use. [cycle_gan | pix2pix | test | colorization]')
#('--input_nc', type=int, default=3, help='# of input image channels: 3 for RGB and 1 for grayscale')
#('--output_nc', type=int, default=3, help='# of output image channels: 3 for RGB and 1 for grayscale')
#('--ngf', type=int, default=64, help='# of gen filters in the last conv layer')
#('--ndf', type=int, default=64, help='# of discrim filters in the first conv layer')
#('--netD', type=str, default='basic', help='specify discriminator architecture [basic | n_layers | pixel]. The basic model is a 70x70 PatchGAN. n_layers allows you to specify the layers in the discriminator')
#('--netG', type=str, default='resnet_9blocks', help='specify generator architecture [resnet_9blocks | resnet_6blocks | unet_256 | unet_128]')
#('--n_layers_D', type=int, default=3, help='only used if netD==n_layers')
#('--norm', type=str, default='instance', help='instance normalization or batch normalization [instance | batch | none]')
#('--init_type', type=str, default='normal', help='network initialization [normal | xavier | kaiming | orthogonal]')
#('--init_gain', type=float, default=0.02, help='scaling factor for normal, xavier and orthogonal.')
#('--no_dropout', action='store_true', help='no dropout for the generator')
# dataset parameters
#('--dataset_mode', type=str, default='unaligned', help='chooses how datasets are loaded. [unaligned | aligned | single | colorization]')
#('--direction', type=str, default='AtoB', help='AtoB or BtoA')
#('--serial_batches', action='store_true', help='if true, takes images in order to make batches, otherwise takes them randomly')
#('--num_threads', default=4, type=int, help='# threads for loading data')
#('--batch_size', type=int, default=1, help='input batch size')
#('--load_size', type=int, default=286, help='scale images to this size')
#('--crop_size', type=int, default=256, help='then crop to this size')
#('--max_dataset_size', type=int, default=float("inf"), help='Maximum number of samples allowed per dataset. If the dataset directory contains more than max_dataset_size, only a subset is loaded.')
#('--preprocess', type=str, default='resize_and_crop', help='scaling and cropping of images at load time [resize_and_crop | crop | scale_width | scale_width_and_crop | none]')
#('--no_flip', action='store_true', help='if specified, do not flip the images for data augmentation')
#('--display_winsize', type=int, default=256, help='display window size for both visdom and HTML')
# additional parameters
#('--epoch', type=str, default='latest', help='which epoch to load? set to latest to use latest cached model')
#('--load_iter', type=int, default='0', help='which iteration to load? if load_iter > 0, the code will load models by iter_[load_iter]; otherwise, the code will load models by [epoch]')
#('--verbose', action='store_true', help='if specified, print more debugging information')
#('--suffix', default='', type=str, help='customized suffix: opt.name = opt.name + suffix: e.g., {model}_{netG}_size{load_size}')
# visdom and HTML visualization parameters
#('--display_freq', type=int, default=400, help='frequency of showing training results on screen')
#('--display_ncols', type=int, default=4, help='if positive, display all images in a single visdom web panel with certain number of images per row.')
#('--display_id', type=int, default=1, help='window id of the web display')
#('--display_server', type=str, default="http://localhost", help='visdom server of the web display')
#('--display_env', type=str, default='main', help='visdom display environment name (default is "main")')
#('--display_port', type=int, default=8097, help='visdom port of the web display')
#('--update_html_freq', type=int, default=1000, help='frequency of saving training results to html')
#('--print_freq', type=int, default=100, help='frequency of showing training results on console')
#('--no_html', action='store_true', help='do not save intermediate training results to [opt.checkpoints_dir]/[opt.name]/web/')
# network saving and loading parameters
#('--save_latest_freq', type=int, default=5000, help='frequency of saving the latest results')
#('--save_epoch_freq', type=int, default=5, help='frequency of saving checkpoints at the end of epochs')
#('--save_by_iter', action='store_true', help='whether saves model by iteration')
#('--continue_train', action='store_true', help='continue training: load the latest model')
#('--epoch_count', type=int, default=1, help='the starting epoch count, we save the model by <epoch_count>, <epoch_count>+<save_latest_freq>, ...')
#('--phase', type=str, default='train', help='train, val, test, etc')
# training parameters
#('--n_epochs', type=int, default=100, help='number of epochs with the initial learning rate')
#('--n_epochs_decay', type=int, default=100, help='number of epochs to linearly decay learning rate to zero')
#('--beta1', type=float, default=0.5, help='momentum term of adam')
#('--lr', type=float, default=0.0002, help='initial learning rate for adam')
#('--gan_mode', type=str, default='lsgan', help='the type of GAN objective. [vanilla| lsgan | wgangp]. vanilla GAN loss is the cross-entropy objective used in the original GAN paper.')
#('--pool_size', type=int, default=50, help='the size of image buffer that stores previously generated images')
#('--lr_policy', type=str, default='linear', help='learning rate policy. [linear | step | plateau | cosine]')
#('--lr_decay_iters', type=int, default=50, help='multiply by a gamma every lr_decay_iters iterations'
#---------------------------------------------------------
#----- Start the training ------------------------------------
if not Use_pretrained_model:
if Use_Data_augmentation:
!python pytorch-CycleGAN-and-pix2pix/train.py --dataroot "$Saving_path" --input_nc 3 --name $model_name --model cycle_gan --batch_size $batch_size --preprocess scale_width_and_crop --load_size $Image_min_dim --crop_size $patch_size --checkpoints_dir "$model_path" --no_html --n_epochs $number_of_epochs_lr_stable --n_epochs_decay $number_of_epochs_lr_decay --lr $initial_learning_rate --display_id 0 --save_epoch_freq 5
if not Use_Data_augmentation:
!python pytorch-CycleGAN-and-pix2pix/train.py --dataroot "$Saving_path" --input_nc 3 --name $model_name --model cycle_gan --batch_size $batch_size --preprocess scale_width_and_crop --load_size $Image_min_dim --crop_size $patch_size --checkpoints_dir "$model_path" --no_html --n_epochs $number_of_epochs_lr_stable --n_epochs_decay $number_of_epochs_lr_decay --lr $initial_learning_rate --display_id 0 --save_epoch_freq 5 --no_flip
if Use_pretrained_model:
if Use_Data_augmentation:
!python pytorch-CycleGAN-and-pix2pix/train.py --dataroot "$Saving_path" --input_nc 3 --name $model_name --model cycle_gan --batch_size $batch_size --preprocess scale_width_and_crop --load_size $Image_min_dim --crop_size $patch_size --checkpoints_dir "$model_path" --no_html --n_epochs $number_of_epochs_lr_stable --n_epochs_decay $number_of_epochs_lr_decay --lr $initial_learning_rate --display_id 0 --save_epoch_freq 5 --continue_train
if not Use_Data_augmentation:
!python pytorch-CycleGAN-and-pix2pix/train.py --dataroot "$Saving_path" --input_nc 3 --name $model_name --model cycle_gan --batch_size $batch_size --preprocess scale_width_and_crop --load_size $Image_min_dim --crop_size $patch_size --checkpoints_dir "$model_path" --no_html --n_epochs $number_of_epochs_lr_stable --n_epochs_decay $number_of_epochs_lr_decay --lr $initial_learning_rate --display_id 0 --save_epoch_freq 5 --continue_train --no_flip
#---------------------------------------------------------
print("Training, done.")
# Displaying the time elapsed for training
dt = time.time() - start
mins, sec = divmod(dt, 60)
hour, mins = divmod(mins, 60)
print("Time elapsed:",hour, "hour(s)",mins,"min(s)",round(sec),"sec(s)")
###Output
_____no_output_____
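###Markdown
(Optional) The CycleGAN implementation used above normally logs the training losses to a `loss_log.txt` file inside the model folder; this is an assumption of the sketch below, which therefore only prints the last few logged lines if such a file is found and does nothing otherwise.
###Code
#@markdown ##(Optional) Show the last training losses logged by CycleGAN
import os

# 'loss_log.txt' inside the model folder is an assumption of this sketch;
# the cell prints a message and exits gracefully if the file is not found.
loss_log_path = model_path+'/'+model_name+'/loss_log.txt'
if os.path.exists(loss_log_path):
  with open(loss_log_path, 'r') as f:
    log_lines = f.readlines()
  for line in log_lines[-10:]:
    print(line.rstrip())
else:
  print("No loss_log.txt file found in: "+model_path+'/'+model_name)
###Output
_____no_output_____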
###Markdown
**4.3. Download your model(s) from Google Drive**---Once training is complete, the trained model is automatically saved on your Google Drive, in the **model_path** folder that was selected in Section 3. It is however wise to download the folder as all data can be erased at the next training if using the same folder. **5. Evaluate your model**---This section allows the user to perform important quality checks on the validity and generalisability of the trained model. **We highly recommend performing quality control on all newly trained models.**Unfortunately, loss curves are not very informative for GAN networks. Therefore we perform the QC here using a test dataset. **5.1. Choose the model you want to assess**
###Code
# model name and path
#@markdown ###Do you want to assess the model you just trained ?
Use_the_current_trained_model = False #@param {type:"boolean"}
#@markdown ###If not, please provide the path to the model folder:
QC_model_folder = "" #@param {type:"string"}
#Here we define the loaded model name and path
QC_model_name = os.path.basename(QC_model_folder)
QC_model_path = os.path.dirname(QC_model_folder)
if (Use_the_current_trained_model):
QC_model_name = model_name
QC_model_path = model_path
full_QC_model_path = QC_model_path+'/'+QC_model_name+'/'
if os.path.exists(full_QC_model_path):
print("The "+QC_model_name+" network will be evaluated")
else:
W = '\033[0m' # white (normal)
R = '\033[31m' # red
print(R+'!! WARNING: The chosen model does not exist !!'+W)
print('Please make sure you provide a valid model path and model name before proceeding further.')
###Output
_____no_output_____
###Markdown
**5.2. Identify the best checkpoint to use to make predictions** CycleGAN saves model checkpoints every five epochs. Due to the stochastic nature of GAN networks, the last checkpoint is not always the best one to use. As a consequence, it can be challenging to choose the most suitable checkpoint to use to make predictions.This section allows you to perform predictions using all the saved checkpoints and to estimate the quality of these predictions by comparing them to the provided ground truth images. Metrics used include:**1. The SSIM (structural similarity) map** The SSIM metric is used to evaluate whether two images contain the same structures. It is a normalized metric and an SSIM of 1 indicates a perfect similarity between two images. Therefore for SSIM, the closer to 1, the better. The SSIM maps are constructed by calculating the SSIM metric in each pixel by considering the surrounding structural similarity in the neighbourhood of that pixel (currently defined as a window of 11 pixels and with Gaussian weighting of 1.5 pixel standard deviation, see our Wiki for more info). **mSSIM** is the SSIM value calculated across the entire window of both images.**The output below shows the SSIM maps with the mSSIM****2. The RSE (Root Squared Error) map** This is a display of the root of the squared difference between the normalized predicted and target or the source and the target. In this case, a smaller RSE is better. A perfect agreement between target and prediction will lead to an RSE map showing zeros everywhere (dark).**NRMSE (normalised root mean squared error)** gives the average difference between all pixels in the images compared to each other. Good agreement yields low NRMSE scores.**PSNR (Peak signal-to-noise ratio)** is a metric that gives the difference between the ground truth and prediction (or source input) in decibels, using the peak pixel values of the prediction and the MSE between the images. The higher the score the better the agreement.**The output below shows the RSE maps with the NRMSE and PSNR values.**
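To make these definitions concrete, the optional sketch in the next cell computes mSSIM, NRMSE and PSNR on a small synthetic example (a clean image versus a noisy copy of it) using the same scikit-image functions imported in section 2. It is only an illustration of the metrics, it mirrors the way NRMSE is computed in this notebook, and it is independent of your data.
###Code
#@markdown ##(Optional) Illustrate mSSIM, NRMSE and PSNR on a synthetic image pair
import numpy as np
from skimage.metrics import structural_similarity
from skimage.metrics import peak_signal_noise_ratio as psnr

# Build a simple synthetic "ground truth" image and a noisy "prediction" of it
rng = np.random.RandomState(0)
ground_truth = np.zeros((128, 128), dtype=np.float32)
ground_truth[32:96, 32:96] = 1.0
noisy_prediction = np.clip(ground_truth + rng.normal(0, 0.1, ground_truth.shape), 0, 1).astype(np.float32)

# mSSIM and the SSIM map (the map is what gets saved as an image in the QC cell below)
mssim, ssim_map = structural_similarity(ground_truth, noisy_prediction, data_range=1.0, full=True)

# NRMSE as computed in this notebook: square root of the mean of the RSE map
rse_map = np.sqrt(np.square(ground_truth - noisy_prediction))
nrmse = np.sqrt(np.mean(rse_map))

# PSNR in decibels (higher is better)
psnr_value = psnr(ground_truth, noisy_prediction, data_range=1.0)

print("mSSIM: "+str(round(float(mssim), 3)))
print("NRMSE: "+str(round(float(nrmse), 3)))
print("PSNR: "+str(round(float(psnr_value), 3))+" dB")
###Output
_____no_output_____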
###Code
#@markdown ##Choose the folders that contain your Quality Control dataset
Source_QC_folder = "" #@param{type:"string"}
Target_QC_folder = "" #@param{type:"string"}
Image_type = "Grayscale" #@param ["Grayscale", "RGB"]
# average function
def Average(lst):
return sum(lst) / len(lst)
# Create a quality control folder
if os.path.exists(QC_model_path+"/"+QC_model_name+"/Quality Control"):
shutil.rmtree(QC_model_path+"/"+QC_model_name+"/Quality Control")
os.makedirs(QC_model_path+"/"+QC_model_name+"/Quality Control")
# List images in Source_QC_folder
# This will find the image dimension of a randomly chosen image in Source_QC_folder
random_choice = random.choice(os.listdir(Source_QC_folder))
x = imageio.imread(Source_QC_folder+"/"+random_choice)
#Find image XY dimension
Image_Y = x.shape[0]
Image_X = x.shape[1]
Image_min_dim = min(Image_Y, Image_X)
# Here we need to move the data to be analysed so that cycleGAN can find them
Saving_path_QC= "/content/"+QC_model_name
if os.path.exists(Saving_path_QC):
shutil.rmtree(Saving_path_QC)
os.makedirs(Saving_path_QC)
Saving_path_QC_folder = Saving_path_QC+"_images"
if os.path.exists(Saving_path_QC_folder):
shutil.rmtree(Saving_path_QC_folder)
os.makedirs(Saving_path_QC_folder)
#Here we copy and rename all the checkpoints to be analysed
for f in os.listdir(full_QC_model_path):
shortname = f[:-6]
shortname = shortname + ".pth"
if f.endswith("net_G_A.pth"):
shutil.copyfile(full_QC_model_path+f, Saving_path_QC+"/"+shortname)
for files in os.listdir(Source_QC_folder):
shutil.copyfile(Source_QC_folder+"/"+files, Saving_path_QC_folder+"/"+files)
# This will find the image dimension of a randomly chosen image in Source_QC_folder
random_choice = random.choice(os.listdir(Source_QC_folder))
x = imageio.imread(Source_QC_folder+"/"+random_choice)
#Find image XY dimension
Image_Y = x.shape[0]
Image_X = x.shape[1]
Image_min_dim = int(min(Image_Y, Image_X))
Nb_Checkpoint = len(os.listdir(Saving_path_QC))
print(Nb_Checkpoint)
## Initiate list
Checkpoint_list = []
Average_ssim_score_list = []
for j in range(1, len(os.listdir(Saving_path_QC))+1):
checkpoints = j*5
if checkpoints == Nb_Checkpoint*5:
checkpoints = "latest"
print("The checkpoint currently analysed is ="+str(checkpoints))
Checkpoint_list.append(checkpoints)
# Create a quality control/Prediction Folder
QC_prediction_results = QC_model_path+"/"+QC_model_name+"/Quality Control/"+str(checkpoints)
if os.path.exists(QC_prediction_results):
shutil.rmtree(QC_prediction_results)
os.makedirs(QC_prediction_results)
#---------------------------- Predictions are performed here ----------------------
os.chdir("/content")
!python pytorch-CycleGAN-and-pix2pix/test.py --dataroot "$Saving_path_QC_folder" --name "$QC_model_name" --model test --epoch $checkpoints --no_dropout --preprocess scale_width --load_size $Image_min_dim --crop_size $Image_min_dim --results_dir "$QC_prediction_results" --checkpoints_dir "/content/"
#-----------------------------------------------------------------------------------
#Here we need to move the data again and remove all the unnecessary folders
Checkpoint_name = "test_"+str(checkpoints)
QC_results_images = QC_prediction_results+"/"+QC_model_name+"/"+Checkpoint_name+"/images"
QC_results_images_files = os.listdir(QC_results_images)
for f in QC_results_images_files:
shutil.copyfile(QC_results_images+"/"+f, QC_prediction_results+"/"+f)
os.chdir("/content")
#Here we clean up the extra files
shutil.rmtree(QC_prediction_results+"/"+QC_model_name)
#-------------------------------- QC for RGB ------------------------------------
if Image_type == "RGB":
# List images in Source_QC_folder
# This will find the image dimension of a randomly chosen image in Source_QC_folder
random_choice = random.choice(os.listdir(Source_QC_folder))
x = imageio.imread(Source_QC_folder+"/"+random_choice)
def ssim(img1, img2):
return structural_similarity(img1,img2,data_range=1.,full=True, multichannel=True)
# Open and create the csv file that will contain all the QC metrics
with open(QC_model_path+"/"+QC_model_name+"/Quality Control/"+str(checkpoints)+"/"+"QC_metrics_"+QC_model_name+str(checkpoints)+".csv", "w", newline='') as file:
writer = csv.writer(file)
# Write the header in the csv file
writer.writerow(["image #","Prediction v. GT mSSIM","Input v. GT mSSIM"])
# Initiate list
ssim_score_list = []
# Let's loop through the provided dataset in the QC folders
for i in os.listdir(Source_QC_folder):
if not os.path.isdir(os.path.join(Source_QC_folder,i)):
print('Running QC on: '+i)
shortname_no_PNG = i[:-4]
# -------------------------------- Target test data (Ground truth) --------------------------------
test_GT = imageio.imread(os.path.join(Target_QC_folder, i), as_gray=False, pilmode="RGB")
# -------------------------------- Source test data --------------------------------
test_source = imageio.imread(os.path.join(QC_model_path+"/"+QC_model_name+"/Quality Control/"+str(checkpoints),shortname_no_PNG+"_real.png"))
# -------------------------------- Prediction --------------------------------
test_prediction = imageio.imread(os.path.join(QC_model_path+"/"+QC_model_name+"/Quality Control/"+str(checkpoints),shortname_no_PNG+"_fake.png"))
#--------------------------- Here we normalise using histograms matching--------------------------------
test_prediction_matched = match_histograms(test_prediction, test_GT, multichannel=True)
test_source_matched = match_histograms(test_source, test_GT, multichannel=True)
# -------------------------------- Calculate the metric maps and save them --------------------------------
# Calculate the SSIM maps
index_SSIM_GTvsPrediction, img_SSIM_GTvsPrediction = ssim(test_GT, test_prediction_matched)
index_SSIM_GTvsSource, img_SSIM_GTvsSource = ssim(test_GT, test_source_matched)
ssim_score_list.append(index_SSIM_GTvsPrediction)
#Save ssim_maps
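# (The SSIM map takes values of at most 1, so scaling by 255 gives an 8-bit image that is easy to view.)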
img_SSIM_GTvsPrediction_8bit = (img_SSIM_GTvsPrediction* 255).astype("uint8")
io.imsave(QC_model_path+'/'+QC_model_name+"/Quality Control/"+str(checkpoints)+"/SSIM_GTvsPrediction_"+shortname_no_PNG+'.tif',img_SSIM_GTvsPrediction_8bit)
img_SSIM_GTvsSource_8bit = (img_SSIM_GTvsSource* 255).astype("uint8")
io.imsave(QC_model_path+'/'+QC_model_name+"/Quality Control/"+str(checkpoints)+"/SSIM_GTvsSource_"+shortname_no_PNG+'.tif',img_SSIM_GTvsSource_8bit)
writer.writerow([i,str(index_SSIM_GTvsPrediction),str(index_SSIM_GTvsSource)])
# Here we calculate the average SSIM over all QC images for this checkpoint
Average_SSIM_checkpoint = Average(ssim_score_list)
Average_ssim_score_list.append(Average_SSIM_checkpoint)
#------------------------------------------- QC for Grayscale ----------------------------------------------
if Image_type == "Grayscale":
def ssim(img1, img2):
return structural_similarity(img1,img2,data_range=1.,full=True, gaussian_weights=True, use_sample_covariance=False, sigma=1.5)
def normalize(x, pmin=3, pmax=99.8, axis=None, clip=False, eps=1e-20, dtype=np.float32):
mi = np.percentile(x,pmin,axis=axis,keepdims=True)
ma = np.percentile(x,pmax,axis=axis,keepdims=True)
return normalize_mi_ma(x, mi, ma, clip=clip, eps=eps, dtype=dtype)
def normalize_mi_ma(x, mi, ma, clip=False, eps=1e-20, dtype=np.float32):#dtype=np.float32
if dtype is not None:
x = x.astype(dtype,copy=False)
mi = dtype(mi) if np.isscalar(mi) else mi.astype(dtype,copy=False)
ma = dtype(ma) if np.isscalar(ma) else ma.astype(dtype,copy=False)
eps = dtype(eps)
try:
import numexpr
x = numexpr.evaluate("(x - mi) / ( ma - mi + eps )")
except ImportError:
x = (x - mi) / ( ma - mi + eps )
if clip:
x = np.clip(x,0,1)
return x
def norm_minmse(gt, x, normalize_gt=True):
if normalize_gt:
gt = normalize(gt, 0.1, 99.9, clip=False).astype(np.float32, copy = False)
x = x.astype(np.float32, copy=False) - np.mean(x)
#x = x - np.mean(x)
gt = gt.astype(np.float32, copy=False) - np.mean(gt)
#gt = gt - np.mean(gt)
scale = np.cov(x.flatten(), gt.flatten())[0, 1] / np.var(x.flatten())
return gt, scale * x
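# norm_minmse percentile-normalises the ground truth, removes the mean from both images and rescales
# the test image by the least-squares factor cov(x, gt)/var(x), so the two images are compared on a
# common intensity scale before SSIM, NRMSE and PSNR are computed.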
# Open and create the csv file that will contain all the QC metrics
with open(QC_model_path+"/"+QC_model_name+"/Quality Control/"+str(checkpoints)+"/"+"QC_metrics_"+QC_model_name+str(checkpoints)+".csv", "w", newline='') as file:
writer = csv.writer(file)
# Write the header in the csv file
writer.writerow(["image #","Prediction v. GT mSSIM","Input v. GT mSSIM", "Prediction v. GT NRMSE", "Input v. GT NRMSE", "Prediction v. GT PSNR", "Input v. GT PSNR"])
# Initiate list (before the loop, so the checkpoint average is taken over all QC images)
ssim_score_list = []
# Let's loop through the provided dataset in the QC folders
for i in os.listdir(Source_QC_folder):
if not os.path.isdir(os.path.join(Source_QC_folder,i)):
print('Running QC on: '+i)
shortname_no_PNG = i[:-4]
# -------------------------------- Target test data (Ground truth) --------------------------------
test_GT_raw = imageio.imread(os.path.join(Target_QC_folder, i), as_gray=False, pilmode="RGB")
test_GT = test_GT_raw[:,:,2]
# -------------------------------- Source test data --------------------------------
test_source_raw = imageio.imread(os.path.join(QC_model_path+"/"+QC_model_name+"/Quality Control/"+str(checkpoints),shortname_no_PNG+"_real.png"))
test_source = test_source_raw[:,:,2]
# Normalize the images wrt each other by minimizing the MSE between GT and Source image
test_GT_norm,test_source_norm = norm_minmse(test_GT, test_source, normalize_gt=True)
# -------------------------------- Prediction --------------------------------
test_prediction_raw = imageio.imread(os.path.join(QC_model_path+"/"+QC_model_name+"/Quality Control/"+str(checkpoints),shortname_no_PNG+"_fake.png"))
test_prediction = test_prediction_raw[:,:,2]
# Normalize the images wrt each other by minimizing the MSE between GT and prediction
test_GT_norm,test_prediction_norm = norm_minmse(test_GT, test_prediction, normalize_gt=True)
# -------------------------------- Calculate the metric maps and save them --------------------------------
# Calculate the SSIM maps
index_SSIM_GTvsPrediction, img_SSIM_GTvsPrediction = ssim(test_GT_norm, test_prediction_norm)
index_SSIM_GTvsSource, img_SSIM_GTvsSource = ssim(test_GT_norm, test_source_norm)
ssim_score_list.append(index_SSIM_GTvsPrediction)
#Save ssim_maps
img_SSIM_GTvsPrediction_8bit = (img_SSIM_GTvsPrediction* 255).astype("uint8")
io.imsave(QC_model_path+'/'+QC_model_name+"/Quality Control/"+str(checkpoints)+"/SSIM_GTvsPrediction_"+shortname_no_PNG+'.tif',img_SSIM_GTvsPrediction_8bit)
img_SSIM_GTvsSource_8bit = (img_SSIM_GTvsSource* 255).astype("uint8")
io.imsave(QC_model_path+'/'+QC_model_name+"/Quality Control/"+str(checkpoints)+"/SSIM_GTvsSource_"+shortname_no_PNG+'.tif',img_SSIM_GTvsSource_8bit)
# Calculate the Root Squared Error (RSE) maps
img_RSE_GTvsPrediction = np.sqrt(np.square(test_GT_norm - test_prediction_norm))
img_RSE_GTvsSource = np.sqrt(np.square(test_GT_norm - test_source_norm))
# Save SE maps
img_RSE_GTvsPrediction_8bit = (img_RSE_GTvsPrediction* 255).astype("uint8")
io.imsave(QC_model_path+'/'+QC_model_name+"/Quality Control/"+str(checkpoints)+"/RSE_GTvsPrediction_"+shortname_no_PNG+'.tif',img_RSE_GTvsPrediction_8bit)
img_RSE_GTvsSource_8bit = (img_RSE_GTvsSource* 255).astype("uint8")
io.imsave(QC_model_path+'/'+QC_model_name+"/Quality Control/"+str(checkpoints)+"/RSE_GTvsSource_"+shortname_no_PNG+'.tif',img_RSE_GTvsSource_8bit)
# -------------------------------- Calculate the RSE metrics and save them --------------------------------
# Normalised Root Mean Squared Error (here it's valid to take the mean of the image)
NRMSE_GTvsPrediction = np.sqrt(np.mean(img_RSE_GTvsPrediction))
NRMSE_GTvsSource = np.sqrt(np.mean(img_RSE_GTvsSource))
# We can also measure the peak signal to noise ratio between the images
PSNR_GTvsPrediction = psnr(test_GT_norm,test_prediction_norm,data_range=1.0)
PSNR_GTvsSource = psnr(test_GT_norm,test_source_norm,data_range=1.0)
writer.writerow([i,str(index_SSIM_GTvsPrediction),str(index_SSIM_GTvsSource),str(NRMSE_GTvsPrediction),str(NRMSE_GTvsSource),str(PSNR_GTvsPrediction),str(PSNR_GTvsSource)])
# Here we calculate the average SSIM over all QC images for this checkpoint
Average_SSIM_checkpoint = Average(ssim_score_list)
Average_ssim_score_list.append(Average_SSIM_checkpoint)
# All data is now processed and saved
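# As a rule of thumb, the checkpoint with the highest average mSSIM on the QC set is a good candidate
# to use for predictions in section 6 (enter its number, or "latest", in the checkpoint field there).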
# -------------------------------- Display --------------------------------
# Plot the average SSIM for each checkpoint
plt.figure(figsize=(20,5))
plt.plot(Checkpoint_list, Average_ssim_score_list, label="SSIM")
plt.title('Checkpoints vs. SSIM')
plt.ylabel('SSIM')
plt.xlabel('Checkpoints')
plt.legend()
plt.show()
# -------------------------------- Display RGB --------------------------------
from ipywidgets import interact
import ipywidgets as widgets
if Image_type == "RGB":
random_choice_shortname_no_PNG = shortname_no_PNG
@interact
def show_results(file=os.listdir(Source_QC_folder), checkpoints=Checkpoint_list):
random_choice_shortname_no_PNG = file[:-4]
df1 = pd.read_csv(QC_model_path+"/"+QC_model_name+"/Quality Control/"+str(checkpoints)+"/"+"QC_metrics_"+QC_model_name+str(checkpoints)+".csv", header=0)
df2 = df1.set_index("image #", drop = False)
index_SSIM_GTvsPrediction = df2.loc[file, "Prediction v. GT mSSIM"]
index_SSIM_GTvsSource = df2.loc[file, "Input v. GT mSSIM"]
#Setting up colours
cmap = None
plt.figure(figsize=(10,10))
# Target (Ground-truth)
plt.subplot(3,3,1)
plt.axis('off')
img_GT = imageio.imread(os.path.join(Target_QC_folder, file), as_gray=False, pilmode="RGB")
plt.imshow(img_GT, cmap = cmap)
plt.title('Target',fontsize=15)
# Source
plt.subplot(3,3,2)
plt.axis('off')
img_Source = imageio.imread(os.path.join(Source_QC_folder, file), as_gray=False, pilmode="RGB")
plt.imshow(img_Source, cmap = cmap)
plt.title('Source',fontsize=15)
#Prediction
plt.subplot(3,3,3)
plt.axis('off')
img_Prediction = io.imread(os.path.join(QC_model_path+"/"+QC_model_name+"/Quality Control/"+str(checkpoints), random_choice_shortname_no_PNG+"_fake.png"))
plt.imshow(img_Prediction, cmap = cmap)
plt.title('Prediction',fontsize=15)
#SSIM between GT and Source
plt.subplot(3,3,5)
#plt.axis('off')
plt.tick_params(
axis='both', # changes apply to the x-axis and y-axis
which='both', # both major and minor ticks are affected
bottom=False, # ticks along the bottom edge are off
top=False, # ticks along the top edge are off
left=False, # ticks along the left edge are off
right=False, # ticks along the right edge are off
labelbottom=False,
labelleft=False)
img_SSIM_GTvsSource = imageio.imread(os.path.join(QC_model_path+"/"+QC_model_name+"/Quality Control/"+str(checkpoints), "SSIM_GTvsSource_"+random_choice_shortname_no_PNG+".tif"))
imSSIM_GTvsSource = plt.imshow(img_SSIM_GTvsSource, cmap = cmap, vmin=0, vmax=1)
#plt.colorbar(imSSIM_GTvsSource,fraction=0.046, pad=0.04)
plt.title('Target vs. Source',fontsize=15)
plt.xlabel('mSSIM: '+str(round(index_SSIM_GTvsSource,3)),fontsize=14)
plt.ylabel('SSIM maps',fontsize=20, rotation=0, labelpad=75)
#SSIM between GT and Prediction
plt.subplot(3,3,6)
#plt.axis('off')
plt.tick_params(
axis='both', # changes apply to the x-axis and y-axis
which='both', # both major and minor ticks are affected
bottom=False, # ticks along the bottom edge are off
top=False, # ticks along the top edge are off
left=False, # ticks along the left edge are off
right=False, # ticks along the right edge are off
labelbottom=False,
labelleft=False)
img_SSIM_GTvsPrediction = imageio.imread(os.path.join(QC_model_path+"/"+QC_model_name+"/Quality Control/"+str(checkpoints), "SSIM_GTvsPrediction_"+random_choice_shortname_no_PNG+".tif"))
imSSIM_GTvsPrediction = plt.imshow(img_SSIM_GTvsPrediction, cmap = cmap, vmin=0,vmax=1)
#plt.colorbar(imSSIM_GTvsPrediction,fraction=0.046, pad=0.04)
plt.title('Target vs. Prediction',fontsize=15)
plt.xlabel('mSSIM: '+str(round(index_SSIM_GTvsPrediction,3)),fontsize=14)
# -------------------------------- Display Grayscale --------------------------------
if Image_type == "Grayscale":
random_choice_shortname_no_PNG = shortname_no_PNG
@interact
def show_results(file=os.listdir(Source_QC_folder), checkpoints=Checkpoint_list):
random_choice_shortname_no_PNG = file[:-4]
df1 = pd.read_csv(QC_model_path+"/"+QC_model_name+"/Quality Control/"+str(checkpoints)+"/"+"QC_metrics_"+QC_model_name+str(checkpoints)+".csv", header=0)
df2 = df1.set_index("image #", drop = False)
index_SSIM_GTvsPrediction = df2.loc[file, "Prediction v. GT mSSIM"]
index_SSIM_GTvsSource = df2.loc[file, "Input v. GT mSSIM"]
NRMSE_GTvsPrediction = df2.loc[file, "Prediction v. GT NRMSE"]
NRMSE_GTvsSource = df2.loc[file, "Input v. GT NRMSE"]
PSNR_GTvsSource = df2.loc[file, "Input v. GT PSNR"]
PSNR_GTvsPrediction = df2.loc[file, "Prediction v. GT PSNR"]
plt.figure(figsize=(15,15))
cmap = None
# Target (Ground-truth)
plt.subplot(3,3,1)
plt.axis('off')
img_GT = imageio.imread(os.path.join(Target_QC_folder, file), as_gray=True, pilmode="RGB")
plt.imshow(img_GT, norm=simple_norm(img_GT, percent = 99), cmap = 'gray')
plt.title('Target',fontsize=15)
# Source
plt.subplot(3,3,2)
plt.axis('off')
img_Source = imageio.imread(os.path.join(QC_model_path+"/"+QC_model_name+"/Quality Control/"+str(checkpoints), random_choice_shortname_no_PNG+"_real.png"))
plt.imshow(img_Source, norm=simple_norm(img_Source, percent = 99))
plt.title('Source',fontsize=15)
#Prediction
plt.subplot(3,3,3)
plt.axis('off')
img_Prediction = io.imread(os.path.join(QC_model_path+"/"+QC_model_name+"/Quality Control/"+str(checkpoints), random_choice_shortname_no_PNG+"_fake.png"))
plt.imshow(img_Prediction, norm=simple_norm(img_Prediction, percent = 99))
plt.title('Prediction',fontsize=15)
#Setting up colours
cmap = plt.cm.CMRmap
#SSIM between GT and Source
plt.subplot(3,3,5)
#plt.axis('off')
plt.tick_params(
axis='both', # changes apply to the x-axis and y-axis
which='both', # both major and minor ticks are affected
bottom=False, # ticks along the bottom edge are off
top=False, # ticks along the top edge are off
left=False, # ticks along the left edge are off
right=False, # ticks along the right edge are off
labelbottom=False,
labelleft=False)
img_SSIM_GTvsSource = imageio.imread(os.path.join(QC_model_path+"/"+QC_model_name+"/Quality Control/"+str(checkpoints), "SSIM_GTvsSource_"+random_choice_shortname_no_PNG+".tif"))
img_SSIM_GTvsSource = img_SSIM_GTvsSource / 255
imSSIM_GTvsSource = plt.imshow(img_SSIM_GTvsSource, cmap = cmap, vmin=0, vmax=1)
plt.colorbar(imSSIM_GTvsSource,fraction=0.046, pad=0.04)
plt.title('Target vs. Source',fontsize=15)
plt.xlabel('mSSIM: '+str(round(index_SSIM_GTvsSource,3)),fontsize=14)
plt.ylabel('SSIM maps',fontsize=20, rotation=0, labelpad=75)
#SSIM between GT and Prediction
plt.subplot(3,3,6)
#plt.axis('off')
plt.tick_params(
axis='both', # changes apply to the x-axis and y-axis
which='both', # both major and minor ticks are affected
bottom=False, # ticks along the bottom edge are off
top=False, # ticks along the top edge are off
left=False, # ticks along the left edge are off
right=False, # ticks along the right edge are off
labelbottom=False,
labelleft=False)
img_SSIM_GTvsPrediction = imageio.imread(os.path.join(QC_model_path+"/"+QC_model_name+"/Quality Control/"+str(checkpoints), "SSIM_GTvsPrediction_"+random_choice_shortname_no_PNG+".tif"))
img_SSIM_GTvsPrediction = img_SSIM_GTvsPrediction / 255
imSSIM_GTvsPrediction = plt.imshow(img_SSIM_GTvsPrediction, cmap = cmap, vmin=0,vmax=1)
plt.colorbar(imSSIM_GTvsPrediction,fraction=0.046, pad=0.04)
plt.title('Target vs. Prediction',fontsize=15)
plt.xlabel('mSSIM: '+str(round(index_SSIM_GTvsPrediction,3)),fontsize=14)
#Root Squared Error between GT and Source
plt.subplot(3,3,8)
#plt.axis('off')
plt.tick_params(
axis='both', # changes apply to the x-axis and y-axis
which='both', # both major and minor ticks are affected
bottom=False, # ticks along the bottom edge are off
top=False, # ticks along the top edge are off
left=False, # ticks along the left edge are off
right=False, # ticks along the right edge are off
labelbottom=False,
labelleft=False)
img_RSE_GTvsSource = imageio.imread(os.path.join(QC_model_path+"/"+QC_model_name+"/Quality Control/"+str(checkpoints), "RSE_GTvsSource_"+random_choice_shortname_no_PNG+".tif"))
img_RSE_GTvsSource = img_RSE_GTvsSource / 255
imRSE_GTvsSource = plt.imshow(img_RSE_GTvsSource, cmap = cmap, vmin=0, vmax = 1)
plt.colorbar(imRSE_GTvsSource,fraction=0.046,pad=0.04)
plt.title('Target vs. Source',fontsize=15)
plt.xlabel('NRMSE: '+str(round(NRMSE_GTvsSource,3))+', PSNR: '+str(round(PSNR_GTvsSource,3)),fontsize=14)
#plt.title('Target vs. Source PSNR: '+str(round(PSNR_GTvsSource,3)))
plt.ylabel('RSE maps',fontsize=20, rotation=0, labelpad=75)
#Root Squared Error between GT and Prediction
plt.subplot(3,3,9)
#plt.axis('off')
plt.tick_params(
axis='both', # changes apply to the x-axis and y-axis
which='both', # both major and minor ticks are affected
bottom=False, # ticks along the bottom edge are off
top=False, # ticks along the top edge are off
left=False, # ticks along the left edge are off
right=False, # ticks along the right edge are off
labelbottom=False,
labelleft=False)
img_RSE_GTvsPrediction = imageio.imread(os.path.join(QC_model_path+"/"+QC_model_name+"/Quality Control/"+str(checkpoints), "RSE_GTvsPrediction_"+random_choice_shortname_no_PNG+".tif"))
img_RSE_GTvsPrediction = img_RSE_GTvsPrediction / 255
imRSE_GTvsPrediction = plt.imshow(img_RSE_GTvsPrediction, cmap = cmap, vmin=0, vmax=1)
plt.colorbar(imRSE_GTvsPrediction,fraction=0.046,pad=0.04)
plt.title('Target vs. Prediction',fontsize=15)
plt.xlabel('NRMSE: '+str(round(NRMSE_GTvsPrediction,3))+', PSNR: '+str(round(PSNR_GTvsPrediction,3)),fontsize=14)
###Output
_____no_output_____
###Markdown
**6. Using the trained model**

---

In this section, the unseen data is processed using the trained model (from section 4). First, your unseen images are uploaded and prepared for prediction. After that, the trained model is used to process the images and the predictions are saved into your Google Drive.

**6.1. Generate prediction(s) from unseen dataset**

---

The current trained model (from section 4.2) can now be used to process images. If you want to use an older model, untick the **Use_the_current_trained_model** box and enter the name and path of the model to use. Predicted output images are saved in your **Result_folder** folder as PNG images.

**`Data_folder`:** This folder should contain the images that you want to process with your trained network.

**`Result_folder`:** This folder will contain the predicted output images.

**`checkpoint`:** Choose the checkpoint number you would like to use to perform predictions. To use the "latest" checkpoint, input "latest".
###Code
#@markdown ### Provide the path to your dataset and to the folder where the predictions are saved, then play the cell to predict outputs from your unseen images.
import glob
import os.path
latest = "latest"
Data_folder = "" #@param {type:"string"}
Result_folder = "" #@param {type:"string"}
# model name and path
#@markdown ###Do you want to use the current trained model?
Use_the_current_trained_model = False #@param {type:"boolean"}
#@markdown ###If not, please provide the path to the model folder:
Prediction_model_folder = "" #@param {type:"string"}
#@markdown ###What model checkpoint would you like to use?
checkpoint = latest#@param {type:"raw"}
#Here we find the loaded model name and parent path
Prediction_model_name = os.path.basename(Prediction_model_folder)
Prediction_model_path = os.path.dirname(Prediction_model_folder)
#here we check if we use the newly trained network or not
if (Use_the_current_trained_model):
print("Using current trained network")
Prediction_model_name = model_name
Prediction_model_path = model_path
#here we check if the model exists
full_Prediction_model_path = Prediction_model_path+'/'+Prediction_model_name+'/'
if os.path.exists(full_Prediction_model_path):
print("The "+Prediction_model_name+" network will be used.")
else:
W = '\033[0m' # white (normal)
R = '\033[31m' # red
print(R+'!! WARNING: The chosen model does not exist !!'+W)
print('Please make sure you provide a valid model path and model name before proceeding further.')
# Here we check that the requested checkpoint exists; if not, the closest one will be chosen
Nb_Checkpoint = len(glob.glob(os.path.join(full_Prediction_model_path, '*G_A.pth')))
print(Nb_Checkpoint)
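# Checkpoints were saved every 5 epochs during training (--save_epoch_freq 5), so valid values here
# are 5, 10, 15, ... up to the last saved checkpoint, or "latest".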
if not checkpoint == "latest":
if checkpoint < 10:
checkpoint = 5
if not checkpoint % 5 == 0:
checkpoint = ((int(checkpoint / 5)-1) * 5)
print (bcolors.WARNING + " Your chosen checkpoint is not divisible by 5; therefore the checkpoint used is now:",checkpoint)
if checkpoint > Nb_Checkpoint*5:
checkpoint = "latest"
if checkpoint == Nb_Checkpoint*5:
checkpoint = "latest"
# Here we need to move the data to be analysed so that cycleGAN can find them
Saving_path_prediction= "/content/"+Prediction_model_name
if os.path.exists(Saving_path_prediction):
shutil.rmtree(Saving_path_prediction)
os.makedirs(Saving_path_prediction)
Saving_path_Data_folder = Saving_path_prediction+"/testA"
if os.path.exists(Saving_path_Data_folder):
shutil.rmtree(Saving_path_Data_folder)
os.makedirs(Saving_path_Data_folder)
for files in os.listdir(Data_folder):
shutil.copyfile(Data_folder+"/"+files, Saving_path_Data_folder+"/"+files)
Nb_files_Data_folder = len(os.listdir(Data_folder)) +10
#Here we copy and rename the checkpoint to be used
shutil.copyfile(full_Prediction_model_path+"/"+str(checkpoint)+"_net_G_A.pth", full_Prediction_model_path+"/"+str(checkpoint)+"_net_G.pth")
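# (test.py with --model test expects a single generator file named "<epoch>_net_G.pth", hence the
# copy of the A->B generator "<epoch>_net_G_A.pth" above.)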
# This will find the image dimension of a randomly chosen image in Data_folder
random_choice = random.choice(os.listdir(Data_folder))
x = imageio.imread(Data_folder+"/"+random_choice)
#Find image XY dimension
Image_Y = x.shape[0]
Image_X = x.shape[1]
Image_min_dim = min(Image_Y, Image_X)
print(Image_min_dim)
#-------------------------------- Perform predictions -----------------------------
#-------------------------------- Options that can be used to perform predictions -----------------------------
# basic parameters
#('--dataroot', required=True, help='path to images (should have subfolders trainA, trainB, valA, valB, etc)')
#('--name', type=str, default='experiment_name', help='name of the experiment. It decides where to store samples and models')
#('--gpu_ids', type=str, default='0', help='gpu ids: e.g. 0 0,1,2, 0,2. use -1 for CPU')
#('--checkpoints_dir', type=str, default='./checkpoints', help='models are saved here')
# model parameters
#('--model', type=str, default='cycle_gan', help='chooses which model to use. [cycle_gan | pix2pix | test | colorization]')
#('--input_nc', type=int, default=3, help='# of input image channels: 3 for RGB and 1 for grayscale')
#('--output_nc', type=int, default=3, help='# of output image channels: 3 for RGB and 1 for grayscale')
#('--ngf', type=int, default=64, help='# of gen filters in the last conv layer')
#('--ndf', type=int, default=64, help='# of discrim filters in the first conv layer')
#('--netD', type=str, default='basic', help='specify discriminator architecture [basic | n_layers | pixel]. The basic model is a 70x70 PatchGAN. n_layers allows you to specify the layers in the discriminator')
#('--netG', type=str, default='resnet_9blocks', help='specify generator architecture [resnet_9blocks | resnet_6blocks | unet_256 | unet_128]')
#('--n_layers_D', type=int, default=3, help='only used if netD==n_layers')
#('--norm', type=str, default='instance', help='instance normalization or batch normalization [instance | batch | none]')
#('--init_type', type=str, default='normal', help='network initialization [normal | xavier | kaiming | orthogonal]')
#('--init_gain', type=float, default=0.02, help='scaling factor for normal, xavier and orthogonal.')
#('--no_dropout', action='store_true', help='no dropout for the generator')
# dataset parameters
#('--dataset_mode', type=str, default='unaligned', help='chooses how datasets are loaded. [unaligned | aligned | single | colorization]')
#('--direction', type=str, default='AtoB', help='AtoB or BtoA')
#('--serial_batches', action='store_true', help='if true, takes images in order to make batches, otherwise takes them randomly')
#('--num_threads', default=4, type=int, help='# threads for loading data')
#('--batch_size', type=int, default=1, help='input batch size')
#('--load_size', type=int, default=286, help='scale images to this size')
#('--crop_size', type=int, default=256, help='then crop to this size')
#('--max_dataset_size', type=int, default=float("inf"), help='Maximum number of samples allowed per dataset. If the dataset directory contains more than max_dataset_size, only a subset is loaded.')
#('--preprocess', type=str, default='resize_and_crop', help='scaling and cropping of images at load time [resize_and_crop | crop | scale_width | scale_width_and_crop | none]')
#('--no_flip', action='store_true', help='if specified, do not flip the images for data augmentation')
#('--display_winsize', type=int, default=256, help='display window size for both visdom and HTML')
# additional parameters
#('--epoch', type=str, default='latest', help='which epoch to load? set to latest to use latest cached model')
#('--load_iter', type=int, default='0', help='which iteration to load? if load_iter > 0, the code will load models by iter_[load_iter]; otherwise, the code will load models by [epoch]')
#('--verbose', action='store_true', help='if specified, print more debugging information')
#('--suffix', default='', type=str, help='customized suffix: opt.name = opt.name + suffix: e.g., {model}_{netG}_size{load_size}')
#('--ntest', type=int, default=float("inf"), help='# of test examples.')
#('--results_dir', type=str, default='./results/', help='saves results here.')
#('--aspect_ratio', type=float, default=1.0, help='aspect ratio of result images')
#('--phase', type=str, default='test', help='train, val, test, etc')
# Dropout and Batchnorm have different behaviour during training and test.
#('--eval', action='store_true', help='use eval mode during test time.')
#('--num_test', type=int, default=50, help='how many test images to run')
# rewrite default values
# To avoid cropping, the load_size should be the same as crop_size
#parser.set_defaults(load_size=parser.get_default('crop_size'))
#------------------------------------------------------------------------
#---------------------------- Predictions are performed here ----------------------
os.chdir("/content")
!python pytorch-CycleGAN-and-pix2pix/test.py --dataroot "$Saving_path_Data_folder" --name "$Prediction_model_name" --model test --no_dropout --preprocess scale_width --load_size $Image_min_dim --crop_size $Image_min_dim --results_dir "$Result_folder" --checkpoints_dir "$Prediction_model_path" --num_test $Nb_files_Data_folder --epoch $checkpoint
#-----------------------------------------------------------------------------------
###Output
_____no_output_____
###Markdown
**6.2. Inspect the predicted output**---
###Code
# @markdown ##Run this cell to display a randomly chosen input and its corresponding predicted output.
import os
# This will display a randomly chosen dataset input and predicted output
random_choice = random.choice(os.listdir(Data_folder))
random_choice_no_extension = os.path.splitext(random_choice)
x = imageio.imread(Result_folder+"/"+Prediction_model_name+"/test_"+str(checkpoint)+"/images/"+random_choice_no_extension[0]+"_real.png")
y = imageio.imread(Result_folder+"/"+Prediction_model_name+"/test_"+str(checkpoint)+"/images/"+random_choice_no_extension[0]+"_fake.png")
f=plt.figure(figsize=(16,8))
plt.subplot(1,2,1)
plt.imshow(x, interpolation='nearest')
plt.title('Input')
plt.axis('off');
plt.subplot(1,2,2)
plt.imshow(y, interpolation='nearest')
plt.title('Prediction')
plt.axis('off');
###Output
_____no_output_____
###Markdown
**CycleGAN**

---

CycleGAN is a method that can capture the characteristics of one image domain and learn how these characteristics can be translated into another image domain, all in the absence of any paired training examples. It was first published by [Zhu *et al.* in 2017](https://arxiv.org/abs/1703.10593). Unlike pix2pix, the image transformation performed does not require paired images for training (unsupervised learning) and is made possible here by using a set of two Generative Adversarial Networks (GANs) that learn to transform images both from the first domain to the second and vice-versa. In brief, the two generators are trained jointly with a cycle-consistency loss (for instance ||F(G(a)) − a|| + ||G(F(b)) − b||, where G maps domain A to B and F maps B back to A), which encourages each image to be recovered after a round trip and thereby removes the need for paired examples.

**This particular notebook enables unpaired image-to-image translation. If your dataset is paired, you should also consider using the pix2pix notebook.**

---

*Disclaimer*:

This notebook is part of the *Zero-Cost Deep-Learning to Enhance Microscopy* project (https://github.com/HenriquesLab/DeepLearning_Collab/wiki). Jointly developed by the Jacquemet (link to https://cellmig.org/) and Henriques (https://henriqueslab.github.io/) laboratories.

This notebook is based on the following paper: **Unpaired Image-to-Image Translation using Cycle-Consistent Adversarial Networks** from Zhu *et al.*, published on arXiv in 2017 (https://arxiv.org/abs/1703.10593)

The source code of the CycleGAN PyTorch implementation can be found in: https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix

**Please also cite this original paper when using or developing this notebook.**

**License**

---
###Code
#@markdown ##Double click to see the license information
#------------------------- LICENSE FOR ZeroCostDL4Mic------------------------------------
#This ZeroCostDL4Mic notebook is distributed under the MIT licence
#------------------------- LICENSE FOR CycleGAN ------------------------------------
#Copyright (c) 2017, Jun-Yan Zhu and Taesung Park
#All rights reserved.
#Redistribution and use in source and binary forms, with or without
#modification, are permitted provided that the following conditions are met:
#* Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#* Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
#AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
#IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
#DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
#FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
#DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
#SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
#CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
#OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
#OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#--------------------------- LICENSE FOR pix2pix --------------------------------
#BSD License
#For pix2pix software
#Copyright (c) 2016, Phillip Isola and Jun-Yan Zhu
#All rights reserved.
#Redistribution and use in source and binary forms, with or without
#modification, are permitted provided that the following conditions are met:
#* Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#* Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#----------------------------- LICENSE FOR DCGAN --------------------------------
#BSD License
#For dcgan.torch software
#Copyright (c) 2015, Facebook, Inc. All rights reserved.
#Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
#Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
#Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
#Neither the name Facebook nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
#THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
###Output
_____no_output_____
###Markdown
**How to use this notebook?**

---

Videos describing how to use our notebooks are available on YouTube:

- [**Video 1**](https://www.youtube.com/watch?v=GzD2gamVNHI&feature=youtu.be): Full run-through of the workflow to obtain the notebooks and the provided test datasets, as well as a common use of the notebook
- [**Video 2**](https://www.youtube.com/watch?v=PUuQfP5SsqM&feature=youtu.be): Detailed description of the different sections of the notebook

---

**Structure of a notebook**

The notebook contains two types of cell:

**Text cells** provide information and can be modified by double-clicking the cell. You are currently reading a text cell. You can create a new text cell by clicking `+ Text`.

**Code cells** contain code that can be modified by selecting the cell. To execute the cell, move your cursor to the `[ ]`-mark on the left side of the cell (a play button appears). Click to execute the cell. Once execution is done, the animation of the play button stops. You can create a new code cell by clicking `+ Code`.

---

**Table of contents, Code snippets** and **Files**

On the top left side of the notebook you find three tabs which contain, from top to bottom:

*Table of contents* = contains the structure of the notebook. Click the content to move quickly between sections.

*Code snippets* = contains examples of how to code certain tasks. You can ignore this when using this notebook.

*Files* = contains all available files. After mounting your Google Drive (see section 1) you will find your files and folders here.

**Remember that all uploaded files are purged after changing the runtime.** All files saved in Google Drive will remain. You do not need to use the Mount Drive button; your Google Drive is connected in section 1.2.

**Note:** The "sample data" in "Files" contains default files. Do not upload anything in here!

---

**Making changes to the notebook**

**You can make a copy** of the notebook and save it to your Google Drive. To do this, click File -> Save a copy in Drive.

To **edit a cell**, double-click on the text. This will show you either the source code (in code cells) or the source text (in text cells).

You can use the `#`-mark in code cells to comment out parts of the code. This allows you to keep the original code piece in the cell as a comment.

**0. Before getting started**

---

To train CycleGAN, **you only need two folders containing PNG images**. The images do not need to be paired.

While you do not need paired images to train CycleGAN, if possible, **we strongly recommend that you generate a paired dataset. This means that the same image needs to be acquired in the two conditions. These images can be used to assess the quality of your trained model (Quality control dataset)**. The quality control assessment can be done directly in this notebook.

Please note that you currently can **only use .png files!**

Here's a common data structure that can work:

* Experiment A
  - **Training dataset (non-matching images)**
    - Training_source
      - img_1.png, img_2.png, ...
    - Training_target
      - img_1.png, img_2.png, ...
  - **Quality control dataset (matching images)**
    - Training_source
      - img_1.png, img_2.png
    - Training_target
      - img_1.png, img_2.png
  - **Data to be predicted**
  - **Results**

---

**Important note**

- If you wish to **Train a network from scratch** using your own dataset (and we encourage everyone to do that), you will need to run **sections 1 - 4**, then use **section 5** to assess the quality of your model and **section 6** to run predictions using the model that you trained.

- If you wish to **Evaluate your model** using a model previously generated and saved on your Google Drive, you will only need to run **sections 1 and 2** to set up the notebook, then use **section 5** to assess the quality of your model.

- If you only wish to **run predictions** using a model previously generated and saved on your Google Drive, you will only need to run **sections 1 and 2** to set up the notebook, then use **section 6** to run the predictions on the desired model.

---

**1. Initialise the Colab session**

---

**1.1. Check for GPU access**

---

By default, the session should be using Python 3 and GPU acceleration, but it is possible to ensure that these are set properly by doing the following:

Go to **Runtime -> Change the Runtime type**

**Runtime type: Python 3** *(Python 3 is the programming language in which this program is written)*

**Accelerator: GPU** *(Graphics processing unit)*
###Code
#@markdown ##Run this cell to check if you have GPU access
import tensorflow as tf
if tf.test.gpu_device_name()=='':
print('You do not have GPU access.')
print('Did you change your runtime?')
print('If the runtime setting is correct then Google did not allocate a GPU for your session')
print('Expect slow performance. To access GPU try reconnecting later')
else:
print('You have GPU access')
!nvidia-smi
###Output
_____no_output_____
###Markdown
**1.2. Mount your Google Drive**

---

To use this notebook on the data present in your Google Drive, you need to mount your Google Drive to this notebook.

Play the cell below to mount your Google Drive and follow the link. In the new browser window, select your drive and select 'Allow', copy the authorisation code, paste it into the cell and press Enter. This will give Colab access to the data on the drive.

Once this is done, your data are available in the **Files** tab on the top left of the notebook.
###Code
#@markdown ##Run this cell to connect your Google Drive to Colab
#@markdown * Click on the URL.
#@markdown * Sign in your Google Account.
#@markdown * Copy the authorization code.
#@markdown * Enter the authorization code.
#@markdown * Click on "Files" site on the right. Refresh the site. Your Google Drive folder should now be available here as "drive".
#mounts user's Google Drive to Google Colab.
from google.colab import drive
drive.mount('/content/gdrive')
###Output
_____no_output_____
###Markdown
**2. Install CycleGAN and dependencies**---
###Code
Notebook_version = ['1.11']
#@markdown ##Install CycleGAN and dependencies
#------- Code from the cycleGAN demo notebook starts here -------
#Here, we install libraries which are not already included in Colab.
!git clone https://github.com/junyanz/pytorch-CycleGAN-and-pix2pix
import os
os.chdir('pytorch-CycleGAN-and-pix2pix/')
!pip install -r requirements.txt
!pip install fpdf
import imageio
from skimage import data
from skimage import exposure
from skimage.exposure import match_histograms
from skimage.util import img_as_int
# ------- Common variable to all ZeroCostDL4Mic notebooks -------
import numpy as np
from matplotlib import pyplot as plt
import urllib
import os, random
import shutil
import zipfile
from tifffile import imread, imsave
import time
import sys
from pathlib import Path
import pandas as pd
import csv
from glob import glob
from scipy import signal
from scipy import ndimage
from skimage import io
from sklearn.linear_model import LinearRegression
from skimage.util import img_as_uint
import matplotlib as mpl
from skimage.metrics import structural_similarity
from skimage.metrics import peak_signal_noise_ratio as psnr
from astropy.visualization import simple_norm
from skimage import img_as_float32
from skimage.util import img_as_ubyte
from tqdm import tqdm
from fpdf import FPDF, HTMLMixin
from datetime import datetime
from pip._internal.operations.freeze import freeze
import subprocess
# Colors for the warning messages
class bcolors:
WARNING = '\033[31m'
#Disable some of the tensorflow warnings
import warnings
warnings.filterwarnings("ignore")
print("Libraries installed")
# Check if this is the latest version of the notebook
Latest_notebook_version = pd.read_csv("https://raw.githubusercontent.com/HenriquesLab/ZeroCostDL4Mic/master/Colab_notebooks/Latest_ZeroCostDL4Mic_Release.csv")
if Notebook_version == list(Latest_notebook_version.columns):
print("This notebook is up-to-date.")
if not Notebook_version == list(Latest_notebook_version.columns):
print(bcolors.WARNING +"A new version of this notebook has been released. We recommend that you download it at https://github.com/HenriquesLab/ZeroCostDL4Mic/wiki")
!pip freeze > requirements.txt
###Output
_____no_output_____
###Markdown
**3. Select your parameters and paths**

---

**3.1. Setting main training parameters**

---

**Paths for training, predictions and results**

**`Training_source`, `Training_target`:** These are the paths to your folders containing the Training_source and Training_target training data, respectively. To find the paths of the folders containing the respective datasets, go to your Files on the left of the notebook, navigate to the folder containing your files, copy the path by right-clicking on the folder and choosing **Copy path**, and paste it into the right box below.

**`model_name`:** Use only my_model-style names, not my-model (use "_", not "-"). Do not use spaces in the name. Avoid using the name of an existing model (saved in the same folder) as it will be overwritten.

**`model_path`:** Enter the path where your model will be saved once trained (for instance your result folder).

**Training Parameters**

**`number_of_epochs`:** Input how many epochs (rounds) the network will be trained. Preliminary results can already be observed after a few (10) epochs, but a full training should run for 200 epochs or more. Evaluate the performance after training (see section 5). **Default value: 200**

**Advanced Parameters - experienced users only**

**`patch_size`:** CycleGAN divides the image into patches for training. Input the size of the patches (length of a side). The value should be smaller than the dimensions of the image and divisible by 4. **Default value: 512**

**When choosing the patch_size, the value should be i) large enough that it will enclose many instances, ii) small enough that the resulting patches fit into the RAM.**

**`batch_size`:** This parameter defines the number of patches seen in each training step. Reducing or increasing the **batch size** may slow or speed up your training, respectively, and can influence network performance. **Default value: 1**

**`initial_learning_rate`:** Input the initial value to be used as the learning rate. **Default value: 0.0002**
###Code
#@markdown ###Path to training images:
Training_source = "" #@param {type:"string"}
InputFile = Training_source+"/*.png"
Training_target = "" #@param {type:"string"}
OutputFile = Training_target+"/*.png"
#Define where the patch file will be saved
base = "/content"
# model name and path
#@markdown ###Name of the model and path to model folder:
model_name = "" #@param {type:"string"}
model_path = "" #@param {type:"string"}
# other parameters for training.
#@markdown ###Training Parameters
#@markdown Number of epochs:
number_of_epochs = 200#@param {type:"number"}
#@markdown ###Advanced Parameters
Use_Default_Advanced_Parameters = True #@param {type:"boolean"}
#@markdown ###If not, please input:
patch_size = 512#@param {type:"number"} # in pixels
batch_size = 1#@param {type:"number"}
initial_learning_rate = 0.0002 #@param {type:"number"}
if (Use_Default_Advanced_Parameters):
print("Default advanced parameters enabled")
batch_size = 1
patch_size = 512
initial_learning_rate = 0.0002
#here we check that no model with the same name already exist, if so delete
if os.path.exists(model_path+'/'+model_name):
print(bcolors.WARNING +"!! WARNING: "+model_name+" already exists and will be deleted in the following cell !!")
print(bcolors.WARNING +"To continue training "+model_name+", choose a new model_name here, and load "+model_name+" in section 3.3")
#To use Cyclegan we need to organise the data in a way the model can understand
Saving_path= "/content/"+model_name
#Saving_path= model_path+"/"+model_name
if os.path.exists(Saving_path):
shutil.rmtree(Saving_path)
os.makedirs(Saving_path)
TrainA_Folder = Saving_path+"/trainA"
if os.path.exists(TrainA_Folder):
shutil.rmtree(TrainA_Folder)
os.makedirs(TrainA_Folder)
TrainB_Folder = Saving_path+"/trainB"
if os.path.exists(TrainB_Folder):
shutil.rmtree(TrainB_Folder)
os.makedirs(TrainB_Folder)
# Here we disable the use of a pre-trained model by default (in case the cell below is not run)
Use_pretrained_model = False
# Here we enable data augmentation by default (in case the cell below is not run)
Use_Data_augmentation = True
# This will display a randomly chosen dataset input and output
random_choice = random.choice(os.listdir(Training_source))
x = imageio.imread(Training_source+"/"+random_choice)
#Find image XY dimension
Image_Y = x.shape[0]
Image_X = x.shape[1]
Image_min_dim = min(Image_Y, Image_X)
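# Image_min_dim is later passed to the CycleGAN scripts as --load_size; with --preprocess
# scale_width_and_crop the images are first rescaled to this width and then random crops of
# patch_size x patch_size pixels are taken for training.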
#Hyperparameters failsafes
if patch_size > min(Image_Y, Image_X):
patch_size = min(Image_Y, Image_X)
print (bcolors.WARNING + " Your chosen patch_size is bigger than the xy dimension of your image; therefore the patch_size chosen is now:",patch_size)
# Here we check that patch_size is divisible by 4
if not patch_size % 4 == 0:
patch_size = ((int(patch_size / 4)-1) * 4)
print (bcolors.WARNING + " Your chosen patch_size is not divisible by 4; therefore the patch_size chosen is now:",patch_size)
random_choice_2 = random.choice(os.listdir(Training_target))
y = imageio.imread(Training_target+"/"+random_choice_2)
f=plt.figure(figsize=(16,8))
plt.subplot(1,2,1)
plt.imshow(x, interpolation='nearest')
plt.title('Training source')
plt.axis('off');
plt.subplot(1,2,2)
plt.imshow(y, interpolation='nearest')
plt.title('Training target')
plt.axis('off');
plt.savefig('/content/TrainingDataExample_cycleGAN.png',bbox_inches='tight',pad_inches=0)
###Output
_____no_output_____
###Markdown
**3.2. Data augmentation**

---

Data augmentation can improve training progress by amplifying differences in the dataset. This can be useful if the available dataset is small since, in this case, it is possible that a network could quickly learn every example in the dataset (overfitting), without augmentation. Augmentation is not necessary for training and if your training dataset is large you should disable it.

Data augmentation is performed here by flipping the patches. By default data augmentation is enabled.
###Code
#Data augmentation
#@markdown ##Play this cell to enable or disable data augmentation:
Use_Data_augmentation = True #@param {type:"boolean"}
if Use_Data_augmentation:
print("Data augmentation enabled")
if not Use_Data_augmentation:
print("Data augmentation disabled")
###Output
_____no_output_____
###Markdown
**3.3. Using weights from a pre-trained model as initial weights**

---

Here, you can set the path to a pre-trained model from which the weights can be extracted and used as a starting point for this training session. **This pre-trained model needs to be a CycleGAN model**. This option allows you to perform training over multiple Colab runtimes or to do transfer learning using models trained outside of ZeroCostDL4Mic. **You do not need to run this section if you want to train a network from scratch**.

In order to continue training from the point where the pre-trained model left off, it is advisable to also **load the learning rate** that was used when the training ended. This is automatically saved for models trained with ZeroCostDL4Mic and will be loaded here. If no learning rate can be found in the model folder provided, the default learning rate will be used.
###Code
# @markdown ##Loading weights from a pre-trained network
Use_pretrained_model = False #@param {type:"boolean"}
#@markdown ###If not, please provide the path to the model folder:
pretrained_model_path = "" #@param {type:"string"}
# --------------------- Check if we load a previously trained model ------------------------
if Use_pretrained_model:
h5_file_path_A = os.path.join(pretrained_model_path, "latest_net_G_A.pth")
h5_file_path_B = os.path.join(pretrained_model_path, "latest_net_G_B.pth")
# --------------------- Check the model exist ------------------------
if not (os.path.exists(h5_file_path_A) and os.path.exists(h5_file_path_B)):
print(bcolors.WARNING+'WARNING: Pretrained model does not exist')
Use_pretrained_model = False
print(bcolors.WARNING+'No pretrained network will be used.')
if os.path.exists(h5_file_path_A) and os.path.exists(h5_file_path_B):
print("Pretrained model "+os.path.basename(pretrained_model_path)+" was found and will be loaded prior to training.")
else:
print(bcolors.WARNING+'No pretrained network will be used.')
###Output
_____no_output_____
###Markdown
**4. Train the network**

---

**4.1. Prepare the training data for training**

---

Here, we use the information from section 3 to prepare the training data into a format suitable for training.
###Code
#@markdown ##Prepare the data for training
print("Data preparation in progress")
if os.path.exists(model_path+'/'+model_name):
shutil.rmtree(model_path+'/'+model_name)
os.makedirs(model_path+'/'+model_name)
#--------------- Here we move the files to trainA and train B ---------
for f in os.listdir(Training_source):
shutil.copyfile(Training_source+"/"+f, TrainA_Folder+"/"+f)
for files in os.listdir(Training_target):
shutil.copyfile(Training_target+"/"+files, TrainB_Folder+"/"+files)
#---------------------------------------------------------------------
# CycleGAN uses a number of epochs without lr decay and a number of epochs with lr decay
number_of_epochs_lr_stable = int(number_of_epochs/2)
number_of_epochs_lr_decay = int(number_of_epochs/2)
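# e.g. with number_of_epochs = 200 the network trains for 100 epochs at the initial learning rate
# followed by 100 epochs of linear decay (passed below as --n_epochs and --n_epochs_decay).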
if Use_pretrained_model :
for f in os.listdir(pretrained_model_path):
if (f.startswith("latest_net_")):
shutil.copyfile(pretrained_model_path+"/"+f, model_path+'/'+model_name+"/"+f)
print("Data ready for training")
###Output
_____no_output_____
###Markdown
**4.2. Start Training**

---

When playing the cell below you should see updates after each epoch (round). Network training can take some time.

* **CRITICAL NOTE:** Google Colab has a time limit for processing (to prevent using GPU power for data mining). Training time must be less than 12 hours! If training takes longer than 12 hours, please decrease the number of epochs or the number of patches, or continue the training in a second Colab session.
###Code
#@markdown ##Start training
start = time.time()
os.chdir("/content")
#--------------------------------- Command line inputs to change CycleGAN parameters ------------
# basic parameters
#('--dataroot', required=True, help='path to images (should have subfolders trainA, trainB, valA, valB, etc)')
#('--name', type=str, default='experiment_name', help='name of the experiment. It decides where to store samples and models')
#('--gpu_ids', type=str, default='0', help='gpu ids: e.g. 0 0,1,2, 0,2. use -1 for CPU')
#('--checkpoints_dir', type=str, default='./checkpoints', help='models are saved here')
# model parameters
#('--model', type=str, default='cycle_gan', help='chooses which model to use. [cycle_gan | pix2pix | test | colorization]')
#('--input_nc', type=int, default=3, help='# of input image channels: 3 for RGB and 1 for grayscale')
#('--output_nc', type=int, default=3, help='# of output image channels: 3 for RGB and 1 for grayscale')
#('--ngf', type=int, default=64, help='# of gen filters in the last conv layer')
#('--ndf', type=int, default=64, help='# of discrim filters in the first conv layer')
#('--netD', type=str, default='basic', help='specify discriminator architecture [basic | n_layers | pixel]. The basic model is a 70x70 PatchGAN. n_layers allows you to specify the layers in the discriminator')
#('--netG', type=str, default='resnet_9blocks', help='specify generator architecture [resnet_9blocks | resnet_6blocks | unet_256 | unet_128]')
#('--n_layers_D', type=int, default=3, help='only used if netD==n_layers')
#('--norm', type=str, default='instance', help='instance normalization or batch normalization [instance | batch | none]')
#('--init_type', type=str, default='normal', help='network initialization [normal | xavier | kaiming | orthogonal]')
#('--init_gain', type=float, default=0.02, help='scaling factor for normal, xavier and orthogonal.')
#('--no_dropout', action='store_true', help='no dropout for the generator')
# dataset parameters
#('--dataset_mode', type=str, default='unaligned', help='chooses how datasets are loaded. [unaligned | aligned | single | colorization]')
#('--direction', type=str, default='AtoB', help='AtoB or BtoA')
#('--serial_batches', action='store_true', help='if true, takes images in order to make batches, otherwise takes them randomly')
#('--num_threads', default=4, type=int, help='# threads for loading data')
#('--batch_size', type=int, default=1, help='input batch size')
#('--load_size', type=int, default=286, help='scale images to this size')
#('--crop_size', type=int, default=256, help='then crop to this size')
#('--max_dataset_size', type=int, default=float("inf"), help='Maximum number of samples allowed per dataset. If the dataset directory contains more than max_dataset_size, only a subset is loaded.')
#('--preprocess', type=str, default='resize_and_crop', help='scaling and cropping of images at load time [resize_and_crop | crop | scale_width | scale_width_and_crop | none]')
#('--no_flip', action='store_true', help='if specified, do not flip the images for data augmentation')
#('--display_winsize', type=int, default=256, help='display window size for both visdom and HTML')
# additional parameters
#('--epoch', type=str, default='latest', help='which epoch to load? set to latest to use latest cached model')
#('--load_iter', type=int, default='0', help='which iteration to load? if load_iter > 0, the code will load models by iter_[load_iter]; otherwise, the code will load models by [epoch]')
#('--verbose', action='store_true', help='if specified, print more debugging information')
#('--suffix', default='', type=str, help='customized suffix: opt.name = opt.name + suffix: e.g., {model}_{netG}_size{load_size}')
# visdom and HTML visualization parameters
#('--display_freq', type=int, default=400, help='frequency of showing training results on screen')
#('--display_ncols', type=int, default=4, help='if positive, display all images in a single visdom web panel with certain number of images per row.')
#('--display_id', type=int, default=1, help='window id of the web display')
#('--display_server', type=str, default="http://localhost", help='visdom server of the web display')
#('--display_env', type=str, default='main', help='visdom display environment name (default is "main")')
#('--display_port', type=int, default=8097, help='visdom port of the web display')
#('--update_html_freq', type=int, default=1000, help='frequency of saving training results to html')
#('--print_freq', type=int, default=100, help='frequency of showing training results on console')
#('--no_html', action='store_true', help='do not save intermediate training results to [opt.checkpoints_dir]/[opt.name]/web/')
# network saving and loading parameters
#('--save_latest_freq', type=int, default=5000, help='frequency of saving the latest results')
#('--save_epoch_freq', type=int, default=5, help='frequency of saving checkpoints at the end of epochs')
#('--save_by_iter', action='store_true', help='whether saves model by iteration')
#('--continue_train', action='store_true', help='continue training: load the latest model')
#('--epoch_count', type=int, default=1, help='the starting epoch count, we save the model by <epoch_count>, <epoch_count>+<save_latest_freq>, ...')
#('--phase', type=str, default='train', help='train, val, test, etc')
# training parameters
#('--n_epochs', type=int, default=100, help='number of epochs with the initial learning rate')
#('--n_epochs_decay', type=int, default=100, help='number of epochs to linearly decay learning rate to zero')
#('--beta1', type=float, default=0.5, help='momentum term of adam')
#('--lr', type=float, default=0.0002, help='initial learning rate for adam')
#('--gan_mode', type=str, default='lsgan', help='the type of GAN objective. [vanilla| lsgan | wgangp]. vanilla GAN loss is the cross-entropy objective used in the original GAN paper.')
#('--pool_size', type=int, default=50, help='the size of image buffer that stores previously generated images')
#('--lr_policy', type=str, default='linear', help='learning rate policy. [linear | step | plateau | cosine]')
#('--lr_decay_iters', type=int, default=50, help='multiply by a gamma every lr_decay_iters iterations')
#---------------------------------------------------------
#----- Start the training ------------------------------------
if not Use_pretrained_model:
if Use_Data_augmentation:
!python pytorch-CycleGAN-and-pix2pix/train.py --dataroot "$Saving_path" --input_nc 3 --name $model_name --model cycle_gan --batch_size $batch_size --preprocess scale_width_and_crop --load_size $Image_min_dim --crop_size $patch_size --checkpoints_dir "$model_path" --no_html --n_epochs $number_of_epochs_lr_stable --n_epochs_decay $number_of_epochs_lr_decay --lr $initial_learning_rate --display_id 0 --save_epoch_freq 5
if not Use_Data_augmentation:
!python pytorch-CycleGAN-and-pix2pix/train.py --dataroot "$Saving_path" --input_nc 3 --name $model_name --model cycle_gan --batch_size $batch_size --preprocess scale_width_and_crop --load_size $Image_min_dim --crop_size $patch_size --checkpoints_dir "$model_path" --no_html --n_epochs $number_of_epochs_lr_stable --n_epochs_decay $number_of_epochs_lr_decay --lr $initial_learning_rate --display_id 0 --save_epoch_freq 5 --no_flip
if Use_pretrained_model:
if Use_Data_augmentation:
!python pytorch-CycleGAN-and-pix2pix/train.py --dataroot "$Saving_path" --input_nc 3 --name $model_name --model cycle_gan --batch_size $batch_size --preprocess scale_width_and_crop --load_size $Image_min_dim --crop_size $patch_size --checkpoints_dir "$model_path" --no_html --n_epochs $number_of_epochs_lr_stable --n_epochs_decay $number_of_epochs_lr_decay --lr $initial_learning_rate --display_id 0 --save_epoch_freq 5 --continue_train
if not Use_Data_augmentation:
!python pytorch-CycleGAN-and-pix2pix/train.py --dataroot "$Saving_path" --input_nc 3 --name $model_name --model cycle_gan --batch_size $batch_size --preprocess scale_width_and_crop --load_size $Image_min_dim --crop_size $patch_size --checkpoints_dir "$model_path" --no_html --n_epochs $number_of_epochs_lr_stable --n_epochs_decay $number_of_epochs_lr_decay --lr $initial_learning_rate --display_id 0 --save_epoch_freq 5 --continue_train --no_flip
#---------------------------------------------------------
print("Training, done.")
# Displaying the time elapsed for training
dt = time.time() - start
mins, sec = divmod(dt, 60)
hour, mins = divmod(mins, 60)
print("Time elapsed:",hour, "hour(s)",mins,"min(s)",round(sec),"sec(s)")
# save FPDF() class into a
# variable pdf
from datetime import datetime
class MyFPDF(FPDF, HTMLMixin):
pass
pdf = MyFPDF()
pdf.add_page()
pdf.set_right_margin(-1)
pdf.set_font("Arial", size = 11, style='B')
Network = 'cycleGAN'
day = datetime.now()
datetime_str = str(day)[0:10]
Header = 'Training report for '+Network+' model ('+model_name+')\nDate: '+datetime_str
pdf.multi_cell(180, 5, txt = Header, align = 'L')
# add another cell
training_time = "Training time: "+str(hour)+ "hour(s) "+str(mins)+"min(s) "+str(round(sec))+"sec(s)"
pdf.cell(190, 5, txt = training_time, ln = 1, align='L')
pdf.ln(1)
Header_2 = 'Information for your materials and method:'
pdf.cell(190, 5, txt=Header_2, ln=1, align='L')
all_packages = ''
for requirement in freeze(local_only=True):
all_packages = all_packages+requirement+', '
#print(all_packages)
#Main Packages
main_packages = ''
version_numbers = []
for name in ['tensorflow','numpy','torch']:
find_name=all_packages.find(name)
main_packages = main_packages+all_packages[find_name:all_packages.find(',',find_name)]+', '
#Version numbers only here:
version_numbers.append(all_packages[find_name+len(name)+2:all_packages.find(',',find_name)])
cuda_version = subprocess.run('nvcc --version',stdout=subprocess.PIPE, shell=True)
cuda_version = cuda_version.stdout.decode('utf-8')
cuda_version = cuda_version[cuda_version.find(', V')+3:-1]
gpu_name = subprocess.run('nvidia-smi',stdout=subprocess.PIPE, shell=True)
gpu_name = gpu_name.stdout.decode('utf-8')
gpu_name = gpu_name[gpu_name.find('Tesla'):gpu_name.find('Tesla')+10]
#print(cuda_version[cuda_version.find(', V')+3:-1])
#print(gpu_name)
shape = io.imread(Training_source+'/'+os.listdir(Training_source)[1]).shape
dataset_size = len(os.listdir(Training_source))
text = 'The '+Network+' model was trained from scratch for '+str(number_of_epochs)+' epochs on '+str(dataset_size)+' paired image patches (image dimensions: '+str(shape)+', patch size: ('+str(patch_size)+','+str(patch_size)+')) with a batch size of '+str(batch_size)+' and a least-square GAN loss function, using the '+Network+' ZeroCostDL4Mic notebook (v '+Notebook_version[0]+') (von Chamier & Laine et al., 2020). Key python packages used include tensorflow (v '+version_numbers[0]+'), numpy (v '+version_numbers[1]+'), torch (v '+version_numbers[2]+'), cuda (v '+cuda_version+'). The training was accelerated using a '+gpu_name+'GPU.'
if Use_pretrained_model:
  text = 'The '+Network+' model was trained for '+str(number_of_epochs)+' epochs on '+str(dataset_size)+' paired image patches (image dimensions: '+str(shape)+', patch size: ('+str(patch_size)+','+str(patch_size)+')) with a batch size of '+str(batch_size)+' and a least-square GAN loss function, using the '+Network+' ZeroCostDL4Mic notebook (v '+Notebook_version[0]+') (von Chamier & Laine et al., 2020). The model was retrained from a pretrained model. Key python packages used include tensorflow (v '+version_numbers[0]+'), numpy (v '+version_numbers[1]+'), torch (v '+version_numbers[2]+'), cuda (v '+cuda_version+'). The training was accelerated using a '+gpu_name+'GPU.'
pdf.set_font('')
pdf.set_font_size(10.)
pdf.multi_cell(190, 5, txt = text, align='L')
pdf.set_font('')
pdf.set_font('Arial', size = 10, style = 'B')
pdf.ln(1)
pdf.cell(28, 5, txt='Augmentation: ', ln=0)
pdf.set_font('')
if Use_Data_augmentation:
aug_text = 'The dataset was augmented by default'
else:
aug_text = 'No augmentation was used for training.'
pdf.multi_cell(190, 5, txt=aug_text, align='L')
pdf.set_font('Arial', size = 11, style = 'B')
pdf.ln(1)
pdf.cell(180, 5, txt = 'Parameters', align='L', ln=1)
pdf.set_font('')
pdf.set_font_size(10.)
if Use_Default_Advanced_Parameters:
pdf.cell(200, 5, txt='Default Advanced Parameters were enabled')
pdf.cell(200, 5, txt='The following parameters were used for training:')
pdf.ln(1)
html = """
<table width=40% style="margin-left:0px;">
<tr>
<th width = 50% align="left">Parameter</th>
<th width = 50% align="left">Value</th>
</tr>
<tr>
<td width = 50%>number_of_epochs</td>
<td width = 50%>{0}</td>
</tr>
<tr>
<td width = 50%>patch_size</td>
<td width = 50%>{1}</td>
</tr>
<tr>
<td width = 50%>batch_size</td>
<td width = 50%>{2}</td>
</tr>
<tr>
<td width = 50%>initial_learning_rate</td>
<td width = 50%>{3}</td>
</tr>
</table>
""".format(number_of_epochs,str(patch_size)+'x'+str(patch_size),batch_size,initial_learning_rate)
pdf.write_html(html)
#pdf.multi_cell(190, 5, txt = text_2, align='L')
pdf.set_font("Arial", size = 11, style='B')
pdf.ln(1)
pdf.cell(190, 5, txt = 'Training Dataset', align='L', ln=1)
pdf.set_font('')
pdf.set_font('Arial', size = 10, style = 'B')
pdf.cell(30, 5, txt= 'Training_source:', align = 'L', ln=0)
pdf.set_font('')
pdf.multi_cell(170, 5, txt = Training_source, align = 'L')
pdf.set_font('')
pdf.set_font('Arial', size = 10, style = 'B')
pdf.cell(29, 5, txt= 'Training_target:', align = 'L', ln=0)
pdf.set_font('')
pdf.multi_cell(170, 5, txt = Training_target, align = 'L')
#pdf.cell(190, 5, txt=aug_text, align='L', ln=1)
pdf.ln(1)
pdf.set_font('')
pdf.set_font('Arial', size = 10, style = 'B')
pdf.cell(22, 5, txt= 'Model Path:', align = 'L', ln=0)
pdf.set_font('')
pdf.multi_cell(170, 5, txt = model_path+'/'+model_name, align = 'L')
pdf.ln(1)
pdf.cell(60, 5, txt = 'Example Training pair', ln=1)
pdf.ln(1)
exp_size = io.imread('/content/TrainingDataExample_cycleGAN.png').shape
pdf.image('/content/TrainingDataExample_cycleGAN.png', x = 11, y = None, w = round(exp_size[1]/8), h = round(exp_size[0]/8))
pdf.ln(1)
ref_1 = 'References:\n - ZeroCostDL4Mic: von Chamier, Lucas & Laine, Romain, et al. "ZeroCostDL4Mic: an open platform to simplify access and use of Deep-Learning in Microscopy." BioRxiv (2020).'
pdf.multi_cell(190, 5, txt = ref_1, align='L')
ref_2 = '- cycleGAN: Zhu, Jun-Yan, et al. "Unpaired image-to-image translation using cycle-consistent adversarial networks." Proceedings of the IEEE international conference on computer vision. 2017.'
pdf.multi_cell(190, 5, txt = ref_2, align='L')
# if Use_Data_augmentation:
# ref_3 = '- Augmentor: Bloice, Marcus D., Christof Stocker, and Andreas Holzinger. "Augmentor: an image augmentation library for machine learning." arXiv preprint arXiv:1708.04680 (2017).'
# pdf.multi_cell(190, 5, txt = ref_3, align='L')
pdf.ln(3)
reminder = 'Important:\nRemember to perform the quality control step on all newly trained models\nPlease consider depositing your training dataset on Zenodo'
pdf.set_font('Arial', size = 11, style='B')
pdf.multi_cell(190, 5, txt=reminder, align='C')
pdf.output(model_path+'/'+model_name+'/'+model_name+"_training_report.pdf")
###Output
_____no_output_____
###Markdown
**4.3. Download your model(s) from Google Drive**---Once training is complete, the trained model is automatically saved to your Google Drive, in the **model_path** folder that was selected in Section 3. It is, however, wise to download the folder, as all data can be erased by the next training run if the same folder is used. **5. Evaluate your model**---This section allows the user to perform important quality checks on the validity and generalisability of the trained model. **We highly recommend performing quality control on all newly trained models.**Unfortunately, loss-function curves are not very informative for GAN networks, so we perform the QC here using a test dataset. **5.1. Choose the model you want to assess**
###Code
# model name and path
#@markdown ###Do you want to assess the model you just trained ?
Use_the_current_trained_model = True #@param {type:"boolean"}
#@markdown ###If not, please provide the path to the model folder:
QC_model_folder = "" #@param {type:"string"}
#Here we define the loaded model name and path
QC_model_name = os.path.basename(QC_model_folder)
QC_model_path = os.path.dirname(QC_model_folder)
if (Use_the_current_trained_model):
QC_model_name = model_name
QC_model_path = model_path
full_QC_model_path = QC_model_path+'/'+QC_model_name+'/'
if os.path.exists(full_QC_model_path):
print("The "+QC_model_name+" network will be evaluated")
else:
W = '\033[0m' # white (normal)
R = '\033[31m' # red
print(R+'!! WARNING: The chosen model does not exist !!'+W)
print('Please make sure you provide a valid model path and model name before proceeding further.')
###Output
_____no_output_____
###Markdown
**5.2. Identify the best checkpoint to use to make predictions** CycleGAN saves model checkpoints every five epochs. Due to the stochastic nature of GAN training, the last checkpoint is not always the best one to use, so it can be challenging to choose the most suitable checkpoint for making predictions.This section allows you to perform predictions using all the saved checkpoints and to estimate the quality of these predictions by comparing them to the provided ground truth images. The metrics used include:**1. The SSIM (structural similarity) map** The SSIM metric evaluates whether two images contain the same structures. It is a normalized metric, and an SSIM of 1 indicates perfect similarity between two images; therefore, for SSIM, the closer to 1, the better. The SSIM maps are constructed by calculating the SSIM metric in each pixel, considering the structural similarity in the neighbourhood of that pixel (currently defined as a window of 11 pixels with a Gaussian weighting of 1.5 pixel standard deviation, see our Wiki for more info). **mSSIM** is the SSIM value calculated across the entire window of both images.**The output below shows the SSIM maps with the mSSIM.****2. The RSE (Root Squared Error) map** This displays the root of the squared difference between the normalized prediction and the target, or between the source and the target. In this case, a smaller RSE is better. Perfect agreement between target and prediction will lead to an RSE map showing zeros everywhere (dark).**NRMSE (normalised root mean squared error)** gives the average difference between all pixels in the images compared to each other. Good agreement yields low NRMSE scores.**PSNR (peak signal-to-noise ratio)** gives the difference between the ground truth and the prediction (or source input) in decibels, using the peak pixel value of the prediction and the MSE between the images. The higher the score, the better the agreement.**The output below shows the RSE maps with the NRMSE and PSNR values.**
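As a quick reference, the snippet below is a minimal, self-contained sketch of the metrics described above. It is not the notebook's own QC code (that follows in the next cell); it assumes `gt` and `pred` are 2D grayscale arrays already normalised to the [0, 1] range and mirrors the formulas used in the QC cell below.

```python
import numpy as np
from skimage.metrics import structural_similarity, peak_signal_noise_ratio

def quick_qc_metrics(gt, pred):
    """Illustrative only: mSSIM, SSIM map, NRMSE, RSE map and PSNR for two [0, 1] grayscale images."""
    # mSSIM and the per-pixel SSIM map (windowed structural similarity)
    mssim, ssim_map = structural_similarity(gt, pred, data_range=1., full=True)
    # Root Squared Error map and the NRMSE summary value (as computed in the QC cell below)
    rse_map = np.sqrt(np.square(gt - pred))
    nrmse = np.sqrt(np.mean(rse_map))
    # Peak signal-to-noise ratio in decibels
    psnr_db = peak_signal_noise_ratio(gt, pred, data_range=1.)
    return mssim, ssim_map, nrmse, rse_map, psnr_db
```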
###Code
#@markdown ##Choose the folders that contain your Quality Control dataset
Source_QC_folder = "" #@param{type:"string"}
Target_QC_folder = "" #@param{type:"string"}
Image_type = "Grayscale" #@param ["Grayscale", "RGB"]
# average function
def Average(lst):
return sum(lst) / len(lst)
# Create a quality control folder
if os.path.exists(QC_model_path+"/"+QC_model_name+"/Quality Control"):
shutil.rmtree(QC_model_path+"/"+QC_model_name+"/Quality Control")
os.makedirs(QC_model_path+"/"+QC_model_name+"/Quality Control")
# List images in Source_QC_folder
# This will find the image dimension of a randomly chosen image in Source_QC_folder
random_choice = random.choice(os.listdir(Source_QC_folder))
x = imageio.imread(Source_QC_folder+"/"+random_choice)
#Find image XY dimension
Image_Y = x.shape[0]
Image_X = x.shape[1]
Image_min_dim = min(Image_Y, Image_X)
# Here we need to move the data to be analysed so that cycleGAN can find them
Saving_path_QC= "/content/"+QC_model_name
if os.path.exists(Saving_path_QC):
shutil.rmtree(Saving_path_QC)
os.makedirs(Saving_path_QC)
Saving_path_QC_folder = Saving_path_QC+"_images"
if os.path.exists(Saving_path_QC_folder):
shutil.rmtree(Saving_path_QC_folder)
os.makedirs(Saving_path_QC_folder)
#Here we copy and rename all the checkpoints to be analysed
for f in os.listdir(full_QC_model_path):
shortname = f[:-6]
shortname = shortname + ".pth"
if f.endswith("net_G_A.pth"):
shutil.copyfile(full_QC_model_path+f, Saving_path_QC+"/"+shortname)
for files in os.listdir(Source_QC_folder):
shutil.copyfile(Source_QC_folder+"/"+files, Saving_path_QC_folder+"/"+files)
# This will find the image dimension of a randomly chosen image in Source_QC_folder
random_choice = random.choice(os.listdir(Source_QC_folder))
x = imageio.imread(Source_QC_folder+"/"+random_choice)
#Find image XY dimension
Image_Y = x.shape[0]
Image_X = x.shape[1]
Image_min_dim = int(min(Image_Y, Image_X))
Nb_Checkpoint = len(os.listdir(Saving_path_QC))
print(Nb_Checkpoint)
## Initiate list
Checkpoint_list = []
Average_ssim_score_list = []
for j in range(1, len(os.listdir(Saving_path_QC))+1):
checkpoints = j*5
if checkpoints == Nb_Checkpoint*5:
checkpoints = "latest"
print("The checkpoint currently analysed is ="+str(checkpoints))
Checkpoint_list.append(checkpoints)
# Create a quality control/Prediction Folder
QC_prediction_results = QC_model_path+"/"+QC_model_name+"/Quality Control/"+str(checkpoints)
if os.path.exists(QC_prediction_results):
shutil.rmtree(QC_prediction_results)
os.makedirs(QC_prediction_results)
#---------------------------- Predictions are performed here ----------------------
os.chdir("/content")
!python pytorch-CycleGAN-and-pix2pix/test.py --dataroot "$Saving_path_QC_folder" --name "$QC_model_name" --model test --epoch $checkpoints --no_dropout --preprocess scale_width --load_size $Image_min_dim --crop_size $Image_min_dim --results_dir "$QC_prediction_results" --checkpoints_dir "/content/"
#-----------------------------------------------------------------------------------
#Here we need to move the data again and remove all the unnecessary folders
Checkpoint_name = "test_"+str(checkpoints)
QC_results_images = QC_prediction_results+"/"+QC_model_name+"/"+Checkpoint_name+"/images"
QC_results_images_files = os.listdir(QC_results_images)
for f in QC_results_images_files:
shutil.copyfile(QC_results_images+"/"+f, QC_prediction_results+"/"+f)
os.chdir("/content")
#Here we clean up the extra files
shutil.rmtree(QC_prediction_results+"/"+QC_model_name)
#-------------------------------- QC for RGB ------------------------------------
if Image_type == "RGB":
# List images in Source_QC_folder
# This will find the image dimension of a randomly chosen image in Source_QC_folder
random_choice = random.choice(os.listdir(Source_QC_folder))
x = imageio.imread(Source_QC_folder+"/"+random_choice)
def ssim(img1, img2):
return structural_similarity(img1,img2,data_range=1.,full=True, multichannel=True)
# Open and create the csv file that will contain all the QC metrics
with open(QC_model_path+"/"+QC_model_name+"/Quality Control/"+str(checkpoints)+"/"+"QC_metrics_"+QC_model_name+str(checkpoints)+".csv", "w", newline='') as file:
writer = csv.writer(file)
# Write the header in the csv file
writer.writerow(["image #","Prediction v. GT mSSIM","Input v. GT mSSIM"])
# Initiate list
ssim_score_list = []
# Let's loop through the provided dataset in the QC folders
for i in os.listdir(Source_QC_folder):
if not os.path.isdir(os.path.join(Source_QC_folder,i)):
print('Running QC on: '+i)
shortname_no_PNG = i[:-4]
# -------------------------------- Target test data (Ground truth) --------------------------------
test_GT = imageio.imread(os.path.join(Target_QC_folder, i), as_gray=False, pilmode="RGB")
# -------------------------------- Source test data --------------------------------
test_source = imageio.imread(os.path.join(QC_model_path+"/"+QC_model_name+"/Quality Control/"+str(checkpoints),shortname_no_PNG+"_real.png"))
# -------------------------------- Prediction --------------------------------
test_prediction = imageio.imread(os.path.join(QC_model_path+"/"+QC_model_name+"/Quality Control/"+str(checkpoints),shortname_no_PNG+"_fake.png"))
#--------------------------- Here we normalise using histograms matching--------------------------------
test_prediction_matched = match_histograms(test_prediction, test_GT, multichannel=True)
test_source_matched = match_histograms(test_source, test_GT, multichannel=True)
# -------------------------------- Calculate the metric maps and save them --------------------------------
# Calculate the SSIM maps
index_SSIM_GTvsPrediction, img_SSIM_GTvsPrediction = ssim(test_GT, test_prediction_matched)
index_SSIM_GTvsSource, img_SSIM_GTvsSource = ssim(test_GT, test_source_matched)
ssim_score_list.append(index_SSIM_GTvsPrediction)
#Save ssim_maps
img_SSIM_GTvsPrediction_8bit = (img_SSIM_GTvsPrediction* 255).astype("uint8")
io.imsave(QC_model_path+'/'+QC_model_name+"/Quality Control/"+str(checkpoints)+"/SSIM_GTvsPrediction_"+shortname_no_PNG+'.tif',img_SSIM_GTvsPrediction_8bit)
img_SSIM_GTvsSource_8bit = (img_SSIM_GTvsSource* 255).astype("uint8")
io.imsave(QC_model_path+'/'+QC_model_name+"/Quality Control/"+str(checkpoints)+"/SSIM_GTvsSource_"+shortname_no_PNG+'.tif',img_SSIM_GTvsSource_8bit)
writer.writerow([i,str(index_SSIM_GTvsPrediction),str(index_SSIM_GTvsSource)])
#Here we calculate the average SSIM over all images for this checkpoint
Average_SSIM_checkpoint = Average(ssim_score_list)
Average_ssim_score_list.append(Average_SSIM_checkpoint)
#------------------------------------------- QC for Grayscale ----------------------------------------------
if Image_type == "Grayscale":
def ssim(img1, img2):
return structural_similarity(img1,img2,data_range=1.,full=True, gaussian_weights=True, use_sample_covariance=False, sigma=1.5)
def normalize(x, pmin=3, pmax=99.8, axis=None, clip=False, eps=1e-20, dtype=np.float32):
mi = np.percentile(x,pmin,axis=axis,keepdims=True)
ma = np.percentile(x,pmax,axis=axis,keepdims=True)
return normalize_mi_ma(x, mi, ma, clip=clip, eps=eps, dtype=dtype)
def normalize_mi_ma(x, mi, ma, clip=False, eps=1e-20, dtype=np.float32):#dtype=np.float32
if dtype is not None:
x = x.astype(dtype,copy=False)
mi = dtype(mi) if np.isscalar(mi) else mi.astype(dtype,copy=False)
ma = dtype(ma) if np.isscalar(ma) else ma.astype(dtype,copy=False)
eps = dtype(eps)
try:
import numexpr
x = numexpr.evaluate("(x - mi) / ( ma - mi + eps )")
except ImportError:
x = (x - mi) / ( ma - mi + eps )
if clip:
x = np.clip(x,0,1)
return x
def norm_minmse(gt, x, normalize_gt=True):
if normalize_gt:
gt = normalize(gt, 0.1, 99.9, clip=False).astype(np.float32, copy = False)
x = x.astype(np.float32, copy=False) - np.mean(x)
#x = x - np.mean(x)
gt = gt.astype(np.float32, copy=False) - np.mean(gt)
#gt = gt - np.mean(gt)
scale = np.cov(x.flatten(), gt.flatten())[0, 1] / np.var(x.flatten())
return gt, scale * x
# Open and create the csv file that will contain all the QC metrics
with open(QC_model_path+"/"+QC_model_name+"/Quality Control/"+str(checkpoints)+"/"+"QC_metrics_"+QC_model_name+str(checkpoints)+".csv", "w", newline='') as file:
writer = csv.writer(file)
# Write the header in the csv file
writer.writerow(["image #","Prediction v. GT mSSIM","Input v. GT mSSIM", "Prediction v. GT NRMSE", "Input v. GT NRMSE", "Prediction v. GT PSNR", "Input v. GT PSNR"])
# Let's loop through the provided dataset in the QC folders
for i in os.listdir(Source_QC_folder):
if not os.path.isdir(os.path.join(Source_QC_folder,i)):
print('Running QC on: '+i)
ssim_score_list = []
shortname_no_PNG = i[:-4]
# -------------------------------- Target test data (Ground truth) --------------------------------
test_GT_raw = imageio.imread(os.path.join(Target_QC_folder, i), as_gray=False, pilmode="RGB")
test_GT = test_GT_raw[:,:,2]
# -------------------------------- Source test data --------------------------------
test_source_raw = imageio.imread(os.path.join(QC_model_path+"/"+QC_model_name+"/Quality Control/"+str(checkpoints),shortname_no_PNG+"_real.png"))
test_source = test_source_raw[:,:,2]
# Normalize the images wrt each other by minimizing the MSE between GT and Source image
test_GT_norm,test_source_norm = norm_minmse(test_GT, test_source, normalize_gt=True)
# -------------------------------- Prediction --------------------------------
test_prediction_raw = imageio.imread(os.path.join(QC_model_path+"/"+QC_model_name+"/Quality Control/"+str(checkpoints),shortname_no_PNG+"_fake.png"))
test_prediction = test_prediction_raw[:,:,2]
# Normalize the images wrt each other by minimizing the MSE between GT and prediction
test_GT_norm,test_prediction_norm = norm_minmse(test_GT, test_prediction, normalize_gt=True)
# -------------------------------- Calculate the metric maps and save them --------------------------------
# Calculate the SSIM maps
index_SSIM_GTvsPrediction, img_SSIM_GTvsPrediction = ssim(test_GT_norm, test_prediction_norm)
index_SSIM_GTvsSource, img_SSIM_GTvsSource = ssim(test_GT_norm, test_source_norm)
ssim_score_list.append(index_SSIM_GTvsPrediction)
#Save ssim_maps
img_SSIM_GTvsPrediction_8bit = (img_SSIM_GTvsPrediction* 255).astype("uint8")
io.imsave(QC_model_path+'/'+QC_model_name+"/Quality Control/"+str(checkpoints)+"/SSIM_GTvsPrediction_"+shortname_no_PNG+'.tif',img_SSIM_GTvsPrediction_8bit)
img_SSIM_GTvsSource_8bit = (img_SSIM_GTvsSource* 255).astype("uint8")
io.imsave(QC_model_path+'/'+QC_model_name+"/Quality Control/"+str(checkpoints)+"/SSIM_GTvsSource_"+shortname_no_PNG+'.tif',img_SSIM_GTvsSource_8bit)
# Calculate the Root Squared Error (RSE) maps
img_RSE_GTvsPrediction = np.sqrt(np.square(test_GT_norm - test_prediction_norm))
img_RSE_GTvsSource = np.sqrt(np.square(test_GT_norm - test_source_norm))
# Save SE maps
img_RSE_GTvsPrediction_8bit = (img_RSE_GTvsPrediction* 255).astype("uint8")
io.imsave(QC_model_path+'/'+QC_model_name+"/Quality Control/"+str(checkpoints)+"/RSE_GTvsPrediction_"+shortname_no_PNG+'.tif',img_RSE_GTvsPrediction_8bit)
img_RSE_GTvsSource_8bit = (img_RSE_GTvsSource* 255).astype("uint8")
io.imsave(QC_model_path+'/'+QC_model_name+"/Quality Control/"+str(checkpoints)+"/RSE_GTvsSource_"+shortname_no_PNG+'.tif',img_RSE_GTvsSource_8bit)
# -------------------------------- Calculate the RSE metrics and save them --------------------------------
# Normalised Root Mean Squared Error (here it's valid to take the mean of the image)
NRMSE_GTvsPrediction = np.sqrt(np.mean(img_RSE_GTvsPrediction))
NRMSE_GTvsSource = np.sqrt(np.mean(img_RSE_GTvsSource))
# We can also measure the peak signal to noise ratio between the images
PSNR_GTvsPrediction = psnr(test_GT_norm,test_prediction_norm,data_range=1.0)
PSNR_GTvsSource = psnr(test_GT_norm,test_source_norm,data_range=1.0)
writer.writerow([i,str(index_SSIM_GTvsPrediction),str(index_SSIM_GTvsSource),str(NRMSE_GTvsPrediction),str(NRMSE_GTvsSource),str(PSNR_GTvsPrediction),str(PSNR_GTvsSource)])
#Here we calculate the average SSIM over all images for this checkpoint
Average_SSIM_checkpoint = Average(ssim_score_list)
Average_ssim_score_list.append(Average_SSIM_checkpoint)
# All data is now processed and saved
# -------------------------------- Display --------------------------------
# Display the average SSIM vs. checkpoint plot
plt.figure(figsize=(20,5))
plt.plot(Checkpoint_list, Average_ssim_score_list, label="SSIM")
plt.title('Checkpoints vs. SSIM')
plt.ylabel('SSIM')
plt.xlabel('Checkpoints')
plt.legend()
plt.savefig(full_QC_model_path+'Quality Control/SSIMvsCheckpoint_data.png',bbox_inches='tight',pad_inches=0)
plt.show()
# -------------------------------- Display RGB --------------------------------
from ipywidgets import interact
import ipywidgets as widgets
if Image_type == "RGB":
random_choice_shortname_no_PNG = shortname_no_PNG
@interact
def show_results(file=os.listdir(Source_QC_folder), checkpoints=Checkpoint_list):
random_choice_shortname_no_PNG = file[:-4]
df1 = pd.read_csv(QC_model_path+"/"+QC_model_name+"/Quality Control/"+str(checkpoints)+"/"+"QC_metrics_"+QC_model_name+str(checkpoints)+".csv", header=0)
df2 = df1.set_index("image #", drop = False)
index_SSIM_GTvsPrediction = df2.loc[file, "Prediction v. GT mSSIM"]
index_SSIM_GTvsSource = df2.loc[file, "Input v. GT mSSIM"]
#Setting up colours
cmap = None
plt.figure(figsize=(10,10))
# Target (Ground-truth)
plt.subplot(3,3,1)
plt.axis('off')
img_GT = imageio.imread(os.path.join(Target_QC_folder, file), as_gray=False, pilmode="RGB")
plt.imshow(img_GT, cmap = cmap)
plt.title('Target',fontsize=15)
# Source
plt.subplot(3,3,2)
plt.axis('off')
img_Source = imageio.imread(os.path.join(Source_QC_folder, file), as_gray=False, pilmode="RGB")
plt.imshow(img_Source, cmap = cmap)
plt.title('Source',fontsize=15)
#Prediction
plt.subplot(3,3,3)
plt.axis('off')
img_Prediction = io.imread(os.path.join(QC_model_path+"/"+QC_model_name+"/Quality Control/"+str(checkpoints), random_choice_shortname_no_PNG+"_fake.png"))
plt.imshow(img_Prediction, cmap = cmap)
plt.title('Prediction',fontsize=15)
#SSIM between GT and Source
plt.subplot(3,3,5)
#plt.axis('off')
plt.tick_params(
axis='both', # changes apply to the x-axis and y-axis
which='both', # both major and minor ticks are affected
bottom=False, # ticks along the bottom edge are off
top=False, # ticks along the top edge are off
left=False, # ticks along the left edge are off
right=False, # ticks along the right edge are off
labelbottom=False,
labelleft=False)
img_SSIM_GTvsSource = imageio.imread(os.path.join(QC_model_path+"/"+QC_model_name+"/Quality Control/"+str(checkpoints), "SSIM_GTvsSource_"+random_choice_shortname_no_PNG+".tif"))
imSSIM_GTvsSource = plt.imshow(img_SSIM_GTvsSource, cmap = cmap, vmin=0, vmax=1)
#plt.colorbar(imSSIM_GTvsSource,fraction=0.046, pad=0.04)
plt.title('Target vs. Source',fontsize=15)
plt.xlabel('mSSIM: '+str(round(index_SSIM_GTvsSource,3)),fontsize=14)
plt.ylabel('SSIM maps',fontsize=20, rotation=0, labelpad=75)
#SSIM between GT and Prediction
plt.subplot(3,3,6)
#plt.axis('off')
plt.tick_params(
axis='both', # changes apply to the x-axis and y-axis
which='both', # both major and minor ticks are affected
bottom=False, # ticks along the bottom edge are off
top=False, # ticks along the top edge are off
left=False, # ticks along the left edge are off
right=False, # ticks along the right edge are off
labelbottom=False,
labelleft=False)
img_SSIM_GTvsPrediction = imageio.imread(os.path.join(QC_model_path+"/"+QC_model_name+"/Quality Control/"+str(checkpoints), "SSIM_GTvsPrediction_"+random_choice_shortname_no_PNG+".tif"))
imSSIM_GTvsPrediction = plt.imshow(img_SSIM_GTvsPrediction, cmap = cmap, vmin=0,vmax=1)
#plt.colorbar(imSSIM_GTvsPrediction,fraction=0.046, pad=0.04)
plt.title('Target vs. Prediction',fontsize=15)
plt.xlabel('mSSIM: '+str(round(index_SSIM_GTvsPrediction,3)),fontsize=14)
plt.savefig(full_QC_model_path+'Quality Control/QC_example_data.png',bbox_inches='tight',pad_inches=0)
# -------------------------------- Display Grayscale --------------------------------
if Image_type == "Grayscale":
random_choice_shortname_no_PNG = shortname_no_PNG
@interact
def show_results(file=os.listdir(Source_QC_folder), checkpoints=Checkpoint_list):
random_choice_shortname_no_PNG = file[:-4]
df1 = pd.read_csv(QC_model_path+"/"+QC_model_name+"/Quality Control/"+str(checkpoints)+"/"+"QC_metrics_"+QC_model_name+str(checkpoints)+".csv", header=0)
df2 = df1.set_index("image #", drop = False)
index_SSIM_GTvsPrediction = df2.loc[file, "Prediction v. GT mSSIM"]
index_SSIM_GTvsSource = df2.loc[file, "Input v. GT mSSIM"]
NRMSE_GTvsPrediction = df2.loc[file, "Prediction v. GT NRMSE"]
NRMSE_GTvsSource = df2.loc[file, "Input v. GT NRMSE"]
PSNR_GTvsSource = df2.loc[file, "Input v. GT PSNR"]
PSNR_GTvsPrediction = df2.loc[file, "Prediction v. GT PSNR"]
plt.figure(figsize=(15,15))
cmap = None
# Target (Ground-truth)
plt.subplot(3,3,1)
plt.axis('off')
img_GT = imageio.imread(os.path.join(Target_QC_folder, file), as_gray=True, pilmode="RGB")
plt.imshow(img_GT, norm=simple_norm(img_GT, percent = 99), cmap = 'gray')
plt.title('Target',fontsize=15)
# Source
plt.subplot(3,3,2)
plt.axis('off')
img_Source = imageio.imread(os.path.join(QC_model_path+"/"+QC_model_name+"/Quality Control/"+str(checkpoints), random_choice_shortname_no_PNG+"_real.png"))
plt.imshow(img_Source, norm=simple_norm(img_Source, percent = 99))
plt.title('Source',fontsize=15)
#Prediction
plt.subplot(3,3,3)
plt.axis('off')
img_Prediction = io.imread(os.path.join(QC_model_path+"/"+QC_model_name+"/Quality Control/"+str(checkpoints), random_choice_shortname_no_PNG+"_fake.png"))
plt.imshow(img_Prediction, norm=simple_norm(img_Prediction, percent = 99))
plt.title('Prediction',fontsize=15)
#Setting up colours
cmap = plt.cm.CMRmap
#SSIM between GT and Source
plt.subplot(3,3,5)
#plt.axis('off')
plt.tick_params(
axis='both', # changes apply to the x-axis and y-axis
which='both', # both major and minor ticks are affected
bottom=False, # ticks along the bottom edge are off
top=False, # ticks along the top edge are off
left=False, # ticks along the left edge are off
right=False, # ticks along the right edge are off
labelbottom=False,
labelleft=False)
img_SSIM_GTvsSource = imageio.imread(os.path.join(QC_model_path+"/"+QC_model_name+"/Quality Control/"+str(checkpoints), "SSIM_GTvsSource_"+random_choice_shortname_no_PNG+".tif"))
img_SSIM_GTvsSource = img_SSIM_GTvsSource / 255
imSSIM_GTvsSource = plt.imshow(img_SSIM_GTvsSource, cmap = cmap, vmin=0, vmax=1)
plt.colorbar(imSSIM_GTvsSource,fraction=0.046, pad=0.04)
plt.title('Target vs. Source',fontsize=15)
plt.xlabel('mSSIM: '+str(round(index_SSIM_GTvsSource,3)),fontsize=14)
plt.ylabel('SSIM maps',fontsize=20, rotation=0, labelpad=75)
#SSIM between GT and Prediction
plt.subplot(3,3,6)
#plt.axis('off')
plt.tick_params(
axis='both', # changes apply to the x-axis and y-axis
which='both', # both major and minor ticks are affected
bottom=False, # ticks along the bottom edge are off
top=False, # ticks along the top edge are off
left=False, # ticks along the left edge are off
right=False, # ticks along the right edge are off
labelbottom=False,
labelleft=False)
img_SSIM_GTvsPrediction = imageio.imread(os.path.join(QC_model_path+"/"+QC_model_name+"/Quality Control/"+str(checkpoints), "SSIM_GTvsPrediction_"+random_choice_shortname_no_PNG+".tif"))
img_SSIM_GTvsPrediction = img_SSIM_GTvsPrediction / 255
imSSIM_GTvsPrediction = plt.imshow(img_SSIM_GTvsPrediction, cmap = cmap, vmin=0,vmax=1)
plt.colorbar(imSSIM_GTvsPrediction,fraction=0.046, pad=0.04)
plt.title('Target vs. Prediction',fontsize=15)
plt.xlabel('mSSIM: '+str(round(index_SSIM_GTvsPrediction,3)),fontsize=14)
#Root Squared Error between GT and Source
plt.subplot(3,3,8)
#plt.axis('off')
plt.tick_params(
axis='both', # changes apply to the x-axis and y-axis
which='both', # both major and minor ticks are affected
bottom=False, # ticks along the bottom edge are off
top=False, # ticks along the top edge are off
left=False, # ticks along the left edge are off
right=False, # ticks along the right edge are off
labelbottom=False,
labelleft=False)
img_RSE_GTvsSource = imageio.imread(os.path.join(QC_model_path+"/"+QC_model_name+"/Quality Control/"+str(checkpoints), "RSE_GTvsSource_"+random_choice_shortname_no_PNG+".tif"))
img_RSE_GTvsSource = img_RSE_GTvsSource / 255
imRSE_GTvsSource = plt.imshow(img_RSE_GTvsSource, cmap = cmap, vmin=0, vmax = 1)
plt.colorbar(imRSE_GTvsSource,fraction=0.046,pad=0.04)
plt.title('Target vs. Source',fontsize=15)
plt.xlabel('NRMSE: '+str(round(NRMSE_GTvsSource,3))+', PSNR: '+str(round(PSNR_GTvsSource,3)),fontsize=14)
#plt.title('Target vs. Source PSNR: '+str(round(PSNR_GTvsSource,3)))
plt.ylabel('RSE maps',fontsize=20, rotation=0, labelpad=75)
#Root Squared Error between GT and Prediction
plt.subplot(3,3,9)
#plt.axis('off')
plt.tick_params(
axis='both', # changes apply to the x-axis and y-axis
which='both', # both major and minor ticks are affected
bottom=False, # ticks along the bottom edge are off
top=False, # ticks along the top edge are off
left=False, # ticks along the left edge are off
right=False, # ticks along the right edge are off
labelbottom=False,
labelleft=False)
img_RSE_GTvsPrediction = imageio.imread(os.path.join(QC_model_path+"/"+QC_model_name+"/Quality Control/"+str(checkpoints), "RSE_GTvsPrediction_"+random_choice_shortname_no_PNG+".tif"))
img_RSE_GTvsPrediction = img_RSE_GTvsPrediction / 255
imRSE_GTvsPrediction = plt.imshow(img_RSE_GTvsPrediction, cmap = cmap, vmin=0, vmax=1)
plt.colorbar(imRSE_GTvsPrediction,fraction=0.046,pad=0.04)
plt.title('Target vs. Prediction',fontsize=15)
plt.xlabel('NRMSE: '+str(round(NRMSE_GTvsPrediction,3))+', PSNR: '+str(round(PSNR_GTvsPrediction,3)),fontsize=14)
plt.savefig(full_QC_model_path+'/Quality Control/QC_example_data.png',bbox_inches='tight',pad_inches=0)
#Make a pdf summary of the QC results
from datetime import datetime
class MyFPDF(FPDF, HTMLMixin):
pass
pdf = MyFPDF()
pdf.add_page()
pdf.set_right_margin(-1)
pdf.set_font("Arial", size = 11, style='B')
Network = 'cycleGAN'
day = datetime.now()
datetime_str = str(day)[0:10]
Header = 'Quality Control report for '+Network+' model ('+QC_model_name+')\nDate: '+datetime_str
pdf.multi_cell(180, 5, txt = Header, align = 'L')
all_packages = ''
for requirement in freeze(local_only=True):
all_packages = all_packages+requirement+', '
pdf.set_font('')
pdf.set_font('Arial', size = 11, style = 'B')
pdf.ln(2)
pdf.cell(190, 5, txt = 'Development of Training Losses', ln=1, align='L')
pdf.ln(1)
exp_size = io.imread(full_QC_model_path+'Quality Control/SSIMvsCheckpoint_data.png').shape
pdf.image(full_QC_model_path+'Quality Control/SSIMvsCheckpoint_data.png', x = 11, y = None, w = round(exp_size[1]/8), h = round(exp_size[0]/8))
pdf.ln(2)
pdf.set_font('')
pdf.set_font('Arial', size = 10, style = 'B')
pdf.ln(3)
pdf.cell(80, 5, txt = 'Example Quality Control Visualisation', ln=1)
pdf.ln(1)
exp_size = io.imread(full_QC_model_path+'Quality Control/QC_example_data.png').shape
if Image_type == 'RGB':
pdf.image(full_QC_model_path+'Quality Control/QC_example_data.png', x = 16, y = None, w = round(exp_size[1]/5), h = round(exp_size[0]/5))
if Image_type == 'Grayscale':
pdf.image(full_QC_model_path+'Quality Control/QC_example_data.png', x = 16, y = None, w = round(exp_size[1]/8), h = round(exp_size[0]/8))
pdf.ln(1)
pdf.set_font('')
pdf.set_font('Arial', size = 11, style = 'B')
pdf.ln(1)
pdf.cell(180, 5, txt = 'Quality Control Metrics', align='L', ln=1)
pdf.set_font('')
pdf.set_font_size(10.)
pdf.ln(1)
for checkpoint in os.listdir(full_QC_model_path+'Quality Control'):
if os.path.isdir(os.path.join(full_QC_model_path,'Quality Control',checkpoint)):
pdf.set_font('')
pdf.set_font('Arial', size = 10, style = 'B')
pdf.cell(70, 5, txt = 'Metrics for checkpoint: '+ str(checkpoint), align='L', ln=1)
html = """
<body>
<font size="8" face="Courier New" >
<table width=95% style="margin-left:0px;">"""
with open(full_QC_model_path+'Quality Control/'+str(checkpoint)+'/QC_metrics_'+QC_model_name+str(checkpoint)+'.csv', 'r') as csvfile:
metrics = csv.reader(csvfile)
header = next(metrics)
image = header[0]
mSSIM_PvsGT = header[1]
mSSIM_SvsGT = header[2]
header = """
<tr>
<th width = 60% align="left">{0}</th>
<th width = 20% align="center">{1}</th>
<th width = 20% align="center">{2}</th>
</tr>""".format(image,mSSIM_PvsGT,mSSIM_SvsGT)
html = html+header
for row in metrics:
image = row[0]
mSSIM_PvsGT = row[1]
mSSIM_SvsGT = row[2]
cells = """
<tr>
<td width = 60% align="left">{0}</td>
<td width = 20% align="center">{1}</td>
<td width = 20% align="center">{2}</td>
</tr>""".format(image,str(round(float(mSSIM_PvsGT),3)),str(round(float(mSSIM_SvsGT),3)))
html = html+cells
html = html+"""</body></table>"""
pdf.write_html(html)
pdf.ln(2)
else:
continue
pdf.ln(1)
pdf.set_font('')
pdf.set_font_size(10.)
ref_1 = 'References:\n - ZeroCostDL4Mic: von Chamier, Lucas & Laine, Romain, et al. "ZeroCostDL4Mic: an open platform to simplify access and use of Deep-Learning in Microscopy." BioRxiv (2020).'
pdf.multi_cell(190, 5, txt = ref_1, align='L')
ref_2 = '- cycleGAN: Zhu, Jun-Yan, et al. "Unpaired image-to-image translation using cycle-consistent adversarial networks." Proceedings of the IEEE international conference on computer vision. 2017.'
pdf.multi_cell(190, 5, txt = ref_2, align='L')
pdf.ln(3)
reminder = 'To find the parameters and other information about how this model was trained, go to the training_report.pdf of this model which should be in the folder of the same name.'
pdf.set_font('Arial', size = 11, style='B')
pdf.multi_cell(190, 5, txt=reminder, align='C')
pdf.output(full_QC_model_path+'Quality Control/'+QC_model_name+'_QC_report.pdf')
###Output
_____no_output_____
###Markdown
**6. Using the trained model**---In this section, unseen data are processed using the trained model (from Section 4). First, your unseen images are uploaded and prepared for prediction. The trained model from Section 4 is then used to generate predictions, which are saved to your Google Drive. **6.1. Generate prediction(s) from unseen dataset**---The current trained model (from section 4.2) can now be used to process images. If you want to use an older model, untick the **Use_the_current_trained_model** box and enter the name and path of the model to use. Predicted output images are saved in your **Result_folder** folder as PNG images.**`Data_folder`:** This folder should contain the images that you want to process with your trained network.**`Result_folder`:** This folder will contain the predicted output images.**`checkpoint`:** Choose the checkpoint number you would like to use to perform predictions. To use the "latest" checkpoint, input "latest".
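For orientation, the predictions for a given checkpoint are written to `Result_folder/<model name>/test_<checkpoint>/images/` as `*_real.png` / `*_fake.png` pairs (this is the layout the display cell in Section 6.2 reads from). Below is a minimal sketch for listing those files; the folder and model names are placeholder assumptions, not values taken from this notebook.

```python
import os

# hypothetical example values; substitute your own Result_folder, model name and checkpoint
result_folder = "/content/gdrive/MyDrive/results"
prediction_model_name = "my_cyclegan_model"
checkpoint = "latest"

pred_dir = os.path.join(result_folder, prediction_model_name, "test_" + str(checkpoint), "images")
if os.path.isdir(pred_dir):
    for fname in sorted(os.listdir(pred_dir)):
        print(fname)  # alternating *_real.png (input) and *_fake.png (prediction) files
else:
    print("No predictions found yet in", pred_dir)
```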
###Code
#@markdown ### Provide the path to your dataset and to the folder where the predictions are saved, then play the cell to predict outputs from your unseen images.
import glob
import os.path
latest = "latest"
Data_folder = "" #@param {type:"string"}
Result_folder = "" #@param {type:"string"}
# model name and path
#@markdown ###Do you want to use the current trained model?
Use_the_current_trained_model = False #@param {type:"boolean"}
#@markdown ###If not, please provide the path to the model folder:
Prediction_model_folder = "" #@param {type:"string"}
#@markdown ###What model checkpoint would you like to use?
checkpoint = latest#@param {type:"raw"}
#Here we find the loaded model name and parent path
Prediction_model_name = os.path.basename(Prediction_model_folder)
Prediction_model_path = os.path.dirname(Prediction_model_folder)
#here we check if we use the newly trained network or not
if (Use_the_current_trained_model):
print("Using current trained network")
Prediction_model_name = model_name
Prediction_model_path = model_path
#here we check if the model exists
full_Prediction_model_path = Prediction_model_path+'/'+Prediction_model_name+'/'
if os.path.exists(full_Prediction_model_path):
print("The "+Prediction_model_name+" network will be used.")
else:
W = '\033[0m' # white (normal)
R = '\033[31m' # red
print(R+'!! WARNING: The chosen model does not exist !!'+W)
print('Please make sure you provide a valid model path and model name before proceeding further.')
# Here we check that checkpoint exist, if not the closest one will be chosen
Nb_Checkpoint = len(glob.glob(os.path.join(full_Prediction_model_path, '*G_A.pth')))
print(Nb_Checkpoint)
if not checkpoint == "latest":
    if checkpoint < 10:
        checkpoint = 5
    if not checkpoint % 5 == 0:
        checkpoint = ((int(checkpoint / 5)-1) * 5)
        print(bcolors.WARNING + " Your chosen checkpoint is not divisible by 5; therefore the checkpoint chosen is now:", checkpoint)
    if checkpoint > Nb_Checkpoint*5:
        checkpoint = "latest"
    if checkpoint == Nb_Checkpoint*5:
        checkpoint = "latest"
# Here we need to move the data to be analysed so that cycleGAN can find them
Saving_path_prediction= "/content/"+Prediction_model_name
if os.path.exists(Saving_path_prediction):
shutil.rmtree(Saving_path_prediction)
os.makedirs(Saving_path_prediction)
Saving_path_Data_folder = Saving_path_prediction+"/testA"
if os.path.exists(Saving_path_Data_folder):
shutil.rmtree(Saving_path_Data_folder)
os.makedirs(Saving_path_Data_folder)
for files in os.listdir(Data_folder):
shutil.copyfile(Data_folder+"/"+files, Saving_path_Data_folder+"/"+files)
Nb_files_Data_folder = len(os.listdir(Data_folder)) +10
#Here we copy and rename the checkpoint to be used
shutil.copyfile(full_Prediction_model_path+"/"+str(checkpoint)+"_net_G_A.pth", full_Prediction_model_path+"/"+str(checkpoint)+"_net_G.pth")
# This will find the image dimension of a randomly chosen image in Data_folder
random_choice = random.choice(os.listdir(Data_folder))
x = imageio.imread(Data_folder+"/"+random_choice)
#Find image XY dimension
Image_Y = x.shape[0]
Image_X = x.shape[1]
Image_min_dim = min(Image_Y, Image_X)
print(Image_min_dim)
#-------------------------------- Perform predictions -----------------------------
#-------------------------------- Options that can be used to perform predictions -----------------------------
# basic parameters
#('--dataroot', required=True, help='path to images (should have subfolders trainA, trainB, valA, valB, etc)')
#('--name', type=str, default='experiment_name', help='name of the experiment. It decides where to store samples and models')
#('--gpu_ids', type=str, default='0', help='gpu ids: e.g. 0 0,1,2, 0,2. use -1 for CPU')
#('--checkpoints_dir', type=str, default='./checkpoints', help='models are saved here')
# model parameters
#('--model', type=str, default='cycle_gan', help='chooses which model to use. [cycle_gan | pix2pix | test | colorization]')
#('--input_nc', type=int, default=3, help='# of input image channels: 3 for RGB and 1 for grayscale')
#('--output_nc', type=int, default=3, help='# of output image channels: 3 for RGB and 1 for grayscale')
#('--ngf', type=int, default=64, help='# of gen filters in the last conv layer')
#('--ndf', type=int, default=64, help='# of discrim filters in the first conv layer')
#('--netD', type=str, default='basic', help='specify discriminator architecture [basic | n_layers | pixel]. The basic model is a 70x70 PatchGAN. n_layers allows you to specify the layers in the discriminator')
#('--netG', type=str, default='resnet_9blocks', help='specify generator architecture [resnet_9blocks | resnet_6blocks | unet_256 | unet_128]')
#('--n_layers_D', type=int, default=3, help='only used if netD==n_layers')
#('--norm', type=str, default='instance', help='instance normalization or batch normalization [instance | batch | none]')
#('--init_type', type=str, default='normal', help='network initialization [normal | xavier | kaiming | orthogonal]')
#('--init_gain', type=float, default=0.02, help='scaling factor for normal, xavier and orthogonal.')
#('--no_dropout', action='store_true', help='no dropout for the generator')
# dataset parameters
#('--dataset_mode', type=str, default='unaligned', help='chooses how datasets are loaded. [unaligned | aligned | single | colorization]')
#('--direction', type=str, default='AtoB', help='AtoB or BtoA')
#('--serial_batches', action='store_true', help='if true, takes images in order to make batches, otherwise takes them randomly')
#('--num_threads', default=4, type=int, help='# threads for loading data')
#('--batch_size', type=int, default=1, help='input batch size')
#('--load_size', type=int, default=286, help='scale images to this size')
#('--crop_size', type=int, default=256, help='then crop to this size')
#('--max_dataset_size', type=int, default=float("inf"), help='Maximum number of samples allowed per dataset. If the dataset directory contains more than max_dataset_size, only a subset is loaded.')
#('--preprocess', type=str, default='resize_and_crop', help='scaling and cropping of images at load time [resize_and_crop | crop | scale_width | scale_width_and_crop | none]')
#('--no_flip', action='store_true', help='if specified, do not flip the images for data augmentation')
#('--display_winsize', type=int, default=256, help='display window size for both visdom and HTML')
# additional parameters
#('--epoch', type=str, default='latest', help='which epoch to load? set to latest to use latest cached model')
#('--load_iter', type=int, default='0', help='which iteration to load? if load_iter > 0, the code will load models by iter_[load_iter]; otherwise, the code will load models by [epoch]')
#('--verbose', action='store_true', help='if specified, print more debugging information')
#('--suffix', default='', type=str, help='customized suffix: opt.name = opt.name + suffix: e.g., {model}_{netG}_size{load_size}')
#('--ntest', type=int, default=float("inf"), help='# of test examples.')
#('--results_dir', type=str, default='./results/', help='saves results here.')
#('--aspect_ratio', type=float, default=1.0, help='aspect ratio of result images')
#('--phase', type=str, default='test', help='train, val, test, etc')
# Dropout and Batchnorm have different behaviour during training and test.
#('--eval', action='store_true', help='use eval mode during test time.')
#('--num_test', type=int, default=50, help='how many test images to run')
# rewrite default values
# To avoid cropping, the load_size should be the same as crop_size
#parser.set_defaults(load_size=parser.get_default('crop_size'))
#------------------------------------------------------------------------
#---------------------------- Predictions are performed here ----------------------
os.chdir("/content")
!python pytorch-CycleGAN-and-pix2pix/test.py --dataroot "$Saving_path_Data_folder" --name "$Prediction_model_name" --model test --no_dropout --preprocess scale_width --load_size $Image_min_dim --crop_size $Image_min_dim --results_dir "$Result_folder" --checkpoints_dir "$Prediction_model_path" --num_test $Nb_files_Data_folder --epoch $checkpoint
#-----------------------------------------------------------------------------------
###Output
_____no_output_____
###Markdown
**6.2. Inspect the predicted output**---
###Code
# @markdown ##Run this cell to display a randomly chosen input and its corresponding predicted output.
import os
# This will display a randomly chosen dataset input and predicted output
random_choice = random.choice(os.listdir(Data_folder))
random_choice_no_extension = os.path.splitext(random_choice)
x = imageio.imread(Result_folder+"/"+Prediction_model_name+"/test_"+str(checkpoint)+"/images/"+random_choice_no_extension[0]+"_real.png")
y = imageio.imread(Result_folder+"/"+Prediction_model_name+"/test_"+str(checkpoint)+"/images/"+random_choice_no_extension[0]+"_fake.png")
f=plt.figure(figsize=(16,8))
plt.subplot(1,2,1)
plt.imshow(x, interpolation='nearest')
plt.title('Input')
plt.axis('off');
plt.subplot(1,2,2)
plt.imshow(y, interpolation='nearest')
plt.title('Prediction')
plt.axis('off');
###Output
_____no_output_____
|
assignment_6/ngocha_assignment_6 .ipynb
|
###Markdown
HOME ASSIGNMENT 6: CLOUD FUNCTION & STREAMLIT**Purpose of this assignment**> * [Optional] Deploy a Cloud Function > * Build a data app with Streamlit> * Manipulate data with Pandas> * Data Visualization**Knowledge applied*** Slack API, JSON to DataFrame* GCP Cloud Function* Streamlit* Python Pandas* Python Data Visualization**Advice*** Take time to review and connect what you have learned* Review Assignments 1-5 for at least two other learners
###Code
!ls ..
###Output
LICENSE       assignment_4   env_variable.json   streamlit
README.md     assignment_5   gcp-cloud-function
assignment_2  assignment_6   img
assignment_3  data           python-for-data
###Markdown
TODO 1: Python Data Viz Complete the exercise sets on [Kaggle Data Visualization](https://www.kaggle.com/learn/data-visualization) - if not already completed in [Assignment 5](https://github.com/anhdanggit/atom-assignments/blob/main/assignment_5/home_assignment_5.ipynb) https://github.com/ngochant/atom-assignments/blob/main/assignment_5/home_assignment_5.ipynb TODO 2 (OPTIONAL): DEPLOY GOOGLE CLOUD FUNCTION* Follow the Week 6 lab: [HERE](https://anhdang.gitbook.io/datacracy/atom/6-cloud-function-and-streamlit/6.2-lab-cloud-function-hands-on)* Double-click the Markdown cells below to answer the questions ([Markdown Cheatsheet](https://guides.github.com/features/mastering-markdown/)) Screenshot Cloud Function on GCP> *Copy the screenshot into the img folder of the repo and update the link below* Screenshot Cloud Function Testing on GCP> *Copy the screenshot into the img folder of the repo and update the link below* Screenshot Cloud Function Call on Postman> *Copy the screenshot into the img folder of the repo and update the link below* Errors encountered during the process *List below the errors you ran into and how you resolved them*1. 2. 3. TODO 3: UNDERSTAND & DIAGRAM THE STREAMLIT CODE I often advise people who are new to coding:> Code with a pencil and a sheet of paperHow?1. First, picture it in your head: what do you start from (`inputs`) and what do you produce (`output`)2. Then, to get from inputs to outputs, which steps do you need (the `functions`)Drawing such a diagram helps you:* See the big picture without getting lost in details and syntax* Be clearer about the flow * Optimise the flow first, so the coding afterwards goes more smoothly* Very effective for debugging in the futureRefer to the following diagram of [streamlit/data_glimpse.py](https://github.com/anhdanggit/atom-assignments/blob/main/streamlit/data_glimpse.py) and draw a diagram, as you understand it, for [streamlit/datacracy_slack.py](https://github.com/anhdanggit/atom-assignments/blob/main/streamlit/datacracy_slack.py) Diagram Data Glimpse Apps> Below is an example diagram of the app [streamlit/data_glimpse.py](https://github.com/anhdanggit/atom-assignments/blob/main/streamlit/data_glimpse.py) DataCracy Slack > An app that summarises the submission, review, and discussion history of DataCracy learners Diagram DataCracy Slack Apps* Look at the code of the app [streamlit/datacracy_slack.py](https://github.com/anhdanggit/atom-assignments/blob/main/streamlit/datacracy_slack.py)> *Copy the diagram you drew into the img folder of the repo and update the link below*
###Code
from IPython.display import Image
Image('../img/slack_flowchart.jpg')
###Output
_____no_output_____
###Markdown
Explanation Look at the code of the app [streamlit/datacracy_slack.py](https://github.com/anhdanggit/atom-assignments/blob/main/streamlit/datacracy_slack.py):1. For each function (step) in your diagram, explain what the function does.2. List the logics applied to process the data.
###Code
## For each function (step) in your diagram, explain what the function does
1. Load Slack data: load_user_df, load_channel_df, load_msg_df
- Load user/channel/message data from Slack via the API and store them in the DataFrames user_df, channel_df, msg_df
2. Filter data
- Filter the information for the input User_id
- Filter msg_df into the DataFrame filter_msg_df, keeping rows where the input ID == user_id | reply_user1 | reply_user2
3. process_msg_data:
- From filter_msg_df, process the reply user, channel name and timestamp information to build the DataFrame p_msg_df
4. get_submission / get_review / get_discussion:
- Take data from p_msg_df, filter the information to be summarised for the input ID, and store it in submit_df/review_df/discuss_df
5. Summary:
- From the submit_df/review_df/discuss_df DataFrames, count the number of submitted assignments, the number of reviews written, the fraction of submissions reviewed by others, and the total number of words posted in the discussion channel
## List the logics applied to process the data
- Decision: if the input ID is found in the data -> process the data, otherwise return a "not found" result
- filter_user_df: used to filter the information of the input User_id.
- filter_msg_df: collects the input ID from the reply_user fields, used for review and discussion.
- p_msg_df uses the function process_msg_data to process the channel message data, taking filter_msg_df as input
- Submission
Only keep channels whose name contains "assignment"
Only keep User_ids whose DataCracy Role is Learner.
Only keep the last submitted message
- Review
Exclude cases where a user replies to their own message
Only keep channels whose name contains "assignment".
Only keep User_ids whose DataCracy Role is Learner.
- Discussion
Only keep channels whose name contains "discuss"; each user belongs to a specific group
###Output
_____no_output_____
|
data_processing/TREATER/4.gap_filler.ipynb
|
###Markdown
1. FILLING LARGE GAPS
###Code
gaps_dict = {}
gaps_dict_flat = {}
for key in dts:
print(key)
gaps_dict[key] = find_series_gaps(dts[key])
gaps_dict_flat[key] = flatten_series_gaps(gaps_dict[key])
for key in dts:
fill_gaps(dts[key], gaps_dict[key], 30)
for key in dts:
print(key)
gaps_dict[key] = find_series_gaps(dts[key])
gaps_dict_flat[key] = flatten_series_gaps(gaps_dict[key])
for key in dts:
dts[key].interpolate(method='from_derivatives', inplace=True)
new_gaps_dict = {}
new_gaps_dict_flat = {}
for key in dts:
print(key)
new_gaps_dict[key] = find_series_gaps(dts[key])
new_gaps_dict_flat[key] = flatten_series_gaps(new_gaps_dict[key])
for key in dts:
dts[key].dropna(inplace=True)
(dts['computer'].isna() == False).all()
start_date = '2021-02-17 00:00:00'
stop_date = '2021-03-29 23:59:59'
for key in dts:
dts[key].name = key
sel = dts[key].loc[start_date:stop_date]
sel.to_csv(FLLD_DB_DIR+"/{}.csv".format(key), float_format='%.4f')
###Output
_____no_output_____
|
Session11/Day1/IntroductionToBasicStellarPhotometry.ipynb
|
###Markdown
Introduction to Basic Stellar Photometry Measuring Flux in 1D**Version 0.1**In this notebook we will introduce some basic concepts related to measuring the flux of a point source. As this is an introduction, several challenges associated with image processing will be ignored or simplified (for example, we will simulate stars in 1 dimension). Nevertheless, the concepts below adapt nicely to charge coupled devices (CCDs) with a small field of view ($\ll 1\,\deg^{2}$), and where stellar crowding is minimal. A good reference for such problems is the [Handbook of CCD Astronomy](https://ui.adsabs.harvard.edu/abs/2006hca..book.....H/abstract) by Steve Howell. However, as we will see throughout this session, the basic methods introduced here are insufficient for an ~all sky survey with a large field-of-view detector, as will be conducted by the Vera C. Rubin Observatory. We will learn more about those challenges and solutions in later lectures.The problems below are inspired by Robert Lupton (who has forgotten more about image processing than I will ever know), so it may be worthwhile to check out his [original notebook](https://github.com/LSSTC-DSFP/LSSTC-DSFP-Sessions/blob/master/Session1/Day2/ImageProcessing/Image%20Procesing%20Workbook%20I.ipynb).* * *By AA Miller (CIERA/Northwestern & Adler)
###Code
import numpy as np
import matplotlib.pyplot as plt
%matplotlib notebook
###Output
_____no_output_____
###Markdown
Problem 1) An (oversimplified) 1-D ModelFor this introductory problem we are going to simulate a 1 dimensional detector. Simulated stars that are detected by said detector will have Gaussian profiles $\mathcal{N}(\mu, \sigma^2)$, with mean position $\mu$ and variance $\sigma^2$.As observed by LSST, all stars are point sources that reflect the point spread function (PSF), which is produced by a combination of the atmosphere, telescope, and detector. A standard measure of the PSF's width is the Full Width Half Maximum (FWHM). For a Gaussian the FWHM = $2\sqrt{2 \ln (2)} \sigma \approx 2.3548\,\sigma$.In addition to the signal from the stars, the 1D detector also detects a smooth background of light from several sources (the atmosphere, the detector, etc). We will refer to this background simply as "The Sky." **Problem 1a**Write a function `phi()` to simulate a (noise-free) 1D Gaussian PSF. The function should take `mu` and `fwhm` as arguments, and evaluate the PSF along a user-supplied array `x`.
###Code
def phi(x, mu, fwhm):
"""Evalute the 1d PSF N(mu, sigma^2) along x
Parameters
----------
x : array-like of shape (n_pixels,)
detector pixel number
mu : float
mean position of the 1D star
fwhm : float
Full-width half-maximum of the stellar profile on the detector
Returns
-------
flux : array-like of shape (n_pixels,)
Flux in each pixel of the input array
"""
sigma = # complete
flux = # complete
return # complete
###Output
_____no_output_____
###Markdown
**Helper Function**CCDs measure pixelated signals. To clearly remind ourselves that that is the case, I have created a helper function that shows the pixelated counts in our 1D detector. The function assumes that the input positions are *the left edge* of the pixel.You can use this function throughout the notebook below.
###Code
from matplotlib.ticker import MultipleLocator
def pixel_plot(pix, counts):
'''Make a pixelated 1D plot'''
fig, ax = plt.subplots()
ax.step(pix, counts,
where='post')
ax.set_xlabel('pixel number')
ax.set_ylabel('relative counts')
ax.xaxis.set_minor_locator(MultipleLocator(1))
ax.xaxis.set_major_locator(MultipleLocator(5))
fig.tight_layout()
###Output
_____no_output_____
###Markdown
**Problem 1b**Plot the noise-free PSF for a star with a profile defined by $\mu = 10$ and $\mathrm{FWHM} = 3$. Estimate the total flux of this star by "integrating" over the counts measured by the detector. If you are clever in the definition of your pixels, this integration reduces to a sum.*Hint - think about your input grid of pixels. Can you have a non-integer number of pixels? Also - the flux should be evaluated at the center of the pixel.*
###Code
x = # complete
pixel_plot( # complete
print("The flux of the star is: {:.3f}".format( # complete
###Output
_____no_output_____
###Markdown
**Problem 1c**Now add sky noise to the detector (for now we will assume the sky noise is constant in every pixel). We will define the sky as `S`, and the total stellar flux as `F`.Plot the simulated counts for `S` = 100 and `F` = 500. (Use the same stellar profile as **1b**)
###Code
S = # complete
F = # complete
pixel_plot( # complete
###Output
_____no_output_____
###Markdown
Problem 2) Add NoiseFor optical detectors (such as those used by the Rubin Observatory), the noise $n$ in a pixel is almost entirely shot noise due to the finite number of photons that have been detected. Therefore, within each pixel if the "true" signal in the detector would produce $n$ photons, then the noise/uncertainty in each pixel follows a [Poisson distribution](https://en.wikipedia.org/wiki/Poisson_distribution), which has the property that the mean $\lambda$ is equal to the variance $\lambda$. If $n \gg 1$ then $P(\lambda) \approx \mathcal{N}(\lambda, \lambda)$. We will make this simplifying assumption for the remainder of this problem. **Problem 2a**Plot the noisy counts in each pixel for the simulated signal (star + sky) in **1c**. Visually compare these results to your previous plot.*Hint* - you may find the function [`np.random.normal()`](https://docs.scipy.org/doc/numpy-1.13.0/reference/generated/numpy.random.normal.html) or [`np.random.poisson()`](https://numpy.org/devdocs/reference/random/generated/numpy.random.poisson.html) helpful.
###Code
np.random.seed(2020)
signal = # complete
signal_plus_noise = # complete
pixel_plot( # complete
###Output
_____no_output_____
###Markdown
**Problem 2b**Estimate the flux of the star using the same method you used in **1b**.Does your estimate match your expectation? (recall that $F = 500$) If not, why not?
###Code
print('The total flux = {:.4f}'.format( # complete
###Output
_____no_output_____
###Markdown
*write your answer here* **Problem 2c**The flux has been measured incorrectly because we are counting photons from the "sky." Subtract the sky background S from the counts in the detector and once again estimate the flux using the same method as **1b**.Does your estimate match your expectation? (recall that $F = 500$) *Note - estimating the value of the "sky" background in real life is extremely difficult and an entire lecture will be devoted to this topic*
###Code
print('The total flux = {:.4f}'.format( # complete
###Output
_____no_output_____
###Markdown
*write your answer here* We will now examine alternative methods of estimating the flux. Problem 3) Aperture Flux MeasurementsIn some sense, **2c** provides the most simplistic method for estimating the flux of a star (add up all the counts after subtracting the background). This solution, however, cannot generalize to any sort of real life problem because there are always multiple stars (and galaxies) detected in every individual image. However, we can approximate the above procedure by "isolating" the individual stars in any image (say by taking a 10x10 cutout around each star), and then estimating the flux in a similar fashion. As we are about to show, the size of the box (or more appropriately a circle for 2D optical images) is critical when estimating the flux. **Problem 3a**Write a function `simulate()` to simulate the noisy count measurements of a star with centroid `mu`, FWHM `fwhm`, sky background `S`, and flux `F`.*Hint* - it may be helpful to plot the output of your function.
###Code
def simulate(x, mu, fwhm, S, F):
'''simulate a noisy stellar signal
Parameters
----------
x : array-like
detector pixel number
mu : float
mean position of the 1D star
fwhm : float
Full-width half-maximum of the stellar profile on the detector
S : float
Constant sky background in each pixel
F : float
Total stellar flux
Returns
-------
noisy_counts : array-like (same shape as x)
the (noisy) number of counts in each pixel
'''
# complete
# complete
# complete
noisy_counts = # complete
return noisy_counts
###Output
_____no_output_____
###Markdown
We will now perform aperture photometry. In optical astronomy, most apertures are a top-hat function, and the flux is estimated by multiplying the aperture by the (background-subtracted) signal and summing the resulting output. In 2D, apertures are typically circles; for the remainder of this 1D problem we will use a top-hat aperture. If the aperture contains partial pixels, then the counts in pixels with partial coverage are multiplied by the fractional coverage of the pixel. **Problem 3b** Using an aperture with a radius of 5 pixels centered on the source (i.e. the aperture is 10 pixels long), measure the flux from a star centered at `mu` = 20, with `fwhm` = 5, `S` = 100, and `F` = 1000. Assume you can perfectly measure the background, and subtract this prior to measuring the flux.**Extra long** *Hint* - if you create your aperture using a single `np.where()` command (or similar) you are not going to get the correct answer. To quickly see why this is the case, imagine you'd been asked to use an aperture with a radius of 3.2 pixels. Thus, properly calculating the aperture requires a bit of thought. A for loop is a fairly intuitive way to handle this problem (though it can also be done with a series of where commands, and both possibilities will be presented in the solutions). **DO NOT SPEND TOO MUCH TIME ON THIS PROBLEM** If you get stuck, use a single `np.where()` command. When you plot the results in the **3c** you'll see how things are off, but this won't affect the general results in the remainder of the notebook.
###Code
x = np.linspace(0,40,41)
mu = 20
S = 100
fwhm = 5
F = 1000
sim_star = # complete
ap_radius = # complete
# complete
# complete
# complete
# complete
# complete
# complete
# complete
# complete
print("The star has flux = {:.3f}".format( # complete
###Output
_____no_output_____
###Markdown
**Problem 3c**Plot the simulated counts from **3b** and overplot your aperture. You may want to multiply the aperture by a factor of 100 to see it better.*Hint* – after you have run `pixel_plot()` you will need to call matplotlib functions directly (e.g., `plt.plot`) to overplot on the pixel data. Also, if you created an `aperture_mask` in **3b** it may help to plot that as well.
###Code
pixel_plot( # complete
# add aperture
plt.plot( # complete
###Output
_____no_output_____
###Markdown
**Problem 3d** Write a Monte Carlo simulator to estimate the mean and standard deviation of the flux from the simulated star.*Food for thought* - what do you notice if you run your simulator many times?
###Code
sim_fluxes = np.empty(1000)
for sim_num, dummy in enumerate(sim_fluxes):
sim_star = simulate(x, mu, 5, S, 1000)
ap_radius = # complete
# complete
# complete
# complete
# complete
# complete
# complete
# complete
# complete
sim_fluxes[sim_num] = # complete
print("The mean flux = {:.3f} with variance = {:.3f}".format( # complete
###Output
_____no_output_____
###Markdown
Problem 4) Avoid Arbitrary Numbers -- the Curve of GrowthIn **Problem 3** we decided to use an aperture radius of 5. Why? (In practice, an aperture radius equal to the FWHM is a pretty good choice, as we will show below. However, this is not optimal in all situations)We will now try to optimize the choice of aperture for the star in question. **Problem 4a**Using your solution to **Problem 3**, write a function `aperture_flux()` that estimates the mean flux and its variance in a given aperture of size `ap_radius` for a simulated star. *Hint* - this function is going to have many inputs, `ap_radius`, the position of the star, the flux of the star, the FWHM of the star, a pixel grid, the value of the sky background, and the number of simulations per input radius.
###Code
def aperture_flux(ap_radius,
# complete
# complete
# complete
# complete
# complete
# complete
# complete
# complete
# complete
# complete
# complete
# complete
# complete
# complete
# complete
return # complete
###Output
_____no_output_____
###Markdown
**Problem 4b**Confirm your function works by calculating the mean and variance of the flux in a 5 pixel radius aperture.
###Code
mean, var = aperture_flux(5)
print('The mean flux in a r = 5 pix aperture is {:.4f} +/- {:.4f}'.format( # complete
###Output
_____no_output_____
###Markdown
**Problem 4c**Build successively larger apertures with sizes increasing from a radius of 1 to 10 pixels. Measure the mean and variance for each aperture size. Plot the results.Which aperture size has the smallest variance? Is this aperture best? Do these results make sense?
###Code
ap_array = # complete
# complete
# complete
for ap_num, ap_rad in enumerate(ap_array):
# complete
fig, ax = plt.subplots()
ax.errorbar( # complete
# complete
# complete
# complete
print('aperture radius = {} has the smallest variance'.format( # complete
###Output
_____no_output_____
###Markdown
*write your answer here* Small apertures fail to measure all the light from the source. Large apertures do measure all the light, but at the cost of higher variance. In practice, these challenges can be alleviated if the point spread function is known. (This is a challenging problem and the subject of an entire lecture this week, as the PSF is essentially never known a priori and must be estimated from the images themselves.)In this case, we know the PSF is a 1D Gaussian. We can therefore calculate "aperture corrections" to determine the flux at any radius on the above plot (known as the curve of growth -- in some cases the aperture corrections can be determined directly from the curve of growth but that can be challenging on real images, as things like stellar blends remove all the simplicity of the single star problem that we have here). To determine the aperture correction at any radius $r$, we can simply integrate a Gaussian (our known PSF for this simulated problem) over the size of the aperture and then divide the aperture flux (and standard deviation) by this result to estimate the true flux in each aperture.This can easily be done for our 1D Gaussian with scipy.
###Code
from scipy.stats import norm
def curve_of_growth(r):
'''Return aperture correction for aperture of size r
Parameters
----------
r : float
radius of the aperture, in units of the
Gaussian standard deviation
Returns
-------
apcor : float
the aperture correction at radius r
'''
return norm.cdf(r) - norm.cdf(-r)
###Output
_____no_output_____
###Markdown
**Problem 4d**Calculate the analytic curve of growth for each of your apertures from **4c**. Re-plot the (corrected) flux in each aperture. Do you notice anything different? *Hint* – recall the relation between FWHM and the standard deviation for a Gaussian.
###Code
cog = # complete
fig, ax = plt.subplots()
ax.errorbar(# complete
# complete
# complete
# complete
###Output
_____no_output_____
###Markdown
**Problem 4e**Plot the uncertainty on the flux estimate (i.e., the square root of the variance) as a function of aperture radius.Now which aperture size do you think is best?
###Code
fig, ax = plt.subplots()
ax.plot( # complete
ax.set_xlabel('aperture radius (pix)')
ax.set_ylabel('uncertainty in F')
fig.tight_layout()
###Output
_____no_output_____
###Markdown
*write your answer here* Here we have discovered a universal truth about aperture photometry: very small and very large apertures produce lower signal-to-noise estimates than something in between. However, the optimal value of that something in between is different for every star (as you will show below). **Problem 4f**Leaving all other variables the same, estimate the optimal aperture size (i.e. maximize the signal-to-noise ratio) for a star with a flux of 10000.What is the optimal aperture size?*Hint* –– you only need to repeat **4c** and **4e** for this answer.
###Code
# complete
# complete
# complete
# complete
# complete
# complete
print('The optimal aperture is {} pix'.format( # complete
###Output
_____no_output_____
###Markdown
**Problem 4g**Leaving all other variables the same, estimate the optimal aperture size (i.e. maximize the signal-to-noise ratio) for a star with a flux of 10.What is the optimal aperture size? Can you even measure the flux of this star?
###Code
# complete
# complete
# complete
# complete
# complete
# complete
print('The optimal aperture is {} pix'.format( # complete
###Output
_____no_output_____
###Markdown
UpshotDropping simple apertures on an image provides a fast and simple method to estimate the flux of a star. This approach comes at a cost, however, as the aperture method employed here provides high variance estimates of the flux. Fortunately, it is possible to do much better via PSF photometry (and in fact, the [Cramer-Rao bound](https://en.wikipedia.org/wiki/Cram%C3%A9r%E2%80%93Rao_bound) mathematically proves that PSF photometry is the lowest variance estimator of the flux of a star). This means that *aperture photometry is never better than PSF photometry* despite some claims to the contrary in the literature. There are cases where the PSF is extremely difficult to estimate, in which case aperture photometry may be the only decent way to estimate the flux, but even then PSF photometry would be better. (Technically speaking, aperture photometry *is* PSF photometry. The catch is that the PSF model (a 1D or circular top hat) is a terrible match to the actual apparition of the stars on the image. When the model of the PSF is good, and in the case of our simulated data set we know the PSF perfectly, then PSF flux estimates will be a minimum variance estimator.) Problem 5/Challenge Problem) PSF Flux measurementWe are going to cover PSF modeling and PSF photometry in far greater detail later this week, but here we are going to quickly measure the flux using a model of the PSF, which we will compare to the aperture results. **Problem 5a**Create the psf model, `psf`, which is equivalent to a noise-free star with `fwhm` = 5.
###Code
psf = # complete
###Output
_____no_output_____
###Markdown
**Problem 5b** Using the same parameters as problem 3, simulate a star and measure its PSF flux.*Hint* - you may find the [`minimize`](https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.minimize.html) function from `scipy.optimize` helpful.
###Code
# complete
# complete
# complete
sim_star = simulate(x, mu, fwhm, S, F)
psf_flux = # complete
print("The PSF flux is {:.3f}".format( # complete
###Output
_____no_output_____
###Markdown
**Problem 5c**Following **4a** write a function to simulate many realizations of the star and estimate the flux and variance using the PSF model.How does the PSF estimate compare to the aperture estimate?
###Code
# complete
# complete
# complete
# complete
# complete
print("The mean flux = {:.3f} with variance = {:.3f}".format( # complete
###Output
_____no_output_____
###Markdown
Introduction to Basic Stellar Photometry Measuring Flux in 1D**Version 0.1**In this notebook we will introduce some basic concepts related to measuring the flux of a point source. As this is an introduction, several challenges associated with image processing will be ignored or simplified (for example, we will simulate stars in 1 dimension). Nevertheless, the concepts below adapt nicely to charge coupled devices (CCDs) with a small field of view ($\ll 1\,\deg^{2}$), and where stellar crowding is minimal. A good reference for such problems is the [Handbook for CCD Astronomy](https://ui.adsabs.harvard.edu/abs/2006hca..book.....H/abstract) by Steve Howell. However, as we will see throughout this session, the basic methods introduced here are insufficient for an ~all sky survey with a large field-of-view detector, as will be conducted by the Vera C. Rubin Observatory. We will learn more about those challenges and solutions in later lectures.The problems below are inspired by Robert Lupton (who has forgotten more about image processing than I will ever know), so it may be worthwhile to checkout his [original notebook](https://github.com/LSSTC-DSFP/LSSTC-DSFP-Sessions/blob/master/Session1/Day2/ImageProcessing/Image%20Processing%20Workbook%20I.ipynb).* * *By AA Miller (CIERA/Northwestern & Adler)
###Code
import numpy as np
import matplotlib.pyplot as plt
%matplotlib notebook
###Output
_____no_output_____
###Markdown
Problem 1) An (oversimplified) 1-D ModelFor this introductory problem we are going to simulate a 1 dimensional detector. Simulated stars that are detected by said detector will have Gaussian profiles $\mathcal{N}(\mu, \sigma^2)$, with mean position $\mu$ and variance $\sigma^2$.As observed by LSST, all stars are point sources that reflect the point spread function (PSF), which is produced by a combination of the atmosphere, telescope, and detector. A standard measure of the PSF's width is the Full Width Half Maximum (FWHM). For a Gaussian the FWHM = $2\sqrt{2 \ln (2)} \sigma \approx 2.3548\,\sigma$.In addition to the signal from the stars, the 1D detector also detects a smooth background of light from several sources (the atmosphere, the detector, etc). We will refer to this background simply as "The Sky." **Problem 1a**Write a function `phi()` to simulate a (noise-free) 1D Gaussian PSF. The function should take `mu` and `fwhm` as arguments, and evaluate the PSF along a user-supplied array `x`.
###Code
from scipy.stats import norm
def phi(x, mu, fwhm):
"""Evalute the 1d PSF N(mu, sigma^2) along x
Parameters
----------
x : array-like of shape (n_pixels,)
detector pixel number
mu : float
mean position of the 1D star
fwhm : float
Full-width half-maximum of the stellar profile on the detector
Returns
-------
flux : array-like of shape (n_pixels,)
Flux in each pixel of the input array
"""
sigma = fwhm / (2 * np.sqrt(2 * np.log(2)))
flux = norm(loc=mu, scale=sigma)
return flux.pdf(x)
###Output
_____no_output_____
###Markdown
**Helper Function**CCDs measure pixelated signals. To clearly remind ourselves that that is the case, I have created a helper function that shows the pixelated counts in our 1D detector. The function assumes that the input positions are *the left edge* of the pixel.You can use this function throughout the notebook below.
###Code
from matplotlib.ticker import MultipleLocator
def pixel_plot(pix, counts):
'''Make a pixelated 1D plot'''
fig, ax = plt.subplots()
ax.step(pix, counts,
where='post')
ax.set_xlabel('pixel number')
ax.set_ylabel('relative counts')
ax.xaxis.set_minor_locator(MultipleLocator(1))
ax.xaxis.set_major_locator(MultipleLocator(5))
fig.tight_layout()
###Output
_____no_output_____
###Markdown
**Problem 1b**Plot the noise-free PSF for a star with a profile defined by $\mu = 10$ and $\mathrm{FWHM} = 3$. Estimate the total flux of this star by "integrating" over the counts measured by the detector. If you are clever in the definition of your pixels, this integration reduces to a sum.*Hint - think about your input grid of pixels. Can you have a non-integer number of pixels? Also - the flux should be evaluated at the center of the pixel.*
###Code
x = np.arange(21)
flux = phi(x, mu=10, fwhm=3)
pixel_plot(x, flux)
print("The flux of the star is: {:.3f}".format(np.sum(flux)))
###Output
_____no_output_____
###Markdown
**Problem 1c**Now add sky noise to the detector (for now we will assume the sky noise is constant in every pixel). We will define the sky as `S`, and the total stellar flux as `F`.Plot the simulated counts for `S` = 100 and `F` = 500. (Use the same stellar profile as **1b**)
###Code
S = 100
F = 500
pixel_plot(x, flux * F + S)
###Output
_____no_output_____
###Markdown
Problem 2) Add NoiseFor optical detectors (such as those used by the Rubin Observatory), the noise $n$ in a pixel is almost entirely shot noise due to the finite number of photons that have been detected. Therefore, within each pixel if the "true" signal in the detector would produce $n$ photons, then the noise/uncertainty in each pixel follows a [Poisson distribution](https://en.wikipedia.org/wiki/Poisson_distribution), which has the property that the mean $\lambda$ is equal to the variance $\lambda$. If $n \gg 1$ then $P(\lambda) \approx \mathcal{N}(\lambda, \lambda)$. We will make this simplifying assumption for the remainder of this problem. **Problem 2a**Plot the noisy counts in each pixel for the simulated signal (star + sky) in **1c**. Visually compare these results to your previous plot.*Hint* - you may find the function [`np.random.normal()`](https://docs.scipy.org/doc/numpy-1.13.0/reference/generated/numpy.random.normal.html) or [`np.random.poisson()`](https://numpy.org/devdocs/reference/random/generated/numpy.random.poisson.html) helpful.
###Code
np.random.seed(2020)
signal = flux * F + S
signal_plus_noise = np.random.normal(loc=signal, scale=np.sqrt(signal))
pixel_plot(x, signal_plus_noise)
###Output
_____no_output_____
###Markdown
**Problem 2b**Estimate the flux of the star using the same method you used in **1b**.Does your estimate match your expectation? (recall that $F = 500$) If not, why not?
###Code
print('The total flux = {:.4f}'.format(signal_plus_noise.sum()))
###Output
The total flux = 2551.9633
###Markdown
*write your answer here*The measured flux includes the sky background, so to recover the true flux, one must subtract out the measured brightness of the sky: **Problem 2c**The flux has been measured incorrectly because we are counting photons from the "sky." Subtract the sky background S from the counts in the detector and once again estimate the flux using the same method as **1b**.Does your estimate match your expectation? (recall that $F = 500$) *Note - estimating the value of the "sky" background in real life is extremely difficult and an entire lecture will be devoted to this topic*
###Code
print('The total flux = {:.4f}'.format(np.sum(signal_plus_noise - S * np.ones_like(x))))
###Output
The total flux = 451.9633
###Markdown
*write your answer here*It seems to match within Poisson uncertainties. We will now examine alternative methods of estimating the flux. Problem 3) Aperture Flux MeasurementsIn some sense, **2c** provides the most simplistic method for estimating the flux of a star (add up all the counts after subtracting the background). This solution, however, cannot generalize to any sort of real life problem because there are always multiple stars (and galaxies) detected in every individual image. However, we can approximate the above procedure by "isolating" the individual stars in any image (say by taking a 10x10 cutout around each star), and then estimating the flux in a similar fashion. As we are about to show, the size of the box (or more appropriately a circle for 2D optical images) is critical when estimating the flux. **Problem 3a**Write a function `simulate()` to simulate the noisy count measurements of a star with centroid `mu`, FWHM `fwhm`, sky background `S`, and flux `F`.*Hint* - it may be helpful to plot the output of your function.
###Code
def simulate(x, mu, fwhm, S, F):
'''simulate a noisy stellar signal
Parameters
----------
x : array-like
detector pixel number
mu : float
mean position of the 1D star
fwhm : float
Full-width half-maximum of the stellar profile on the detector
S : float
Constant sky background in each pixel
F : float
Total stellar flux
Returns
-------
noisy_counts : array-like (same shape as x)
the (noisy) number of counts in each pixel
'''
flux = phi(x, mu, fwhm)
signal = flux * F + S
noisy_counts = np.random.normal(loc=signal, scale=np.sqrt(signal))
return noisy_counts
###Output
_____no_output_____
###Markdown
We will now perform aperture photometry. In optical astronomy, most apertures are a top-hat function, and the flux is estimated by multiplying the aperture by the (background-subtracted) signal and summing the resulting output. In 2D, apertures are typically circles; for the remainder of this 1D problem we will use a top-hat aperture. If the aperture contains partial pixels, then the counts in pixels with partial coverage are multiplied by the fractional coverage of the pixel. **Problem 3b** Using an aperture with a radius of 5 pixels centered on the source (i.e. the aperture is 10 pixels long), measure the flux from a star centered at `mu` = 20, with `fwhm` = 5, `S` = 100, and `F` = 1000. Assume you can perfectly measure the background, and subtract this prior to measuring the flux.**Extra long** *Hint* - if you create your aperture using a single `np.where()` command (or similar) you are not going to get the correct answer. To quickly see why this is the case, imagine you'd been asked to use an aperture with a radius of 3.2 pixels. Thus, properly calculating the aperture requires a bit of thought. A for loop is a fairly intuitive way to handle this problem (though it can also be done with a series of where commands, and both possibilities will be presented in the solutions). **DO NOT SPEND TOO MUCH TIME ON THIS PROBLEM** If you get stuck, use a single `np.where()` command. When you plot the results in the **3c** you'll see how things are off, but this won't affect the general results in the remainder of the notebook.
###Code
import math
def measure_flux_ap(x, sim_star, mu, S, ap_radius):
    # Apply the aperture to the star profile
sky_counts, total_counts = 0.0, 0.0
for pix, flux_val in zip(x, sim_star):
diff = np.abs(mu - pix)
if math.floor(diff) < ap_radius:
            # fully inside aperture
            sky_counts += S
            total_counts += flux_val
        elif math.floor(diff) > math.floor(ap_radius):
            # outside aperture
            pass
        else:
            # partially inside aperture: approximate the covered fraction,
            # assuming the pixel is centered on its coordinate
            frac = max(0.0, ap_radius + 0.5 - diff)
            sky_counts += S * frac
            total_counts += flux_val * frac
measured_flux = np.sum(total_counts) - np.sum(sky_counts)
return measured_flux
# simpler alternative with np.where (note: this definition overrides the loop version above)
def measure_flux_ap(x, sim_star, mu, S, ap_radius):
total_flux = sim_star[np.where(np.abs(x-mu) < ap_radius)]
return np.sum(total_flux) - len(total_flux) * S
x = np.linspace(0,40,41)
mu = 20
S = 100
fwhm = 5
F = 1000
sim_star = simulate(x, mu, fwhm, S, F)
ap_radius = 5
measured_flux = measure_flux_ap(x, sim_star, mu, S, ap_radius)
print("The star has flux = {:.3f}".format(measured_flux))
###Output
The star has flux = 986.026
###Markdown
**Problem 3c**Plot the simulated counts from **3b** and overplot your aperture. You may want to multiply the aperture by a factor of 100 to see it better.*Hint* – after you have run `pixel_plot()` you will need to call matplotlib functions directly (e.g., `plt.plot`) to overplot on the pixel data. Also, if you created an `aperture_mask` in **3b** it may help to plot that as well.
###Code
pixel_plot(x, sim_star)
# add aperture
plt.axvline(mu - ap_radius, c='black', ls='--')
plt.axvline(mu + ap_radius, c='black', ls='--')
###Output
_____no_output_____
###Markdown
**Problem 3d** Write a Monte Carlo simulator to estimate the mean and standard deviation of the flux from the simulated star.*Food for thought* - what do you notice if you run your simulator many times?
###Code
sim_fluxes = np.empty(1000)
for sim_num in range(len(sim_fluxes)):
sim_star = simulate(x, mu, 5, S, 1000)
ap_radius = 5
measured_flux = measure_flux_ap(x, sim_star, mu, S, ap_radius)
sim_fluxes[sim_num] = measured_flux
print("The mean flux = {:.3f} with variance = {:.3f}".format(np.mean(sim_fluxes), np.std(sim_fluxes)))
###Output
The mean flux = 968.736 with variance = 42.722
###Markdown
Problem 4) Avoid Arbitrary Numbers -- the Curve of GrowthIn **Problem 3** we decided to use an aperture radius of 5. Why? (In practice, an aperture radius equal to the FWHM is a pretty good choice, as we will show below. However, this is not optimal in all situations)We will now try to optimize the choice of aperture for the star in question. **Problem 4a**Using your solution to **Problem 3**, write a function `aperture_flux()` that estimates the mean flux and its variance in a given aperture of size `ap_radius` for a simulated star. *Hint* - this function is going to have many inputs, `ap_radius`, the position of the star, the flux of the star, the FWHM of the star, a pixel grid, the value of the sky background, and the number of simulations per input radius.
###Code
def aperture_flux(ap_radius,
mu,
F,
fwhm,
x,
S,
num):
sim_fluxes = np.empty(num)
for sim_num in range(len(sim_fluxes)):
sim_star = simulate(x, mu, fwhm, S, F)
measured_flux = measure_flux_ap(x, sim_star, mu, S, ap_radius)
sim_fluxes[sim_num] = measured_flux
return np.mean(sim_fluxes), np.std(sim_fluxes)
###Output
_____no_output_____
###Markdown
**Problem 4b**Confirm your function works by calculating the mean and variance of the flux in a 5 pixel radius aperture.
###Code
x = np.linspace(0,40,41)
mu = 20
S = 100
fwhm = 5
F = 1000
mean, var = aperture_flux(5, mu, F, fwhm, x, S, 1000)
print('The mean flux in a r = 5 pix aperture is {:.4f} +/- {:.4f}'.format(mean, var))
###Output
The mean flux in a r = 5 pix aperture is 965.3258 +/- 41.8276
###Markdown
**Problem 4c**Build successively larger apertures with sizes increasing from a radius of 1 to 10 pixels. Measure the mean and variance for each aperture size. Plot the results.Which aperture size has the smallest variance? Is this aperture best? Do these results make sense?
###Code
ap_array = np.linspace(1, 10, 20)
mean, var = [], []
for ap_num, ap_rad in enumerate(ap_array):
mean_, var_ = aperture_flux(ap_rad, mu, F, fwhm, x, S, 1000)
mean.append(mean_)
var.append(var_)
fig, ax = plt.subplots()
ax.errorbar(ap_array, mean, yerr=var)
print('aperture radius = {} has the smallest variance'.format(ap_array[np.argmin(var)]))
###Output
_____no_output_____
###Markdown
*write your answer here* Small apertures fail to measure all the light from the source. Large apertures do measure all the light, but at the cost of higher variance. In practice, these challenges can be alleviated if the point spread function is known. (This is a challenging problem and the subject of an entire lecture this week, as the PSF is essentially never known a priori and must be estimated from the images themselves.)In this case, we know the PSF is a 1D Gaussian. We can therefore calculate "aperture corrections" to determine the flux at any radius on the above plot (known as the curve of growth -- in some cases the aperture corrections can be determined directly from the curve of growth but that can be challenging on real images, as things like stellar blends remove all the simplicity of the single star problem that we have here). To determine the aperture correction at any radius $r$, we can simply integrate a Gaussian (our known PSF for this simulated problem) over the size of the aperture and then divide the aperture flux (and standard deviation) by this result to estimate the true flux in each aperture.This can easily be done for our 1D Gaussian with scipy.
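Note that `curve_of_growth` below expects the radius in units of the Gaussian standard deviation: an aperture of $r$ pixels encloses a fraction $\Phi(r/\sigma) - \Phi(-r/\sigma)$ of the flux, where $\Phi$ is the standard normal CDF and $\sigma = \mathrm{FWHM}/(2\sqrt{2\ln 2})$, so the pixel radii must be divided by $\sigma$ before calling the function.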
###Code
from scipy.stats import norm
def curve_of_growth(r):
'''Return aperture correction for aperture of size r
Parameters
----------
r : float
radius of the aperture, in units of the
Gaussian standard deviation
Returns
-------
apcor : float
the aperture correction at radius r
'''
return norm.cdf(r) - norm.cdf(-r)
###Output
_____no_output_____
###Markdown
**Problem 4d**Calculate the analytic curve of growth for each of your apertures from **4c**. Re-plot the (corrected) flux in each aperture. Do you notice anything different? *Hint* – recall the relation between FWHM and the standard deviation for a Gaussian.
###Code
# aperture radii are in pixels; convert to units of the Gaussian sigma
sigma = fwhm / (2 * np.sqrt(2 * np.log(2)))
cog = curve_of_growth(ap_array / sigma)
fig, ax = plt.subplots()
ax.errorbar(ap_array, mean / cog, yerr=var / cog)
###Output
_____no_output_____
###Markdown
**Problem 4e**Plot the uncertainty on the flux estimate (i.e., the square root of the variance) as a function of aperture radius.Now which aperture size do you think is best?
###Code
fig, ax = plt.subplots()
ax.plot(ap_array, var / cog)
ax.set_xlabel('aperture radius (pix)')
ax.set_ylabel('uncertainty in F')
fig.tight_layout()
###Output
_____no_output_____
###Markdown
*write your answer here* Here we have discovered a universal truth about aperture photometry: very small and very large apertures produce lower signal-to-noise estimates than something in between. However, the optimal value of that something in between is different for every star (as you will show below). **Problem 4f**Leaving all other variables the same, estimate the optimal aperture size (i.e. maximize the signal-to-noise ratio) for a star with a flux of 10000.What is the optimal aperture size?*Hint* –– you only need to repeat **4c** and **4e** for this answer.
###Code
# complete
# complete
# complete
# complete
# complete
# complete
print('The optimal aperture is {} pix'.format( # complete
###Output
_____no_output_____
###Markdown
**Problem 4g**Leaving all other variables the same, estimate the optimal aperture size (i.e. maximize the signal-to-noise ratio) for a star with a flux of 10.What is the optimal aperture size? Can you even measure the flux of this star?
###Code
# complete
# complete
# complete
# complete
# complete
# complete
print('The optimal aperture is {} pix'.format( # complete
###Output
_____no_output_____
###Markdown
UpshotDropping simple apertures on an image provides a fast and simple method to estimate the flux of a star. This approach comes at a cost, however, as the aperture method employed here provides high variance estimates of the flux. Fortunately, it is possible to do much better via PSF photometry (and in fact, the [Cramer-Rao bound](https://en.wikipedia.org/wiki/Cram%C3%A9r%E2%80%93Rao_bound) mathematically proves that PSF photometry is the lowest variance estimator of the flux of a star). This means that *aperture photometry is never better than PSF photometry* despite some claims to the contrary in the literature. There are cases where the PSF is extremely difficult to estimate, in which case aperture photometry may be the only decent way to estimate the flux, but even then PSF photometry would be better. (Technically speaking, aperture photometry *is* PSF photometry. The catch is that the PSF model (a 1D or circular top hat) is a terrible match to the actual apparition of the stars on the image. When the model of the PSF is good, and in the case of our simulated data set we know the PSF perfectly, then PSF flux estimates will be a minimum variance estimator.) Problem 5/Challenge Problem) PSF Flux measurementWe are going to cover PSF modeling and PSF photometry in far greater detail later this week, but here we are going to quickly measure the flux using a model of the PSF, which we will compare to the aperture results. **Problem 5a**Create the psf model, `psf`, which is equivalent to a noise-free star with `fwhm` = 5.
###Code
psf = # complete
###Output
_____no_output_____
###Markdown
**Problem 5b** Using the same parameters as problem 3, simulate a star and measure its PSF flux.*Hint* - you may find the [`minimize`](https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.minimize.html) function from `scipy.optimize` helpful.
###Code
# complete
# complete
# complete
sim_star = simulate(x, mu, fwhm, S, F)
psf_flux = # complete
print("The PSF flux is {:.3f}".format( # complete
###Output
_____no_output_____
###Markdown
**Problem 5c**Following **4a** write a function to simulate many realizations of the star and estimate the flux and variance using the PSF model.How does the PSF estimate compare to the aperture estimate?
###Code
# complete
# complete
# complete
# complete
# complete
print("The mean flux = {:.3f} with variance = {:.3f}".format( # complete
###Output
_____no_output_____
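###Markdown
The cells for Problem 5 are left as an exercise above, so here is a minimal, hedged sketch of one possible approach: fit the flux by minimizing the sum of squared residuals between the sky-subtracted counts and a scaled noise-free PSF. The helper names (`psf_model`, `sum_sq`) are illustrative and not part of the original notebook.
###Code
from scipy.optimize import minimize

# noise-free PSF model on the same pixel grid (Problem 5a)
psf_model = phi(x, mu, fwhm)

def sum_sq(theta, counts, psf_model, S):
    """Sum of squared residuals between sky-subtracted counts and F * PSF."""
    F_fit = theta[0]
    return np.sum(((counts - S) - F_fit * psf_model)**2)

# simulate one star and fit its PSF flux (Problem 5b)
sim_star = simulate(x, mu, fwhm, S, F)
res = minimize(sum_sq, x0=[1000.0], args=(sim_star, psf_model, S))
print("The PSF flux is {:.3f}".format(res.x[0]))
###Output
_____no_output_____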
|
notebook/00-test/ZeroDivisionError.ipynb
|
###Markdown
ZeroDivisionError
###Code
print("ZeroDivisionError")
raise ZeroDivisionError
###Output
_____no_output_____
|
topicnet/demos/20NG-Preprocessing.ipynb
|
###Markdown
20NG (Twenty Newsgroups). PreprocessingHere goes an example of data preprocessing and converting it to TopicNet's Dataset format.* Example of a toy dataset: [test_dataset.csv](https://github.com/machine-intelligence-laboratory/TopicNet/blob/master/topicnet/tests/test_data/test_dataset.csv)* Dataset source file (with some explanations in docstring): [dataset.py](https://github.com/machine-intelligence-laboratory/TopicNet/blob/master/topicnet/cooking_machine/dataset.py) Contents* [Loading data](data-loading)* [Preparing data](data-preparation)
###Code
import numpy as np
import pandas as pd
import re
import shutil
import string
from collections import Counter
from glob import glob
from sklearn import datasets
from sklearn.datasets import fetch_20newsgroups
import nltk
from nltk.collocations import (
BigramAssocMeasures,
BigramCollocationFinder,
)
from nltk.corpus import (
stopwords,
wordnet,
)
from nltk.stem import WordNetLemmatizer
import matplotlib.pyplot as plt
%matplotlib inline
from matplotlib import cm
###Output
_____no_output_____
###Markdown
Loading dataBack to Contents Let's download the dataset:
###Code
train_20 = fetch_20newsgroups(
subset='train',
remove=('headers', 'footers', 'quotes'),
)
test_20 = fetch_20newsgroups(
subset='test',
remove=('headers', 'footers', 'quotes'),
)
train_20.pop('DESCR')
labels = train_20.pop('target_names')
for k in train_20.keys():
print(len(train_20[k]), k)
test_20.pop('DESCR')
labels_test = test_20.pop('target_names')
for k in test_20.keys():
print(len(test_20[k]), k)
###Output
7532 data
7532 filenames
7532 target
###Markdown
Preparing data (lemmatization, Vowpal Wabbit & TopicNet's format)Back to Contents Wrapping all in .csv files:
###Code
train_pd = pd.DataFrame(train_20).rename(columns = {'data':'raw_text'},)
# train_pd['raw_text'] = train_pd['raw_text'].apply(lambda x: x.decode('windows-1252'))
train_pd['id'] = train_pd.filenames.apply( lambda x: '.'.join(x.split('/')[-2:]).replace('.','_'))
test_pd = pd.DataFrame(test_20).rename(columns = {'data':'raw_text'})
# test_pd['raw_text'] = test_pd['raw_text'].apply(lambda x: x.decode('windows-1252'))
test_pd['id'] = test_pd.filenames.apply( lambda x: '.'.join(x.split('/')[-2:]))
###Output
_____no_output_____
###Markdown
Better to exclude these documents (one may look here [20-newsgroups-secrets](https://github.com/Alvant/20-newsgroups-secrets) for more details).
###Code
bad_names = [9976, 9977, 9978, 9979, 9980, 9981, 9982, 9983, 9984, 9985, 9986, 9987, 9988, 9990]
bad_names = [f"comp_os_ms-windows_misc_{i}" for i in bad_names]
bad_indices = train_pd.query("id in @bad_names").index
###Output
_____no_output_____
###Markdown
Below we define some functions for text preprocessing.
###Code
def nltk2wn_tag(nltk_tag):
if nltk_tag.startswith('J'):
return wordnet.ADJ
elif nltk_tag.startswith('V'):
return wordnet.VERB
elif nltk_tag.startswith('N'):
return wordnet.NOUN
elif nltk_tag.startswith('R'):
return wordnet.ADV
else:
return ''
pattern = re.compile(r'\S*@\S*\s?')
def vowpalize_sequence(sequence):
word_2_frequency = Counter(sequence)
del word_2_frequency['']
vw_string = ''
for word in word_2_frequency:
vw_string += word + ":" + str(word_2_frequency[word]) + ' '
return vw_string
def do_vw_for_me_please(dataframe):
bad_entries = []
tokenized_text = []
for indx, text in enumerate(dataframe['raw_text'].values):
try:
text = str(pattern.sub('', text))
except TypeError:
text=''
tokens = [tok for tok in nltk.wordpunct_tokenize(text.lower()) if len(tok) > 1]
tokenized_text.append(nltk.pos_tag(tokens))
dataframe['tokenized'] = tokenized_text
stop = set(stopwords.words('english'))
lemmatized_text = []
wnl = WordNetLemmatizer()
for text in dataframe['tokenized'].values:
lemmatized = [wnl.lemmatize(word, nltk2wn_tag(pos))
if nltk2wn_tag(pos) != ''
else wnl.lemmatize(word)
for word, pos in text ]
lemmatized = [word for word in lemmatized
if word not in stop and word.isalpha()]
lemmatized_text.append(lemmatized)
dataframe['lemmatized'] = lemmatized_text
bigram_measures = BigramAssocMeasures()
finder = BigramCollocationFinder.from_documents(dataframe['lemmatized'])
finder.apply_freq_filter(5)
set_dict = set(finder.nbest(bigram_measures.pmi,32100)[100:])
documents = dataframe['lemmatized']
bigrams = []
for doc in documents:
entry = ['_'.join([word_first, word_second])
for word_first, word_second in zip(doc[:-1],doc[1:])
if (word_first, word_second) in set_dict]
bigrams.append(entry)
dataframe['bigram'] = bigrams
vw_text = []
for index, data in dataframe.iterrows():
vw_string = ''
doc_id = data.id
lemmatized = '@lemmatized ' + vowpalize_sequence(data.lemmatized)
bigram = '@bigram ' + vowpalize_sequence(data.bigram)
vw_string = ' |'.join([doc_id, lemmatized, bigram])
vw_text.append(vw_string)
dataframe['vw_text'] = vw_text
print('num bad entries ', len(bad_entries))
print(bad_entries)
return dataframe
###Output
_____no_output_____
###Markdown
And here are the final datasets!Each row represents a document.Columns `id`, `raw_text` and `vw_text` are required (look at this [toy dataset](https://github.com/machine-intelligence-laboratory/TopicNet/blob/master/topicnet/tests/test_data/test_dataset.csv), for example).
###Code
train_pd = do_vw_for_me_please(train_pd)
display(train_pd.head())
test_pd = do_vw_for_me_please(test_pd)
display(test_pd.head())
###Output
num bad entries 0
[]
###Markdown
Saving to disk (TopicNet's [Dataset](https://github.com/machine-intelligence-laboratory/TopicNet/blob/master/topicnet/cooking_machine/dataset.py) can be constructed using saved .csv file with text data).
###Code
! mkdir 20_News_dataset
train_pd.drop(bad_indices).to_csv('/data/datasets/20_News_dataset/train_preprocessed.csv')
test_pd.to_csv('/data/datasets/20_News_dataset/test_preprocessed.csv')
###Output
_____no_output_____
###Markdown
20NG (Twenty Newsgroups). PreprocessingHere goes an example of data preprocessing and converting it to TopicNet's Dataset format.* Example of a toy dataset: [test_dataset.csv](https://github.com/machine-intelligence-laboratory/TopicNet/blob/master/topicnet/tests/test_data/test_dataset.csv)* Dataset source file (with some explanations in docstring): [dataset.py](https://github.com/machine-intelligence-laboratory/TopicNet/blob/master/topicnet/cooking_machine/dataset.py) Contents* [Loading data](data-loading)* [Preparing data](data-preparation)
###Code
import numpy as np
import pandas as pd
import re
import shutil
import string
from collections import Counter
from glob import glob
from sklearn import datasets
from sklearn.datasets import fetch_20newsgroups
import nltk
from nltk.collocations import (
BigramAssocMeasures,
BigramCollocationFinder,
)
from nltk.corpus import (
stopwords,
wordnet,
)
from nltk.stem import WordNetLemmatizer
import matplotlib.pyplot as plt
%matplotlib inline
from matplotlib import cm
###Output
_____no_output_____
###Markdown
Loading dataBack to Contents Let's download the dataset:
###Code
train_20 = fetch_20newsgroups(
subset='train',
remove=('headers', 'footers', 'quotes'),
)
test_20 = fetch_20newsgroups(
subset='test',
remove=('headers', 'footers', 'quotes'),
)
train_20.pop('DESCR')
labels = train_20.pop('target_names')
for k in train_20.keys():
print(len(train_20[k]), k)
test_20.pop('DESCR')
labels_test = test_20.pop('target_names')
for k in test_20.keys():
print(len(test_20[k]), k)
###Output
7532 data
7532 filenames
7532 target
###Markdown
Preparing data (lemmatization, Vowpal Wabbit & TopicNet's format)Back to Contents Wrapping all in .csv files:
###Code
train_pd = pd.DataFrame(train_20).rename(columns = {'data':'raw_text'},)
# train_pd['raw_text'] = train_pd['raw_text'].apply(lambda x: x.decode('windows-1252'))
train_pd['id'] = train_pd.filenames.apply( lambda x: '.'.join(x.split('/')[-2:]).replace('.','_'))
test_pd = pd.DataFrame(test_20).rename(columns = {'data':'raw_text'})
# test_pd['raw_text'] = test_pd['raw_text'].apply(lambda x: x.decode('windows-1252'))
test_pd['id'] = test_pd.filenames.apply( lambda x: '.'.join(x.split('/')[-2:]))
###Output
_____no_output_____
###Markdown
Better to exclude these documents (one may look here [20-newsgroups-secrets](https://github.com/Alvant/20-newsgroups-secrets) for more details).
###Code
bad_names = [9976, 9977, 9978, 9979, 9980, 9981, 9982, 9983, 9984, 9985, 9986, 9987, 9988, 9990]
bad_names = [f"comp_os_ms-windows_misc_{i}" for i in bad_names]
bad_indices = train_pd.query("id in @bad_names").index
###Output
_____no_output_____
###Markdown
Below we define some functions for text preprocessing.
###Code
def nltk2wn_tag(nltk_tag):
if nltk_tag.startswith('J'):
return wordnet.ADJ
elif nltk_tag.startswith('V'):
return wordnet.VERB
elif nltk_tag.startswith('N'):
return wordnet.NOUN
elif nltk_tag.startswith('R'):
return wordnet.ADV
else:
return ''
pattern = re.compile(r'\S*@\S*\s?')
def vowpalize_sequence(sequence):
word_2_frequency = Counter(sequence)
del word_2_frequency['']
vw_string = ''
for word in word_2_frequency:
vw_string += word + ":" + str(word_2_frequency[word]) + ' '
return vw_string
def do_vw_for_me_please(dataframe):
bad_entries = []
tokenized_text = []
for indx, text in enumerate(dataframe['raw_text'].values):
try:
text = str(pattern.sub('', text))
except TypeError:
text=''
tokens = [tok for tok in nltk.wordpunct_tokenize(text.lower()) if len(tok) > 1]
tokenized_text.append(nltk.pos_tag(tokens))
dataframe['tokenized'] = tokenized_text
stop = set(stopwords.words('english'))
lemmatized_text = []
wnl = WordNetLemmatizer()
for text in dataframe['tokenized'].values:
lemmatized = [wnl.lemmatize(word, nltk2wn_tag(pos))
if nltk2wn_tag(pos) != ''
else wnl.lemmatize(word)
for word, pos in text ]
lemmatized = [word for word in lemmatized
if word not in stop and word.isalpha()]
lemmatized_text.append(lemmatized)
dataframe['lemmatized'] = lemmatized_text
bigram_measures = BigramAssocMeasures()
finder = BigramCollocationFinder.from_documents(dataframe['lemmatized'])
finder.apply_freq_filter(5)
set_dict = set(finder.nbest(bigram_measures.pmi,32100)[100:])
documents = dataframe['lemmatized']
bigrams = []
for doc in documents:
entry = ['_'.join([word_first, word_second])
for word_first, word_second in zip(doc[:-1],doc[1:])
if (word_first, word_second) in set_dict]
bigrams.append(entry)
dataframe['bigram'] = bigrams
vw_text = []
for index, data in dataframe.iterrows():
vw_string = ''
doc_id = data.id
lemmatized = '@lemmatized ' + vowpalize_sequence(data.lemmatized)
bigram = '@bigram ' + vowpalize_sequence(data.bigram)
vw_string = ' |'.join([doc_id, lemmatized, bigram])
vw_text.append(vw_string)
dataframe['vw_text'] = vw_text
print('num bad entries ', len(bad_entries))
print(bad_entries)
return dataframe
###Output
_____no_output_____
###Markdown
And here are the final datasets!Each row represents a document.Columns `id`, `raw_text` and `vw_text` are required (look at this [toy dataset](https://github.com/machine-intelligence-laboratory/TopicNet/blob/master/topicnet/tests/test_data/test_dataset.csv), for example).
###Code
train_pd = do_vw_for_me_please(train_pd)
display(train_pd.head())
test_pd = do_vw_for_me_please(test_pd)
display(test_pd.head())
###Output
num bad entries 0
[]
###Markdown
Saving to disk (TopicNet's [Dataset](https://github.com/machine-intelligence-laboratory/TopicNet/blob/master/topicnet/cooking_machine/dataset.py) can be constructed using saved .csv file with text data).
###Code
! mkdir 20_News_dataset
train_pd.drop(bad_indices).to_csv('20_News_dataset/train_preprocessed.csv')
test_pd.to_csv('20_News_dataset/test_preprocessed.csv')
###Output
_____no_output_____
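###Markdown
As noted above, a TopicNet Dataset can be constructed from the saved .csv file. A minimal sketch (assuming the `topicnet` package is installed; the import path follows the dataset.py module referenced above):
###Code
from topicnet.cooking_machine.dataset import Dataset

# build a TopicNet Dataset from the preprocessed csv saved above
dataset = Dataset('20_News_dataset/train_preprocessed.csv')
###Output
_____no_output_____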
|
module_2/medinadiego_2_assignment_kaggle_challenge_2.ipynb
|
###Markdown
Lambda School Data Science, Unit 2: Predictive Modeling Kaggle Challenge, Module 2 Assignment- [ ] Read [“Adopting a Hypothesis-Driven Workflow”](https://outline.com/5S5tsB), a blog post by a Lambda DS student about the Tanzania Waterpumps challenge.- [ ] Continue to participate in our Kaggle challenge.- [ ] Try Ordinal Encoding.- [ ] Try a Random Forest Classifier.- [ ] Submit your predictions to our Kaggle competition. (Go to our Kaggle InClass competition webpage. Use the blue **Submit Predictions** button to upload your CSV file. Or you can use the Kaggle API to submit your predictions.)- [ ] Commit your notebook to your fork of the GitHub repo. Stretch Goals Doing- [ ] Add your own stretch goal(s) !- [ ] Do more exploratory data analysis, data cleaning, feature engineering, and feature selection.- [ ] Try other [categorical encodings](https://contrib.scikit-learn.org/categorical-encoding/).- [ ] Get and plot your feature importances.- [ ] Make visualizations and share on Slack. ReadingTop recommendations in _**bold italic:**_ Decision Trees- A Visual Introduction to Machine Learning, [Part 1: A Decision Tree](http://www.r2d3.us/visual-intro-to-machine-learning-part-1/), and _**[Part 2: Bias and Variance](http://www.r2d3.us/visual-intro-to-machine-learning-part-2/)**_- [Decision Trees: Advantages & Disadvantages](https://christophm.github.io/interpretable-ml-book/tree.htmladvantages-2)- [How a Russian mathematician constructed a decision tree — by hand — to solve a medical problem](http://fastml.com/how-a-russian-mathematician-constructed-a-decision-tree-by-hand-to-solve-a-medical-problem/)- [How decision trees work](https://brohrer.github.io/how_decision_trees_work.html)- [Let’s Write a Decision Tree Classifier from Scratch](https://www.youtube.com/watch?v=LDRbO9a6XPU) Random Forests- [_An Introduction to Statistical Learning_](http://www-bcf.usc.edu/~gareth/ISL/), Chapter 8: Tree-Based Methods- [Coloring with Random Forests](http://structuringtheunstructured.blogspot.com/2017/11/coloring-with-random-forests.html)- _**[Random Forests for Complete Beginners: The definitive guide to Random Forests and Decision Trees](https://victorzhou.com/blog/intro-to-random-forests/)**_ Categorical encoding for trees- [Are categorical variables getting lost in your random forests?](https://roamanalytics.com/2016/10/28/are-categorical-variables-getting-lost-in-your-random-forests/)- [Beyond One-Hot: An Exploration of Categorical Variables](http://www.willmcginnis.com/2015/11/29/beyond-one-hot-an-exploration-of-categorical-variables/)- _**[Categorical Features and Encoding in Decision Trees](https://medium.com/data-design/visiting-categorical-features-and-encoding-in-decision-trees-53400fa65931)**_- _**[Coursera — How to Win a Data Science Competition: Learn from Top Kagglers — Concept of mean encoding](https://www.coursera.org/lecture/competitive-data-science/concept-of-mean-encoding-b5Gxv)**_- [Mean (likelihood) encodings: a comprehensive study](https://www.kaggle.com/vprokopev/mean-likelihood-encodings-a-comprehensive-study)- [The Mechanics of Machine Learning, Chapter 6: Categorically Speaking](https://mlbook.explained.ai/catvars.html) Imposter Syndrome- [Effort Shock and Reward Shock (How The Karate Kid Ruined The Modern World)](http://www.tempobook.com/2014/07/09/effort-shock-and-reward-shock/)- [How to manage impostor syndrome in data science](https://towardsdatascience.com/how-to-manage-impostor-syndrome-in-data-science-ad814809f068)- ["I am not a real data 
scientist"](https://brohrer.github.io/imposter_syndrome.html)- _**[Imposter Syndrome in Data Science](https://caitlinhudon.com/2018/01/19/imposter-syndrome-in-data-science/)**_ More Categorical Encodings**1.** The article **[Categorical Features and Encoding in Decision Trees](https://medium.com/data-design/visiting-categorical-features-and-encoding-in-decision-trees-53400fa65931)** mentions 4 encodings:- **"Categorical Encoding":** This means using the raw categorical values as-is, not encoded. Scikit-learn doesn't support this, but some tree algorithm implementations do. For example, [Catboost](https://catboost.ai/), or R's [rpart](https://cran.r-project.org/web/packages/rpart/index.html) package.- **Numeric Encoding:** Synonymous with Label Encoding, or "Ordinal" Encoding with random order. We can use [category_encoders.OrdinalEncoder](https://contrib.scikit-learn.org/categorical-encoding/ordinal.html).- **One-Hot Encoding:** We can use [category_encoders.OneHotEncoder](http://contrib.scikit-learn.org/categorical-encoding/onehot.html).- **Binary Encoding:** We can use [category_encoders.BinaryEncoder](http://contrib.scikit-learn.org/categorical-encoding/binary.html).**2.** The short video **[Coursera — How to Win a Data Science Competition: Learn from Top Kagglers — Concept of mean encoding](https://www.coursera.org/lecture/competitive-data-science/concept-of-mean-encoding-b5Gxv)** introduces an interesting idea: use both X _and_ y to encode categoricals.Category Encoders has multiple implementations of this general concept:- [CatBoost Encoder](http://contrib.scikit-learn.org/categorical-encoding/catboost.html)- [James-Stein Encoder](http://contrib.scikit-learn.org/categorical-encoding/jamesstein.html)- [Leave One Out](http://contrib.scikit-learn.org/categorical-encoding/leaveoneout.html)- [M-estimate](http://contrib.scikit-learn.org/categorical-encoding/mestimate.html)- [Target Encoder](http://contrib.scikit-learn.org/categorical-encoding/targetencoder.html)- [Weight of Evidence](http://contrib.scikit-learn.org/categorical-encoding/woe.html)Category Encoder's mean encoding implementations work for regression problems or binary classification problems. For multi-class classification problems, you will need to temporarily reformulate it as binary classification. For example:```pythonencoder = ce.TargetEncoder(min_samples_leaf=..., smoothing=...) Both parameters > 1 to avoid overfittingX_train_encoded = encoder.fit_transform(X_train, y_train=='functional')X_val_encoded = encoder.transform(X_train, y_val=='functional')```For this reason, mean encoding won't work well within pipelines for multi-class classification problems.**3.** The **[dirty_cat](https://dirty-cat.github.io/stable/)** library has a Target Encoder implementation that works with multi-class classification.```python dirty_cat.TargetEncoder(clf_type='multiclass-clf')```It also implements an interesting idea called ["Similarity Encoder" for dirty categories](https://www.slideshare.net/GaelVaroquaux/machine-learning-on-non-curated-data-154905090).However, it seems like dirty_cat doesn't handle missing values or unknown categories as well as category_encoders does. And you may need to use it with one column at a time, instead of with your whole dataframe.**4. [Embeddings](https://www.kaggle.com/learn/embeddings)** can work well with sparse / high cardinality categoricals._**I hope it’s not too frustrating or confusing that there’s not one “canonical” way to encode categorcals. It’s an active area of research and experimentation! 
Maybe you can make your own contributions!**_

Setup

You can work locally (follow the [local setup instructions](https://lambdaschool.github.io/ds/unit2/local/)) or on Colab (run the code cell below).
###Code
%%capture
import sys
# If you're on Colab:
if 'google.colab' in sys.modules:
DATA_PATH = 'https://raw.githubusercontent.com/LambdaSchool/DS-Unit-2-Kaggle-Challenge/master/data/'
!pip install category_encoders==2.*
# If you're working locally:
else:
DATA_PATH = '../data/'
import pandas as pd
from sklearn.model_selection import train_test_split
train = pd.merge(pd.read_csv(DATA_PATH+'waterpumps/train_features.csv'),
pd.read_csv(DATA_PATH+'waterpumps/train_labels.csv'))
test = pd.read_csv(DATA_PATH+'waterpumps/test_features.csv')
sample_submission = pd.read_csv(DATA_PATH+'waterpumps/sample_submission.csv')
train.shape, test.shape
train, validation = train_test_split(train, random_state=10)
###Output
_____no_output_____
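###Markdown
Before engineering features, here is a minimal sketch of how the encodings discussed above behave on a tiny, made-up dataframe. The toy column, its values, and the hyperparameter values below are illustrative assumptions; only the `category_encoders` classes themselves (`OrdinalEncoder`, `OneHotEncoder`, `BinaryEncoder`, `TargetEncoder`) come from the library.
```python
import pandas as pd
import category_encoders as ce

# Tiny made-up example: one categorical column and a binary target
X_toy = pd.DataFrame({'quality': ['good', 'salty', 'good', 'unknown', 'salty']})
y_toy = pd.Series([1, 0, 1, 1, 0])

# Ordinal: each category becomes an arbitrary integer (still one column)
print(ce.OrdinalEncoder().fit_transform(X_toy))

# One-hot: one 0/1 column per category
print(ce.OneHotEncoder(use_cat_names=True).fit_transform(X_toy))

# Binary: categories -> integers -> binary digits (a few 0/1 columns)
print(ce.BinaryEncoder().fit_transform(X_toy))

# Target (mean) encoding: each category becomes a smoothed mean of y
print(ce.TargetEncoder(min_samples_leaf=1, smoothing=1.0).fit_transform(X_toy, y_toy))
```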
###Markdown
feature engineering
###Code
import numpy as np
def replacing_nulls(df):
    # In columns that already contain missing values, treat 0 as missing too:
    # replace 0 with NaN (in place) and return the list of affected columns.
    those_null = []
    for col in df.columns:
        if not df[col].isnull().any():
            continue
        df[col] = df[col].replace(0, np.nan)
        those_null.append(col)
    return those_null
replacing_nulls(train)
replacing_nulls(validation)
replacing_nulls(test)
def replacing_dates(df):
df['date_recorded'] = pd.to_datetime(df['date_recorded'], infer_datetime_format=True)
df['year_recorded'] = df['date_recorded'].dt.year
df['month_recorded'] = df['date_recorded'].dt.month
df['day_recorded'] = df['date_recorded'].dt.day
replacing_dates(train)
replacing_dates(validation)
replacing_dates(test)
train = train.drop(columns='date_recorded')
validation = validation.drop(columns='date_recorded')
test = test.drop(columns='date_recorded')
validation.head()
train.describe(include=['O'])
# Keep the names of the non-numeric (object) columns; non_list is used again
# further down to check which of the important features were categorical.
non_numeric = train.describe(include=['O'])
non_list = list(non_numeric.columns)
target = 'status_group'
# dataframe with all train columns except the target
train_features = train.drop(columns=[target])
# get all column names
features = train_features.columns.tolist()
###Output
_____no_output_____
###Markdown
baseline
###Code
x_train = train[features]
y_train = train[target]
x_val = validation[features]
y_val = validation[target]
x_test = test[features]
from sklearn.metrics import accuracy_score
majority_class_2 = y_val.mode()[0]
y_predict = [majority_class_2] * len(y_val)
ac_v = accuracy_score(y_val, y_predict)
print('Validation accuracy score: ', str(ac_v))
x_train.head()
import category_encoders as ce
from sklearn.ensemble import RandomForestClassifier
from sklearn.impute import SimpleImputer
from sklearn.pipeline import make_pipeline
pipeline = make_pipeline(
ce.OrdinalEncoder(),
SimpleImputer(strategy='mean'),
RandomForestClassifier(n_estimators=200, random_state=10, n_jobs=-1)
)
# Fit on train, score on val
pipeline.fit(x_train, y_train)
print('Validation Accuracy', pipeline.score(x_val, y_val))
# Pipeline’s named_steps attribute allows accessing steps by name
encoder = pipeline.named_steps['ordinalencoder']
#encoded = ce.OrdinalEncoder(x_train)
encoded = encoder.transform(x_train)
encoded.head()
random_forest = pipeline.named_steps['randomforestclassifier']
important = pd.Series(random_forest.feature_importances_, encoded.columns)
important_list = list(important.sort_values()[-10:].index)
important.sort_values()[-10:]
def commonalities(ls1, ls2):
    # Return the items that appear in both lists, without duplicates
    common = []
    for i in ls1:
        if i in ls2 and i not in common:
            common.append(i)
    return common
commonalities(non_list, important_list)
common = commonalities(non_list, important_list)
print(f'These were categorical: {common}')
%matplotlib inline
import matplotlib.pyplot as plt
# Get feature importances
#random_forest = pipeline.named_steps['randomforestclassifier']
#importances = pd.Series(random_forest.feature_importances_, encoded.columns)
# Plot feature importances
n = 20
#common = commonalities
plt.figure(figsize=(10,n/2))
plt.title(f'Top {n} features')
important.sort_values()[-n:].plot.barh(color='grey');
y_pred = pipeline.predict(x_test)
print('prediction: ', y_pred)
submission = sample_submission.copy()
submission['status_group'] = y_pred
submission.head()
submission.to_csv('medinadiegokaggle_2.csv', index=False)
from google.colab import files
files.download('medinadiegokaggle_2.csv')
###Output
_____no_output_____
|
Model backlog/Deep Learning/MobileNetV2/[148th] - MobileNetV2 - Preprocess - HFlip.ipynb
|
###Markdown
Model parameters
###Code
# Model parameters
BATCH_SIZE = 128
EPOCHS = 30
WARMUP_EPOCHS = 2
LEARNING_RATE = 0.0001
WARMUP_LEARNING_RATE = 0.001
HEIGHT = 224
WIDTH = 224
CANAL = 3
N_CLASSES = labels.shape[0]
ES_PATIENCE = 3
DECAY_DROP = 0.5
DECAY_EPOCHS = 10
def custom_f2(y_true, y_pred):
    # F-beta score with beta=2 (recall weighted more heavily than precision),
    # computed on binarized ground-truth and prediction arrays.
    beta = 2
    tp = np.sum((y_true == 1) & (y_pred == 1))
    tn = np.sum((y_true == 0) & (y_pred == 0))
    fp = np.sum((y_true == 0) & (y_pred == 1))
    fn = np.sum((y_true == 1) & (y_pred == 0))
    p = tp / (tp + fp + K.epsilon())   # precision
    r = tp / (tp + fn + K.epsilon())   # recall
    f2 = (1+beta**2)*p*r / (p*beta**2 + r + 1e-15)
    return f2
def find_best_fixed_threshold(preds, targs, do_plot=True):
score = []
thrs = np.arange(0, 0.5, 0.01)
for thr in thrs:
score.append(custom_f2(targs, (preds > thr).astype(int)))
score = np.array(score)
pm = score.argmax()
best_thr, best_score = thrs[pm], score[pm].item()
print(f'thr={best_thr:.3f}', f'F2={best_score:.3f}')
if do_plot:
plt.plot(thrs, score)
plt.vlines(x=best_thr, ymin=score.min(), ymax=score.max())
plt.text(best_thr+0.03, best_score-0.01, f'$F_{2}=${best_score:.3f}', fontsize=14);
plt.show()
return best_thr, best_score
def step_decay(epoch):
    # Step schedule: multiply the learning rate by DECAY_DROP every DECAY_EPOCHS epochs
    initial_lrate = LEARNING_RATE
    drop = DECAY_DROP
    epochs_drop = DECAY_EPOCHS
    lrate = initial_lrate * math.pow(drop, math.floor((1+epoch)/epochs_drop))
    return lrate
train_datagen=ImageDataGenerator(rescale=1./255, validation_split=0.25,
horizontal_flip=True)
train_generator=train_datagen.flow_from_dataframe(
dataframe=train,
directory="../input/imet-2019-fgvc6/train",
x_col="id",
y_col="attribute_ids",
batch_size=BATCH_SIZE,
shuffle=True,
class_mode="categorical",
target_size=(HEIGHT, WIDTH),
subset='training')
valid_generator=train_datagen.flow_from_dataframe(
dataframe=train,
directory="../input/imet-2019-fgvc6/train",
x_col="id",
y_col="attribute_ids",
batch_size=BATCH_SIZE,
shuffle=True,
class_mode="categorical",
target_size=(HEIGHT, WIDTH),
subset='validation')
test_datagen = ImageDataGenerator(rescale=1./255)
test_generator = test_datagen.flow_from_dataframe(
dataframe=test,
directory = "../input/imet-2019-fgvc6/test",
x_col="id",
target_size=(HEIGHT, WIDTH),
batch_size=1,
shuffle=False,
class_mode=None)
###Output
Found 81928 images belonging to 1103 classes.
Found 27309 images belonging to 1103 classes.
Found 7443 images.
###Markdown
Model
###Code
def create_model(input_shape, n_out):
input_tensor = Input(shape=input_shape)
base_model = applications.MobileNetV2(weights=None, include_top=False, input_tensor=input_tensor)
base_model.load_weights('../input/mobilenet-v2-tf-kernels-96-no-top/mobilenet_v2_weights_tf_dim_ordering_tf_kernels_1.0_96_no_top.h5')
x = GlobalAveragePooling2D()(base_model.output)
x = Dropout(0.5)(x)
x = Dense(1024, activation='relu')(x)
x = Dropout(0.5)(x)
final_output = Dense(n_out, activation='sigmoid', name='final_output')(x)
model = Model(input_tensor, final_output)
return model
model = create_model(input_shape=(HEIGHT, WIDTH, CANAL), n_out=N_CLASSES)
for layer in model.layers:
layer.trainable = False
for i in range(-5,0):
model.layers[i].trainable = True
metrics = ["accuracy", "categorical_accuracy"]
optimizer = optimizers.Adam(lr=WARMUP_LEARNING_RATE)
model.compile(optimizer=optimizer, loss="binary_crossentropy", metrics=metrics)
model.summary()
###Output
WARNING:tensorflow:From /opt/conda/lib/python3.6/site-packages/tensorflow/python/framework/op_def_library.py:263: colocate_with (from tensorflow.python.framework.ops) is deprecated and will be removed in a future version.
Instructions for updating:
Colocations handled automatically by placer.
WARNING:tensorflow:From /opt/conda/lib/python3.6/site-packages/keras/backend/tensorflow_backend.py:3445: calling dropout (from tensorflow.python.ops.nn_ops) with keep_prob is deprecated and will be removed in a future version.
Instructions for updating:
Please use `rate` instead of `keep_prob`. Rate should be set to `rate = 1 - keep_prob`.
__________________________________________________________________________________________________
Layer (type) Output Shape Param # Connected to
==================================================================================================
input_1 (InputLayer) (None, 224, 224, 3) 0
__________________________________________________________________________________________________
Conv1_pad (ZeroPadding2D) (None, 225, 225, 3) 0 input_1[0][0]
__________________________________________________________________________________________________
Conv1 (Conv2D) (None, 112, 112, 32) 864 Conv1_pad[0][0]
__________________________________________________________________________________________________
bn_Conv1 (BatchNormalization) (None, 112, 112, 32) 128 Conv1[0][0]
__________________________________________________________________________________________________
Conv1_relu (ReLU) (None, 112, 112, 32) 0 bn_Conv1[0][0]
__________________________________________________________________________________________________
expanded_conv_depthwise (Depthw (None, 112, 112, 32) 288 Conv1_relu[0][0]
__________________________________________________________________________________________________
expanded_conv_depthwise_BN (Bat (None, 112, 112, 32) 128 expanded_conv_depthwise[0][0]
__________________________________________________________________________________________________
expanded_conv_depthwise_relu (R (None, 112, 112, 32) 0 expanded_conv_depthwise_BN[0][0]
__________________________________________________________________________________________________
expanded_conv_project (Conv2D) (None, 112, 112, 16) 512 expanded_conv_depthwise_relu[0][0
__________________________________________________________________________________________________
expanded_conv_project_BN (Batch (None, 112, 112, 16) 64 expanded_conv_project[0][0]
__________________________________________________________________________________________________
block_1_expand (Conv2D) (None, 112, 112, 96) 1536 expanded_conv_project_BN[0][0]
__________________________________________________________________________________________________
block_1_expand_BN (BatchNormali (None, 112, 112, 96) 384 block_1_expand[0][0]
__________________________________________________________________________________________________
block_1_expand_relu (ReLU) (None, 112, 112, 96) 0 block_1_expand_BN[0][0]
__________________________________________________________________________________________________
block_1_pad (ZeroPadding2D) (None, 113, 113, 96) 0 block_1_expand_relu[0][0]
__________________________________________________________________________________________________
block_1_depthwise (DepthwiseCon (None, 56, 56, 96) 864 block_1_pad[0][0]
__________________________________________________________________________________________________
block_1_depthwise_BN (BatchNorm (None, 56, 56, 96) 384 block_1_depthwise[0][0]
__________________________________________________________________________________________________
block_1_depthwise_relu (ReLU) (None, 56, 56, 96) 0 block_1_depthwise_BN[0][0]
__________________________________________________________________________________________________
block_1_project (Conv2D) (None, 56, 56, 24) 2304 block_1_depthwise_relu[0][0]
__________________________________________________________________________________________________
block_1_project_BN (BatchNormal (None, 56, 56, 24) 96 block_1_project[0][0]
__________________________________________________________________________________________________
block_2_expand (Conv2D) (None, 56, 56, 144) 3456 block_1_project_BN[0][0]
__________________________________________________________________________________________________
block_2_expand_BN (BatchNormali (None, 56, 56, 144) 576 block_2_expand[0][0]
__________________________________________________________________________________________________
block_2_expand_relu (ReLU) (None, 56, 56, 144) 0 block_2_expand_BN[0][0]
__________________________________________________________________________________________________
block_2_depthwise (DepthwiseCon (None, 56, 56, 144) 1296 block_2_expand_relu[0][0]
__________________________________________________________________________________________________
block_2_depthwise_BN (BatchNorm (None, 56, 56, 144) 576 block_2_depthwise[0][0]
__________________________________________________________________________________________________
block_2_depthwise_relu (ReLU) (None, 56, 56, 144) 0 block_2_depthwise_BN[0][0]
__________________________________________________________________________________________________
block_2_project (Conv2D) (None, 56, 56, 24) 3456 block_2_depthwise_relu[0][0]
__________________________________________________________________________________________________
block_2_project_BN (BatchNormal (None, 56, 56, 24) 96 block_2_project[0][0]
__________________________________________________________________________________________________
block_2_add (Add) (None, 56, 56, 24) 0 block_1_project_BN[0][0]
block_2_project_BN[0][0]
__________________________________________________________________________________________________
block_3_expand (Conv2D) (None, 56, 56, 144) 3456 block_2_add[0][0]
__________________________________________________________________________________________________
block_3_expand_BN (BatchNormali (None, 56, 56, 144) 576 block_3_expand[0][0]
__________________________________________________________________________________________________
block_3_expand_relu (ReLU) (None, 56, 56, 144) 0 block_3_expand_BN[0][0]
__________________________________________________________________________________________________
block_3_pad (ZeroPadding2D) (None, 57, 57, 144) 0 block_3_expand_relu[0][0]
__________________________________________________________________________________________________
block_3_depthwise (DepthwiseCon (None, 28, 28, 144) 1296 block_3_pad[0][0]
__________________________________________________________________________________________________
block_3_depthwise_BN (BatchNorm (None, 28, 28, 144) 576 block_3_depthwise[0][0]
__________________________________________________________________________________________________
block_3_depthwise_relu (ReLU) (None, 28, 28, 144) 0 block_3_depthwise_BN[0][0]
__________________________________________________________________________________________________
block_3_project (Conv2D) (None, 28, 28, 32) 4608 block_3_depthwise_relu[0][0]
__________________________________________________________________________________________________
block_3_project_BN (BatchNormal (None, 28, 28, 32) 128 block_3_project[0][0]
__________________________________________________________________________________________________
block_4_expand (Conv2D) (None, 28, 28, 192) 6144 block_3_project_BN[0][0]
__________________________________________________________________________________________________
block_4_expand_BN (BatchNormali (None, 28, 28, 192) 768 block_4_expand[0][0]
__________________________________________________________________________________________________
block_4_expand_relu (ReLU) (None, 28, 28, 192) 0 block_4_expand_BN[0][0]
__________________________________________________________________________________________________
block_4_depthwise (DepthwiseCon (None, 28, 28, 192) 1728 block_4_expand_relu[0][0]
__________________________________________________________________________________________________
block_4_depthwise_BN (BatchNorm (None, 28, 28, 192) 768 block_4_depthwise[0][0]
__________________________________________________________________________________________________
block_4_depthwise_relu (ReLU) (None, 28, 28, 192) 0 block_4_depthwise_BN[0][0]
__________________________________________________________________________________________________
block_4_project (Conv2D) (None, 28, 28, 32) 6144 block_4_depthwise_relu[0][0]
__________________________________________________________________________________________________
block_4_project_BN (BatchNormal (None, 28, 28, 32) 128 block_4_project[0][0]
__________________________________________________________________________________________________
block_4_add (Add) (None, 28, 28, 32) 0 block_3_project_BN[0][0]
block_4_project_BN[0][0]
__________________________________________________________________________________________________
block_5_expand (Conv2D) (None, 28, 28, 192) 6144 block_4_add[0][0]
__________________________________________________________________________________________________
block_5_expand_BN (BatchNormali (None, 28, 28, 192) 768 block_5_expand[0][0]
__________________________________________________________________________________________________
block_5_expand_relu (ReLU) (None, 28, 28, 192) 0 block_5_expand_BN[0][0]
__________________________________________________________________________________________________
block_5_depthwise (DepthwiseCon (None, 28, 28, 192) 1728 block_5_expand_relu[0][0]
__________________________________________________________________________________________________
block_5_depthwise_BN (BatchNorm (None, 28, 28, 192) 768 block_5_depthwise[0][0]
__________________________________________________________________________________________________
block_5_depthwise_relu (ReLU) (None, 28, 28, 192) 0 block_5_depthwise_BN[0][0]
__________________________________________________________________________________________________
block_5_project (Conv2D) (None, 28, 28, 32) 6144 block_5_depthwise_relu[0][0]
__________________________________________________________________________________________________
block_5_project_BN (BatchNormal (None, 28, 28, 32) 128 block_5_project[0][0]
__________________________________________________________________________________________________
block_5_add (Add) (None, 28, 28, 32) 0 block_4_add[0][0]
block_5_project_BN[0][0]
__________________________________________________________________________________________________
block_6_expand (Conv2D) (None, 28, 28, 192) 6144 block_5_add[0][0]
__________________________________________________________________________________________________
block_6_expand_BN (BatchNormali (None, 28, 28, 192) 768 block_6_expand[0][0]
__________________________________________________________________________________________________
block_6_expand_relu (ReLU) (None, 28, 28, 192) 0 block_6_expand_BN[0][0]
__________________________________________________________________________________________________
block_6_pad (ZeroPadding2D) (None, 29, 29, 192) 0 block_6_expand_relu[0][0]
__________________________________________________________________________________________________
block_6_depthwise (DepthwiseCon (None, 14, 14, 192) 1728 block_6_pad[0][0]
__________________________________________________________________________________________________
block_6_depthwise_BN (BatchNorm (None, 14, 14, 192) 768 block_6_depthwise[0][0]
__________________________________________________________________________________________________
block_6_depthwise_relu (ReLU) (None, 14, 14, 192) 0 block_6_depthwise_BN[0][0]
__________________________________________________________________________________________________
block_6_project (Conv2D) (None, 14, 14, 64) 12288 block_6_depthwise_relu[0][0]
__________________________________________________________________________________________________
block_6_project_BN (BatchNormal (None, 14, 14, 64) 256 block_6_project[0][0]
__________________________________________________________________________________________________
block_7_expand (Conv2D) (None, 14, 14, 384) 24576 block_6_project_BN[0][0]
__________________________________________________________________________________________________
block_7_expand_BN (BatchNormali (None, 14, 14, 384) 1536 block_7_expand[0][0]
__________________________________________________________________________________________________
block_7_expand_relu (ReLU) (None, 14, 14, 384) 0 block_7_expand_BN[0][0]
__________________________________________________________________________________________________
block_7_depthwise (DepthwiseCon (None, 14, 14, 384) 3456 block_7_expand_relu[0][0]
__________________________________________________________________________________________________
block_7_depthwise_BN (BatchNorm (None, 14, 14, 384) 1536 block_7_depthwise[0][0]
__________________________________________________________________________________________________
block_7_depthwise_relu (ReLU) (None, 14, 14, 384) 0 block_7_depthwise_BN[0][0]
__________________________________________________________________________________________________
block_7_project (Conv2D) (None, 14, 14, 64) 24576 block_7_depthwise_relu[0][0]
__________________________________________________________________________________________________
block_7_project_BN (BatchNormal (None, 14, 14, 64) 256 block_7_project[0][0]
__________________________________________________________________________________________________
block_7_add (Add) (None, 14, 14, 64) 0 block_6_project_BN[0][0]
block_7_project_BN[0][0]
__________________________________________________________________________________________________
block_8_expand (Conv2D) (None, 14, 14, 384) 24576 block_7_add[0][0]
__________________________________________________________________________________________________
block_8_expand_BN (BatchNormali (None, 14, 14, 384) 1536 block_8_expand[0][0]
__________________________________________________________________________________________________
block_8_expand_relu (ReLU) (None, 14, 14, 384) 0 block_8_expand_BN[0][0]
__________________________________________________________________________________________________
block_8_depthwise (DepthwiseCon (None, 14, 14, 384) 3456 block_8_expand_relu[0][0]
__________________________________________________________________________________________________
block_8_depthwise_BN (BatchNorm (None, 14, 14, 384) 1536 block_8_depthwise[0][0]
__________________________________________________________________________________________________
block_8_depthwise_relu (ReLU) (None, 14, 14, 384) 0 block_8_depthwise_BN[0][0]
__________________________________________________________________________________________________
block_8_project (Conv2D) (None, 14, 14, 64) 24576 block_8_depthwise_relu[0][0]
__________________________________________________________________________________________________
block_8_project_BN (BatchNormal (None, 14, 14, 64) 256 block_8_project[0][0]
__________________________________________________________________________________________________
block_8_add (Add) (None, 14, 14, 64) 0 block_7_add[0][0]
block_8_project_BN[0][0]
__________________________________________________________________________________________________
block_9_expand (Conv2D) (None, 14, 14, 384) 24576 block_8_add[0][0]
__________________________________________________________________________________________________
block_9_expand_BN (BatchNormali (None, 14, 14, 384) 1536 block_9_expand[0][0]
__________________________________________________________________________________________________
block_9_expand_relu (ReLU) (None, 14, 14, 384) 0 block_9_expand_BN[0][0]
__________________________________________________________________________________________________
block_9_depthwise (DepthwiseCon (None, 14, 14, 384) 3456 block_9_expand_relu[0][0]
__________________________________________________________________________________________________
block_9_depthwise_BN (BatchNorm (None, 14, 14, 384) 1536 block_9_depthwise[0][0]
__________________________________________________________________________________________________
block_9_depthwise_relu (ReLU) (None, 14, 14, 384) 0 block_9_depthwise_BN[0][0]
__________________________________________________________________________________________________
block_9_project (Conv2D) (None, 14, 14, 64) 24576 block_9_depthwise_relu[0][0]
__________________________________________________________________________________________________
block_9_project_BN (BatchNormal (None, 14, 14, 64) 256 block_9_project[0][0]
__________________________________________________________________________________________________
block_9_add (Add) (None, 14, 14, 64) 0 block_8_add[0][0]
block_9_project_BN[0][0]
__________________________________________________________________________________________________
block_10_expand (Conv2D) (None, 14, 14, 384) 24576 block_9_add[0][0]
__________________________________________________________________________________________________
block_10_expand_BN (BatchNormal (None, 14, 14, 384) 1536 block_10_expand[0][0]
__________________________________________________________________________________________________
block_10_expand_relu (ReLU) (None, 14, 14, 384) 0 block_10_expand_BN[0][0]
__________________________________________________________________________________________________
block_10_depthwise (DepthwiseCo (None, 14, 14, 384) 3456 block_10_expand_relu[0][0]
__________________________________________________________________________________________________
block_10_depthwise_BN (BatchNor (None, 14, 14, 384) 1536 block_10_depthwise[0][0]
__________________________________________________________________________________________________
block_10_depthwise_relu (ReLU) (None, 14, 14, 384) 0 block_10_depthwise_BN[0][0]
__________________________________________________________________________________________________
block_10_project (Conv2D) (None, 14, 14, 96) 36864 block_10_depthwise_relu[0][0]
__________________________________________________________________________________________________
block_10_project_BN (BatchNorma (None, 14, 14, 96) 384 block_10_project[0][0]
__________________________________________________________________________________________________
block_11_expand (Conv2D) (None, 14, 14, 576) 55296 block_10_project_BN[0][0]
__________________________________________________________________________________________________
block_11_expand_BN (BatchNormal (None, 14, 14, 576) 2304 block_11_expand[0][0]
__________________________________________________________________________________________________
block_11_expand_relu (ReLU) (None, 14, 14, 576) 0 block_11_expand_BN[0][0]
__________________________________________________________________________________________________
block_11_depthwise (DepthwiseCo (None, 14, 14, 576) 5184 block_11_expand_relu[0][0]
__________________________________________________________________________________________________
block_11_depthwise_BN (BatchNor (None, 14, 14, 576) 2304 block_11_depthwise[0][0]
__________________________________________________________________________________________________
block_11_depthwise_relu (ReLU) (None, 14, 14, 576) 0 block_11_depthwise_BN[0][0]
__________________________________________________________________________________________________
block_11_project (Conv2D) (None, 14, 14, 96) 55296 block_11_depthwise_relu[0][0]
__________________________________________________________________________________________________
block_11_project_BN (BatchNorma (None, 14, 14, 96) 384 block_11_project[0][0]
__________________________________________________________________________________________________
block_11_add (Add) (None, 14, 14, 96) 0 block_10_project_BN[0][0]
block_11_project_BN[0][0]
__________________________________________________________________________________________________
block_12_expand (Conv2D) (None, 14, 14, 576) 55296 block_11_add[0][0]
__________________________________________________________________________________________________
block_12_expand_BN (BatchNormal (None, 14, 14, 576) 2304 block_12_expand[0][0]
__________________________________________________________________________________________________
block_12_expand_relu (ReLU) (None, 14, 14, 576) 0 block_12_expand_BN[0][0]
__________________________________________________________________________________________________
block_12_depthwise (DepthwiseCo (None, 14, 14, 576) 5184 block_12_expand_relu[0][0]
__________________________________________________________________________________________________
block_12_depthwise_BN (BatchNor (None, 14, 14, 576) 2304 block_12_depthwise[0][0]
__________________________________________________________________________________________________
block_12_depthwise_relu (ReLU) (None, 14, 14, 576) 0 block_12_depthwise_BN[0][0]
__________________________________________________________________________________________________
block_12_project (Conv2D) (None, 14, 14, 96) 55296 block_12_depthwise_relu[0][0]
__________________________________________________________________________________________________
block_12_project_BN (BatchNorma (None, 14, 14, 96) 384 block_12_project[0][0]
__________________________________________________________________________________________________
block_12_add (Add) (None, 14, 14, 96) 0 block_11_add[0][0]
block_12_project_BN[0][0]
__________________________________________________________________________________________________
block_13_expand (Conv2D) (None, 14, 14, 576) 55296 block_12_add[0][0]
__________________________________________________________________________________________________
block_13_expand_BN (BatchNormal (None, 14, 14, 576) 2304 block_13_expand[0][0]
__________________________________________________________________________________________________
block_13_expand_relu (ReLU) (None, 14, 14, 576) 0 block_13_expand_BN[0][0]
__________________________________________________________________________________________________
block_13_pad (ZeroPadding2D) (None, 15, 15, 576) 0 block_13_expand_relu[0][0]
__________________________________________________________________________________________________
block_13_depthwise (DepthwiseCo (None, 7, 7, 576) 5184 block_13_pad[0][0]
__________________________________________________________________________________________________
block_13_depthwise_BN (BatchNor (None, 7, 7, 576) 2304 block_13_depthwise[0][0]
__________________________________________________________________________________________________
block_13_depthwise_relu (ReLU) (None, 7, 7, 576) 0 block_13_depthwise_BN[0][0]
__________________________________________________________________________________________________
block_13_project (Conv2D) (None, 7, 7, 160) 92160 block_13_depthwise_relu[0][0]
__________________________________________________________________________________________________
block_13_project_BN (BatchNorma (None, 7, 7, 160) 640 block_13_project[0][0]
__________________________________________________________________________________________________
block_14_expand (Conv2D) (None, 7, 7, 960) 153600 block_13_project_BN[0][0]
__________________________________________________________________________________________________
block_14_expand_BN (BatchNormal (None, 7, 7, 960) 3840 block_14_expand[0][0]
__________________________________________________________________________________________________
block_14_expand_relu (ReLU) (None, 7, 7, 960) 0 block_14_expand_BN[0][0]
__________________________________________________________________________________________________
block_14_depthwise (DepthwiseCo (None, 7, 7, 960) 8640 block_14_expand_relu[0][0]
__________________________________________________________________________________________________
block_14_depthwise_BN (BatchNor (None, 7, 7, 960) 3840 block_14_depthwise[0][0]
__________________________________________________________________________________________________
block_14_depthwise_relu (ReLU) (None, 7, 7, 960) 0 block_14_depthwise_BN[0][0]
__________________________________________________________________________________________________
block_14_project (Conv2D) (None, 7, 7, 160) 153600 block_14_depthwise_relu[0][0]
__________________________________________________________________________________________________
block_14_project_BN (BatchNorma (None, 7, 7, 160) 640 block_14_project[0][0]
__________________________________________________________________________________________________
block_14_add (Add) (None, 7, 7, 160) 0 block_13_project_BN[0][0]
block_14_project_BN[0][0]
__________________________________________________________________________________________________
block_15_expand (Conv2D) (None, 7, 7, 960) 153600 block_14_add[0][0]
__________________________________________________________________________________________________
block_15_expand_BN (BatchNormal (None, 7, 7, 960) 3840 block_15_expand[0][0]
__________________________________________________________________________________________________
block_15_expand_relu (ReLU) (None, 7, 7, 960) 0 block_15_expand_BN[0][0]
__________________________________________________________________________________________________
block_15_depthwise (DepthwiseCo (None, 7, 7, 960) 8640 block_15_expand_relu[0][0]
__________________________________________________________________________________________________
block_15_depthwise_BN (BatchNor (None, 7, 7, 960) 3840 block_15_depthwise[0][0]
__________________________________________________________________________________________________
block_15_depthwise_relu (ReLU) (None, 7, 7, 960) 0 block_15_depthwise_BN[0][0]
__________________________________________________________________________________________________
block_15_project (Conv2D) (None, 7, 7, 160) 153600 block_15_depthwise_relu[0][0]
__________________________________________________________________________________________________
block_15_project_BN (BatchNorma (None, 7, 7, 160) 640 block_15_project[0][0]
__________________________________________________________________________________________________
block_15_add (Add) (None, 7, 7, 160) 0 block_14_add[0][0]
block_15_project_BN[0][0]
__________________________________________________________________________________________________
block_16_expand (Conv2D) (None, 7, 7, 960) 153600 block_15_add[0][0]
__________________________________________________________________________________________________
block_16_expand_BN (BatchNormal (None, 7, 7, 960) 3840 block_16_expand[0][0]
__________________________________________________________________________________________________
block_16_expand_relu (ReLU) (None, 7, 7, 960) 0 block_16_expand_BN[0][0]
__________________________________________________________________________________________________
block_16_depthwise (DepthwiseCo (None, 7, 7, 960) 8640 block_16_expand_relu[0][0]
__________________________________________________________________________________________________
block_16_depthwise_BN (BatchNor (None, 7, 7, 960) 3840 block_16_depthwise[0][0]
__________________________________________________________________________________________________
block_16_depthwise_relu (ReLU) (None, 7, 7, 960) 0 block_16_depthwise_BN[0][0]
__________________________________________________________________________________________________
block_16_project (Conv2D) (None, 7, 7, 320) 307200 block_16_depthwise_relu[0][0]
__________________________________________________________________________________________________
block_16_project_BN (BatchNorma (None, 7, 7, 320) 1280 block_16_project[0][0]
__________________________________________________________________________________________________
Conv_1 (Conv2D) (None, 7, 7, 1280) 409600 block_16_project_BN[0][0]
__________________________________________________________________________________________________
Conv_1_bn (BatchNormalization) (None, 7, 7, 1280) 5120 Conv_1[0][0]
__________________________________________________________________________________________________
out_relu (ReLU) (None, 7, 7, 1280) 0 Conv_1_bn[0][0]
__________________________________________________________________________________________________
global_average_pooling2d_1 (Glo (None, 1280) 0 out_relu[0][0]
__________________________________________________________________________________________________
dropout_1 (Dropout) (None, 1280) 0 global_average_pooling2d_1[0][0]
__________________________________________________________________________________________________
dense_1 (Dense) (None, 1024) 1311744 dropout_1[0][0]
__________________________________________________________________________________________________
dropout_2 (Dropout) (None, 1024) 0 dense_1[0][0]
__________________________________________________________________________________________________
final_output (Dense) (None, 1103) 1130575 dropout_2[0][0]
==================================================================================================
Total params: 4,700,303
Trainable params: 2,442,319
Non-trainable params: 2,257,984
__________________________________________________________________________________________________
###Markdown
Train top layers (warm up model)
###Code
STEP_SIZE_TRAIN = train_generator.n//train_generator.batch_size
STEP_SIZE_VALID = valid_generator.n//valid_generator.batch_size
history = model.fit_generator(generator=train_generator,
steps_per_epoch=STEP_SIZE_TRAIN,
validation_data=valid_generator,
validation_steps=STEP_SIZE_VALID,
epochs=WARMUP_EPOCHS,
verbose=2,
max_queue_size=16, workers=3, use_multiprocessing=True)
###Output
WARNING:tensorflow:From /opt/conda/lib/python3.6/site-packages/tensorflow/python/ops/math_ops.py:3066: to_int32 (from tensorflow.python.ops.math_ops) is deprecated and will be removed in a future version.
Instructions for updating:
Use tf.cast instead.
Epoch 1/2
- 684s - loss: 0.0203 - acc: 0.9955 - categorical_accuracy: 0.0967 - val_loss: 0.0142 - val_acc: 0.9971 - val_categorical_accuracy: 0.1081
Epoch 2/2
- 664s - loss: 0.0127 - acc: 0.9972 - categorical_accuracy: 0.1351 - val_loss: 0.0144 - val_acc: 0.9971 - val_categorical_accuracy: 0.1005
###Markdown
Fine-tune the complete model
###Code
for layer in model.layers:
layer.trainable = True
# lrate = LearningRateScheduler(step_decay)
# rlrop = ReduceLROnPlateau(monitor='val_loss', factor=0.25, patience=(ES_PATIENCE-2))
es = EarlyStopping(monitor='val_loss', mode='min', verbose=1, patience=ES_PATIENCE)
callbacks = [es]
optimizer = optimizers.Adam(lr=LEARNING_RATE)
metrics = ["accuracy"]
model.compile(optimizer=optimizer, loss="binary_crossentropy", metrics=metrics)
model.summary()
STEP_SIZE_TRAIN = train_generator.n//train_generator.batch_size
STEP_SIZE_VALID = valid_generator.n//valid_generator.batch_size
history = model.fit_generator(generator=train_generator,
steps_per_epoch=STEP_SIZE_TRAIN,
validation_data=valid_generator,
validation_steps=STEP_SIZE_VALID,
epochs=EPOCHS,
callbacks=callbacks,
verbose=2,
max_queue_size=16, workers=3, use_multiprocessing=True)
###Output
Epoch 1/30
- 781s - loss: 0.0114 - acc: 0.9973 - val_loss: 0.0108 - val_acc: 0.9974
Epoch 2/30
- 755s - loss: 0.0105 - acc: 0.9974 - val_loss: 0.0102 - val_acc: 0.9974
Epoch 3/30
- 756s - loss: 0.0105 - acc: 0.9974 - val_loss: 0.0102 - val_acc: 0.9975
Epoch 4/30
- 754s - loss: 0.0099 - acc: 0.9975 - val_loss: 0.0100 - val_acc: 0.9975
Epoch 5/30
- 754s - loss: 0.0097 - acc: 0.9975 - val_loss: 0.0099 - val_acc: 0.9976
Epoch 6/30
- 753s - loss: 0.0097 - acc: 0.9975 - val_loss: 0.0098 - val_acc: 0.9975
Epoch 7/30
- 752s - loss: 0.0092 - acc: 0.9976 - val_loss: 0.0097 - val_acc: 0.9975
Epoch 8/30
- 755s - loss: 0.0092 - acc: 0.9976 - val_loss: 0.0094 - val_acc: 0.9976
Epoch 9/30
- 751s - loss: 0.0091 - acc: 0.9976 - val_loss: 0.0095 - val_acc: 0.9976
Epoch 10/30
- 751s - loss: 0.0087 - acc: 0.9977 - val_loss: 0.0098 - val_acc: 0.9976
Epoch 11/30
- 758s - loss: 0.0087 - acc: 0.9977 - val_loss: 0.0094 - val_acc: 0.9976
Epoch 00011: early stopping
###Markdown
Complete model graph loss
###Code
sns.set_style("whitegrid")
fig, (ax1, ax2) = plt.subplots(1, 2, sharex='col', figsize=(20, 7))
ax1.plot(history.history['loss'], label='Train loss')
ax1.plot(history.history['val_loss'], label='Validation loss')
ax1.legend(loc='best')
ax1.set_title('Loss')
ax2.plot(history.history['acc'], label='Train Accuracy')
ax2.plot(history.history['val_acc'], label='Validation accuracy')
ax2.legend(loc='best')
ax2.set_title('Accuracy')
plt.xlabel('Epochs')
sns.despine()
plt.show()
###Output
_____no_output_____
###Markdown
Find best threshold value
###Code
lastFullValPred = np.empty((0, N_CLASSES))
lastFullValLabels = np.empty((0, N_CLASSES))
for i in range(STEP_SIZE_VALID+1):
im, lbl = next(valid_generator)
scores = model.predict(im, batch_size=valid_generator.batch_size)
lastFullValPred = np.append(lastFullValPred, scores, axis=0)
lastFullValLabels = np.append(lastFullValLabels, lbl, axis=0)
print(lastFullValPred.shape, lastFullValLabels.shape)
threshold, best_score = find_best_fixed_threshold(lastFullValPred, lastFullValLabels, do_plot=True)
###Output
thr=0.140 F2=0.489
###Markdown
Apply model to test set and output predictions
###Code
test_generator.reset()
STEP_SIZE_TEST = test_generator.n//test_generator.batch_size
preds = model.predict_generator(test_generator, steps=STEP_SIZE_TEST)
predictions = []
for pred_ar in preds:
valid = []
for idx, pred in enumerate(pred_ar):
if pred > threshold:
valid.append(idx)
if len(valid) == 0:
valid.append(np.argmax(pred_ar))
predictions.append(valid)
filenames = test_generator.filenames
label_map = {valid_generator.class_indices[k] : k for k in valid_generator.class_indices}
results = pd.DataFrame({'id':filenames, 'attribute_ids':predictions})
results['id'] = results['id'].map(lambda x: str(x)[:-4])
results['attribute_ids'] = results['attribute_ids'].apply(lambda x: list(map(label_map.get, x)))
results["attribute_ids"] = results["attribute_ids"].apply(lambda x: ' '.join(x))
results.to_csv('submission.csv',index=False)
results.head(10)
###Output
_____no_output_____
|
Chloe_Week-6_Exercise-2.ipynb
|
###Markdown
Week 6 Exercise 2 This notebook is derived from Eric Kansa's, found here (https://github.com/ekansa/open-context-jupyter). It demonstrates how to access Open Context's API and introduces you to data structures we haven't really encountered. You'll be returning JSON with this activity, a format that is language-neutral and is great for dealing with data structured in a nested, hierarchical way. This API is accessed with Python, a programming language we haven't really used. Luckily, all you'll have to do is copy and paste the code - but as you're copying and pasting, remember to actually inspect the code and try to see if you can understand what each part of it is doing. When using this notebook, you'll see text instructions like our typical exercises. We'll use the benefit of Jupyter notebooks to intersperse code into these instructions, and you'll be able to do the activity right here! When you want to add a code cell, remember to just click on the + symbol in the toolbar above - this will start a new cell (which will default to code). 1. First thing we'll do is make the requests module available. This will allow us to send HTTP requests using Python.`import requests`
###Code
import requests
###Output
_____no_output_____
###Markdown
2. Then we'll create a variable called "api_search_url" and give it a value. Note that the value we're using is the Open Context search endpoint, which returns JSON. `api_search_url = 'https://opencontext.org/search/.json?'`
###Code
api_search_url = 'https://opencontext.org/search/.json?'
###Output
_____no_output_____
###Markdown
3. Now we'll create a dictionary in Python - a collection of data stored as key:value pairs. In this case, we are creating a parameter that limits the Open Context search to just Roman data. `params = { 'q': 'roman' }`
###Code
params = {
    'q': 'roman'
}
###Output
_____no_output_____
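###Markdown
A dictionary can hold more than one query parameter at a time. As a small sketch, the `rows` and `start` names below are assumed from the paging links (`next-json`) that you'll see in the API's response further down; they simply ask for 20 results starting at record 20.
```python
# Several query parameters in one dictionary (paging values are illustrative)
params_paged = {
    'q': 'roman',   # text search term
    'rows': 20,     # results per page
    'start': 20     # offset of the first result
}
paged_response = requests.get(api_search_url, params=params_paged)
print(paged_response.url)
```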
###Markdown
4. We can check to make sure we got the dictionary right. `params`
###Code
params
###Output
_____no_output_____
###Markdown
5. We will now create a variable that stores the response from the Open Context API, sending our request with the parameters we've set up. `response = requests.get(api_search_url, params=params)`
###Code
response = requests.get(api_search_url, params=params)
###Output
_____no_output_____
###Markdown
6. This next command shows us the URL that is sent to the API. `print('Here\'s the formatted url that gets sent to the Open Context API:\n{}\n'.format(response.url))`
###Code
print('Here\'s the formatted url that gets sent to the Open Context API:\n{}\n'.format(response.url))
###Output
Here's the formatted url that gets sent to the Open Context API:
https://opencontext.org/search/.json?q=roman
###Markdown
7. And this command checks the status code of the response to make sure no errors occurred. `if response.status_code == requests.codes.ok: print('All ok')elif response.status_code == 403: print('There was an authentication error. Did you paste your API above?')else: print('There was a problem. Error code: {}'.format(response.status_code)) print('Try running this cell again.')`
###Code
if response.status_code == requests.codes.ok:
print('All ok')
elif response.status_code == 403:
print('There was an authentication error. Did you paste your API above?')
else:
print('There was a problem. Error code: {}'.format(response.status_code))
print('Try running this cell again.')
###Output
All ok
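###Markdown
As an aside, requests has a built-in shortcut for this check: `raise_for_status()` raises an exception for any 4xx/5xx status code and does nothing when the request succeeded. This is just an alternative sketch of the same idea.
```python
# Alternative: let requests raise an exception on any HTTP error code
try:
    response.raise_for_status()
    print('All ok')
except requests.exceptions.HTTPError as error:
    print('There was a problem: {}'.format(error))
```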
###Markdown
8. To get the API's JSON results (what we asked it to grab), we'll create a variable called "data." Then see what the JSON data looks like. `data = response.json()``data`
###Code
data = response.json()
data
###Output
_____no_output_____
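###Markdown
Because `response.json()` parses the response into nested Python dictionaries and lists, you can drill into it with ordinary indexing. The keys used below (`label`, `totalResults`, `oc-api:has-form-use-life-ranges`) all appear in the response; you'll see them printed more readably in the next step.
```python
# The parsed JSON is just nested dicts and lists
print(data['label'])          # the API's label
print(data['totalResults'])   # how many records matched 'roman'

# Each chronology facet is a dict inside a list
for facet in data['oc-api:has-form-use-life-ranges'][:3]:
    print(facet['count'], facet['start'], facet['stop'])
```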
###Markdown
9. To make the JSON data easier to read, we'll add some color and indent some of the text. First we need to bring in the Pygments library. `import json``from pygments import highlight, lexers, formatters`Then indent the text. `formatted_data = json.dumps(data, indent=2)`Then color the text. `highlighted_data = highlight(formatted_data, lexers.JsonLexer(), formatters.TerminalFormatter())`
###Code
import json
from pygments import highlight, lexers, formatters
formatted_data = json.dumps(data, indent=2)
highlighted_data = highlight(formatted_data, lexers.JsonLexer(), formatters.TerminalFormatter())
###Output
_____no_output_____
###Markdown
10. Now display the new JSON data. `print(highlighted_data)`
###Code
print(highlighted_data)
###Output
{
[94m"@context"[39;49;00m: [
[33m"https://opencontext.org/contexts/search.json"[39;49;00m,
[33m"http://geojson.org/geojson-ld/geojson-context.jsonld"[39;49;00m
],
[94m"id"[39;49;00m: [33m"http://opencontext.org/search/.json?q=roman"[39;49;00m,
[94m"label"[39;49;00m: [33m"Open Context API"[39;49;00m,
[94m"dcmi:modified"[39;49;00m: [33m"2019-12-04T22:10:19Z"[39;49;00m,
[94m"dcmi:created"[39;49;00m: [33m"2019-11-06T05:10:54Z"[39;49;00m,
[94m"oai-pmh:earliestDatestamp"[39;49;00m: [33m"2006-02-28T00:00:00Z"[39;49;00m,
[94m"start"[39;49;00m: [33m"-336999"[39;49;00m,
[94m"stop"[39;49;00m: [33m"2015"[39;49;00m,
[94m"dc-terms:temporal"[39;49;00m: [33m"-336999/2015"[39;49;00m,
[94m"totalResults"[39;49;00m: [34m77460[39;49;00m,
[94m"startIndex"[39;49;00m: [34m0[39;49;00m,
[94m"itemsPerPage"[39;49;00m: [34m20[39;49;00m,
[94m"next"[39;49;00m: [33m"https://opencontext.org/search/?q=roman&rows=20&start=20"[39;49;00m,
[94m"next-json"[39;49;00m: [33m"https://opencontext.org/search/.json?q=roman&rows=20&start=20"[39;49;00m,
[94m"last"[39;49;00m: [33m"https://opencontext.org/search/?q=roman&rows=20&start=77440"[39;49;00m,
[94m"last-json"[39;49;00m: [33m"https://opencontext.org/search/.json?q=roman&rows=20&start=77440"[39;49;00m,
[94m"oc-api:active-sorting"[39;49;00m: [
{
[94m"id"[39;49;00m: [33m"#current-sort-1"[39;49;00m,
[94m"type"[39;49;00m: [33m"oc-api:interest-score"[39;49;00m,
[94m"label"[39;49;00m: [33m"Interest score"[39;49;00m,
[94m"oc-api:sort-order"[39;49;00m: [33m"descending"[39;49;00m
}
],
[94m"oc-api:has-sorting"[39;49;00m: [
{
[94m"id"[39;49;00m: [33m"https://opencontext.org/search/?q=roman&sort=item--asc"[39;49;00m,
[94m"json"[39;49;00m: [33m"https://opencontext.org/search/.json?q=roman&sort=item--asc"[39;49;00m,
[94m"type"[39;49;00m: [33m"oc-api:sort-item"[39;49;00m,
[94m"label"[39;49;00m: [33m"Item (type, provenance, label)"[39;49;00m,
[94m"oc-api:sort-order"[39;49;00m: [33m"ascending"[39;49;00m
},
{
[94m"id"[39;49;00m: [33m"https://opencontext.org/search/?q=roman&sort=item--desc"[39;49;00m,
[94m"json"[39;49;00m: [33m"https://opencontext.org/search/.json?q=roman&sort=item--desc"[39;49;00m,
[94m"type"[39;49;00m: [33m"oc-api:sort-item"[39;49;00m,
[94m"label"[39;49;00m: [33m"Item (type, provenance, label)"[39;49;00m,
[94m"oc-api:sort-order"[39;49;00m: [33m"descending"[39;49;00m
},
{
[94m"id"[39;49;00m: [33m"https://opencontext.org/search/?q=roman&sort=updated--asc"[39;49;00m,
[94m"json"[39;49;00m: [33m"https://opencontext.org/search/.json?q=roman&sort=updated--asc"[39;49;00m,
[94m"type"[39;49;00m: [33m"oc-api:sort-updated"[39;49;00m,
[94m"label"[39;49;00m: [33m"Updated"[39;49;00m,
[94m"oc-api:sort-order"[39;49;00m: [33m"ascending"[39;49;00m
},
{
[94m"id"[39;49;00m: [33m"https://opencontext.org/search/?q=roman&sort=updated--desc"[39;49;00m,
[94m"json"[39;49;00m: [33m"https://opencontext.org/search/.json?q=roman&sort=updated--desc"[39;49;00m,
[94m"type"[39;49;00m: [33m"oc-api:sort-updated"[39;49;00m,
[94m"label"[39;49;00m: [33m"Updated"[39;49;00m,
[94m"oc-api:sort-order"[39;49;00m: [33m"descending"[39;49;00m
}
],
[94m"oc-api:active-filters"[39;49;00m: [
{
[94m"id"[39;49;00m: [33m"#filter-1"[39;49;00m,
[94m"oc-api:filter"[39;49;00m: [33m"Current Text Search Filter"[39;49;00m,
[94m"label"[39;49;00m: [33m"Search Term: 'roman'"[39;49;00m,
[94m"oc-api:remove"[39;49;00m: [33m"https://opencontext.org/search/"[39;49;00m,
[94m"oc-api:remove-json"[39;49;00m: [33m"https://opencontext.org/search/.json"[39;49;00m
}
],
[94m"oc-api:has-text-search"[39;49;00m: [
{
[94m"id"[39;49;00m: [33m"#textfield-keyword-search"[39;49;00m,
[94m"label"[39;49;00m: [33m"Filter by Text Search"[39;49;00m,
[94m"oc-api:search-term"[39;49;00m: [33m"roman"[39;49;00m,
[94m"oc-api:template"[39;49;00m: [33m"https://opencontext.org/search/?q={SearchTerm}"[39;49;00m,
[94m"oc-api:template-json"[39;49;00m: [33m"https://opencontext.org/search/.json?q={SearchTerm}"[39;49;00m
}
],
[94m"oc-api:has-form-use-life-ranges"[39;49;00m: [
{
[94m"id"[39;49;00m: [33m"https://opencontext.org/search/?form-chronotile=10M-0000100010101&q=roman"[39;49;00m,
[94m"json"[39;49;00m: [33m"https://opencontext.org/search/.json?form-chronotile=10M-0000100010101&q=roman"[39;49;00m,
[94m"count"[39;49;00m: [34m2[39;49;00m,
[94m"category"[39;49;00m: [33m"oc-api:chrono-facet"[39;49;00m,
[94m"start"[39;49;00m: [33m"-337404"[39;49;00m,
[94m"stop"[39;49;00m: [33m"1950"[39;49;00m,
[94m"properties"[39;49;00m: {
[94m"early bce/ce"[39;49;00m: [34m-337405[39;49;00m,
[94m"late bce/ce"[39;49;00m: [34m1950[39;49;00m
}
},
{
[94m"id"[39;49;00m: [33m"https://opencontext.org/search/?form-chronotile=10M-0000001002013&q=roman"[39;49;00m,
[94m"json"[39;49;00m: [33m"https://opencontext.org/search/.json?form-chronotile=10M-0000001002013&q=roman"[39;49;00m,
[94m"count"[39;49;00m: [34m1[39;49;00m,
[94m"category"[39;49;00m: [33m"oc-api:chrono-facet"[39;49;00m,
[94m"start"[39;49;00m: [33m"-81057"[39;49;00m,
[94m"stop"[39;49;00m: [33m"-9035"[39;49;00m,
[94m"properties"[39;49;00m: {
[94m"early bce/ce"[39;49;00m: [34m-81058[39;49;00m,
[94m"late bce/ce"[39;49;00m: [34m-9036[39;49;00m
}
},
{
[94m"id"[39;49;00m: [33m"https://opencontext.org/search/?form-chronotile=10M-0000000001101&q=roman"[39;49;00m,
[94m"json"[39;49;00m: [33m"https://opencontext.org/search/.json?form-chronotile=10M-0000000001101&q=roman"[39;49;00m,
[94m"count"[39;49;00m: [34m2[39;49;00m,
[94m"category"[39;49;00m: [33m"oc-api:chrono-facet"[39;49;00m,
[94m"start"[39;49;00m: [33m"-15139"[39;49;00m,
[94m"stop"[39;49;00m: [33m"1950"[39;49;00m,
[94m"properties"[39;49;00m: {
[94m"early bce/ce"[39;49;00m: [34m-15140[39;49;00m,
[94m"late bce/ce"[39;49;00m: [34m1950[39;49;00m
}
},
{
[94m"id"[39;49;00m: [33m"https://opencontext.org/search/?form-chronotile=10M-0000000001021&q=roman"[39;49;00m,
[94m"json"[39;49;00m: [33m"https://opencontext.org/search/.json?form-chronotile=10M-0000000001021&q=roman"[39;49;00m,
[94m"count"[39;49;00m: [34m1[39;49;00m,
[94m"category"[39;49;00m: [33m"oc-api:chrono-facet"[39;49;00m,
[94m"start"[39;49;00m: [33m"-10256"[39;49;00m,
[94m"stop"[39;49;00m: [33m"-0490"[39;49;00m,
[94m"properties"[39;49;00m: {
[94m"early bce/ce"[39;49;00m: [34m-10257[39;49;00m,
[94m"late bce/ce"[39;49;00m: [34m-491[39;49;00m
}
},
{
[94m"id"[39;49;00m: [33m"https://opencontext.org/search/?form-chronotile=10M-0000000000111&q=roman"[39;49;00m,
[94m"json"[39;49;00m: [33m"https://opencontext.org/search/.json?form-chronotile=10M-0000000000111&q=roman"[39;49;00m,
[94m"count"[39;49;00m: [34m3[39;49;00m,
[94m"category"[39;49;00m: [33m"oc-api:chrono-facet"[39;49;00m,
[94m"start"[39;49;00m: [33m"-7815"[39;49;00m,
[94m"stop"[39;49;00m: [33m"1950"[39;49;00m,
[94m"properties"[39;49;00m: {
[94m"early bce/ce"[39;49;00m: [34m-7816[39;49;00m,
[94m"late bce/ce"[39;49;00m: [34m1950[39;49;00m
}
},
{
[94m"id"[39;49;00m: [33m"https://opencontext.org/search/?form-chronotile=10M-0000000000112&q=roman"[39;49;00m,
[94m"json"[39;49;00m: [33m"https://opencontext.org/search/.json?form-chronotile=10M-0000000000112&q=roman"[39;49;00m,
[94m"count"[39;49;00m: [34m6[39;49;00m,
[94m"category"[39;49;00m: [33m"oc-api:chrono-facet"[39;49;00m,
[94m"start"[39;49;00m: [33m"-6594"[39;49;00m,
[94m"stop"[39;49;00m: [33m"0729"[39;49;00m,
[94m"properties"[39;49;00m: {
[94m"early bce/ce"[39;49;00m: [34m-6595[39;49;00m,
[94m"late bce/ce"[39;49;00m: [34m729[39;49;00m
}
},
{
[94m"id"[39;49;00m: [33m"https://opencontext.org/search/?form-chronotile=10M-0000000000330&q=roman"[39;49;00m,
[94m"json"[39;49;00m: [33m"https://opencontext.org/search/.json?form-chronotile=10M-0000000000330&q=roman"[39;49;00m,
[94m"count"[39;49;00m: [34m131[39;49;00m,
[94m"category"[39;49;00m: [33m"oc-api:chrono-facet"[39;49;00m,
[94m"start"[39;49;00m: [33m"-6594"[39;49;00m,
[94m"stop"[39;49;00m: [33m"-5373"[39;49;00m,
[94m"properties"[39;49;00m: {
[94m"early bce/ce"[39;49;00m: [34m-6595[39;49;00m,
[94m"late bce/ce"[39;49;00m: [34m-5374[39;49;00m
}
},
{
[94m"id"[39;49;00m: [33m"https://opencontext.org/search/?form-chronotile=10M-0000000000100&q=roman"[39;49;00m,
[94m"json"[39;49;00m: [33m"https://opencontext.org/search/.json?form-chronotile=10M-0000000000100&q=roman"[39;49;00m,
[94m"count"[39;49;00m: [34m59[39;49;00m,
[94m"category"[39;49;00m: [33m"oc-api:chrono-facet"[39;49;00m,
[94m"start"[39;49;00m: [33m"-4153"[39;49;00m,
[94m"stop"[39;49;00m: [33m"1950"[39;49;00m,
[94m"properties"[39;49;00m: {
[94m"early bce/ce"[39;49;00m: [34m-4154[39;49;00m,
[94m"late bce/ce"[39;49;00m: [34m1950[39;49;00m
}
},
{
[94m"id"[39;49;00m: [33m"https://opencontext.org/search/?form-chronotile=10M-0000000000120&q=roman"[39;49;00m,
[94m"json"[39;49;00m: [33m"https://opencontext.org/search/.json?form-chronotile=10M-0000000000120&q=roman"[39;49;00m,
[94m"count"[39;49;00m: [34m5[39;49;00m,
[94m"category"[39;49;00m: [33m"oc-api:chrono-facet"[39;49;00m,
[94m"start"[39;49;00m: [33m"-4153"[39;49;00m,
[94m"stop"[39;49;00m: [33m"-0490"[39;49;00m,
[94m"properties"[39;49;00m: {
[94m"early bce/ce"[39;49;00m: [34m-4154[39;49;00m,
[94m"late bce/ce"[39;49;00m: [34m-491[39;49;00m
}
},
{
[94m"id"[39;49;00m: [33m"https://opencontext.org/search/?form-chronotile=10M-0000000000122&q=roman"[39;49;00m,
[94m"json"[39;49;00m: [33m"https://opencontext.org/search/.json?form-chronotile=10M-0000000000122&q=roman"[39;49;00m,
[94m"count"[39;49;00m: [34m6[39;49;00m,
[94m"category"[39;49;00m: [33m"oc-api:chrono-facet"[39;49;00m,
[94m"start"[39;49;00m: [33m"-4153"[39;49;00m,
[94m"stop"[39;49;00m: [33m"-1711"[39;49;00m,
[94m"properties"[39;49;00m: {
[94m"early bce/ce"[39;49;00m: [34m-4154[39;49;00m,
[94m"late bce/ce"[39;49;00m: [34m-1712[39;49;00m
}
},
{
[94m"id"[39;49;00m: [33m"https://opencontext.org/search/?form-chronotile=10M-0000000000011&q=roman"[39;49;00m,
[94m"json"[39;49;00m: [33m"https://opencontext.org/search/.json?form-chronotile=10M-0000000000011&q=roman"[39;49;00m,
[94m"count"[39;49;00m: [34m5[39;49;00m,
[94m"category"[39;49;00m: [33m"oc-api:chrono-facet"[39;49;00m,
[94m"start"[39;49;00m: [33m"-2932"[39;49;00m,
[94m"stop"[39;49;00m: [33m"1950"[39;49;00m,
[94m"properties"[39;49;00m: {
[94m"early bce/ce"[39;49;00m: [34m-2933[39;49;00m,
[94m"late bce/ce"[39;49;00m: [34m1950[39;49;00m
}
},
{
[94m"id"[39;49;00m: [33m"https://opencontext.org/search/?form-chronotile=10M-0000000000031&q=roman"[39;49;00m,
[94m"json"[39;49;00m: [33m"https://opencontext.org/search/.json?form-chronotile=10M-0000000000031&q=roman"[39;49;00m,
[94m"count"[39;49;00m: [34m4[39;49;00m,
[94m"category"[39;49;00m: [33m"oc-api:chrono-facet"[39;49;00m,
[94m"start"[39;49;00m: [33m"-2932"[39;49;00m,
[94m"stop"[39;49;00m: [33m"-0490"[39;49;00m,
[94m"properties"[39;49;00m: {
[94m"early bce/ce"[39;49;00m: [34m-2933[39;49;00m,
[94m"late bce/ce"[39;49;00m: [34m-491[39;49;00m
}
},
{
[94m"id"[39;49;00m: [33m"https://opencontext.org/search/?form-chronotile=10M-0000000000010&q=roman"[39;49;00m,
[94m"json"[39;49;00m: [33m"https://opencontext.org/search/.json?form-chronotile=10M-0000000000010&q=roman"[39;49;00m,
[94m"count"[39;49;00m: [34m5071[39;49;00m,
[94m"category"[39;49;00m: [33m"oc-api:chrono-facet"[39;49;00m,
[94m"start"[39;49;00m: [33m"-1711"[39;49;00m,
[94m"stop"[39;49;00m: [33m"1950"[39;49;00m,
[94m"properties"[39;49;00m: {
[94m"early bce/ce"[39;49;00m: [34m-1712[39;49;00m,
[94m"late bce/ce"[39;49;00m: [34m1950[39;49;00m
}
},
{
[94m"id"[39;49;00m: [33m"https://opencontext.org/search/?form-chronotile=10M-0000000000012&q=roman"[39;49;00m,
[94m"json"[39;49;00m: [33m"https://opencontext.org/search/.json?form-chronotile=10M-0000000000012&q=roman"[39;49;00m,
[94m"count"[39;49;00m: [34m3818[39;49;00m,
[94m"category"[39;49;00m: [33m"oc-api:chrono-facet"[39;49;00m,
[94m"start"[39;49;00m: [33m"-1711"[39;49;00m,
[94m"stop"[39;49;00m: [33m"0729"[39;49;00m,
[94m"properties"[39;49;00m: {
[94m"early bce/ce"[39;49;00m: [34m-1712[39;49;00m,
[94m"late bce/ce"[39;49;00m: [34m729[39;49;00m
}
},
{
[94m"id"[39;49;00m: [33m"https://opencontext.org/search/?form-chronotile=10M-0000000000030&q=roman"[39;49;00m,
[94m"json"[39;49;00m: [33m"https://opencontext.org/search/.json?form-chronotile=10M-0000000000030&q=roman"[39;49;00m,
[94m"count"[39;49;00m: [34m149[39;49;00m,
[94m"category"[39;49;00m: [33m"oc-api:chrono-facet"[39;49;00m,
[94m"start"[39;49;00m: [33m"-1711"[39;49;00m,
[94m"stop"[39;49;00m: [33m"-0490"[39;49;00m,
[94m"properties"[39;49;00m: {
[94m"early bce/ce"[39;49;00m: [34m-1712[39;49;00m,
[94m"late bce/ce"[39;49;00m: [34m-491[39;49;00m
}
},
{
[94m"id"[39;49;00m: [33m"https://opencontext.org/search/?form-chronotile=10M-0000000000001&q=roman"[39;49;00m,
[94m"json"[39;49;00m: [33m"https://opencontext.org/search/.json?form-chronotile=10M-0000000000001&q=roman"[39;49;00m,
[94m"count"[39;49;00m: [34m59097[39;49;00m,
[94m"category"[39;49;00m: [33m"oc-api:chrono-facet"[39;49;00m,
[94m"start"[39;49;00m: [33m"-0490"[39;49;00m,
[94m"stop"[39;49;00m: [33m"1950"[39;49;00m,
[94m"properties"[39;49;00m: {
[94m"early bce/ce"[39;49;00m: [34m-491[39;49;00m,
[94m"late bce/ce"[39;49;00m: [34m1950[39;49;00m
}
},
{
[94m"id"[39;49;00m: [33m"https://opencontext.org/search/?form-chronotile=10M-0000000000003&q=roman"[39;49;00m,
[94m"json"[39;49;00m: [33m"https://opencontext.org/search/.json?form-chronotile=10M-0000000000003&q=roman"[39;49;00m,
[94m"count"[39;49;00m: [34m5732[39;49;00m,
[94m"category"[39;49;00m: [33m"oc-api:chrono-facet"[39;49;00m,
[94m"start"[39;49;00m: [33m"-0490"[39;49;00m,
[94m"stop"[39;49;00m: [33m"0729"[39;49;00m,
[94m"properties"[39;49;00m: {
[94m"early bce/ce"[39;49;00m: [34m-491[39;49;00m,
[94m"late bce/ce"[39;49;00m: [34m729[39;49;00m
}
},
{
[94m"id"[39;49;00m: [33m"https://opencontext.org/search/?form-chronotile=10M-0000000000000&q=roman"[39;49;00m,
[94m"json"[39;49;00m: [33m"https://opencontext.org/search/.json?form-chronotile=10M-0000000000000&q=roman"[39;49;00m,
[94m"count"[39;49;00m: [34m251[39;49;00m,
[94m"category"[39;49;00m: [33m"oc-api:chrono-facet"[39;49;00m,
[94m"start"[39;49;00m: [33m"0729"[39;49;00m,
[94m"stop"[39;49;00m: [33m"1950"[39;49;00m,
[94m"properties"[39;49;00m: {
[94m"early bce/ce"[39;49;00m: [34m729[39;49;00m,
[94m"late bce/ce"[39;49;00m: [34m1950[39;49;00m
}
}
],
[94m"oc-api:max-disc-tile-zoom"[39;49;00m: [34m20[39;49;00m,
[94m"oc-api:response-tile-zoom"[39;49;00m: [34m6[39;49;00m,
[94m"oc-api:geotile-scope"[39;49;00m: [34mnull[39;49;00m,
[94m"oc-api:has-facets"[39;49;00m: [
{
[94m"id"[39;49;00m: [33m"#facet-context"[39;49;00m,
[94m"rdfs:isDefinedBy"[39;49;00m: [33m"oc-api:facet-context"[39;49;00m,
[94m"label"[39;49;00m: [33m"Context"[39;49;00m,
[94m"data-type"[39;49;00m: [33m"id"[39;49;00m,
[94m"type"[39;49;00m: [33m"oc-api:facet-context"[39;49;00m,
[94m"oc-api:has-id-options"[39;49;00m: [
{
[94m"id"[39;49;00m: [33m"https://opencontext.org/search/Turkey?q=roman"[39;49;00m,
[94m"json"[39;49;00m: [33m"https://opencontext.org/search/Turkey.json?q=roman"[39;49;00m,
[94m"rdfs:isDefinedBy"[39;49;00m: [33m"http://opencontext.org/subjects/1_Global_Spatial"[39;49;00m,
[94m"label"[39;49;00m: [33m"Turkey"[39;49;00m,
[94m"count"[39;49;00m: [34m52876[39;49;00m,
[94m"slug"[39;49;00m: [33m"turkey"[39;49;00m
},
{
[94m"id"[39;49;00m: [33m"https://opencontext.org/search/Cyprus?q=roman"[39;49;00m,
[94m"json"[39;49;00m: [33m"https://opencontext.org/search/Cyprus.json?q=roman"[39;49;00m,
[94m"rdfs:isDefinedBy"[39;49;00m: [33m"http://opencontext.org/subjects/67D9F00D-14E6-4A1B-3A61-EC92FC774098"[39;49;00m,
[94m"label"[39;49;00m: [33m"Cyprus"[39;49;00m,
[94m"count"[39;49;00m: [34m8822[39;49;00m,
[94m"slug"[39;49;00m: [33m"cyprus"[39;49;00m
},
{
[94m"id"[39;49;00m: [33m"https://opencontext.org/search/Italy?q=roman"[39;49;00m,
[94m"json"[39;49;00m: [33m"https://opencontext.org/search/Italy.json?q=roman"[39;49;00m,
[94m"rdfs:isDefinedBy"[39;49;00m: [33m"http://opencontext.org/subjects/B66A08F2-5D96-4DD4-1AB1-32880C9A8D9D"[39;49;00m,
[94m"label"[39;49;00m: [33m"Italy"[39;49;00m,
[94m"count"[39;49;00m: [34m5065[39;49;00m,
[94m"slug"[39;49;00m: [33m"italy"[39;49;00m
},
{
[94m"id"[39;49;00m: [33m"https://opencontext.org/search/Jordan?q=roman"[39;49;00m,
[94m"json"[39;49;00m: [33m"https://opencontext.org/search/Jordan.json?q=roman"[39;49;00m,
[94m"rdfs:isDefinedBy"[39;49;00m: [33m"http://opencontext.org/subjects/D9AE02E5-C3F3-41D0-EB3A-39798F63GGGG"[39;49;00m,
[94m"label"[39;49;00m: [33m"Jordan"[39;49;00m,
[94m"count"[39;49;00m: [34m2777[39;49;00m,
[94m"slug"[39;49;00m: [33m"jordan"[39;49;00m
},
{
[94m"id"[39;49;00m: [33m"https://opencontext.org/search/Germany?q=roman"[39;49;00m,
[94m"json"[39;49;00m: [33m"https://opencontext.org/search/Germany.json?q=roman"[39;49;00m,
[94m"rdfs:isDefinedBy"[39;49;00m: [33m"http://opencontext.org/subjects/2_Global_Germany"[39;49;00m,
[94m"label"[39;49;00m: [33m"Germany"[39;49;00m,
[94m"count"[39;49;00m: [34m457[39;49;00m,
[94m"slug"[39;49;00m: [33m"germany"[39;49;00m
},
{
[94m"id"[39;49;00m: [33m"https://opencontext.org/search/Egypt?q=roman"[39;49;00m,
[94m"json"[39;49;00m: [33m"https://opencontext.org/search/Egypt.json?q=roman"[39;49;00m,
[94m"rdfs:isDefinedBy"[39;49;00m: [33m"http://opencontext.org/subjects/A2257E54-3B4F-4DA5-0E50-A428ECEB47A2"[39;49;00m,
[94m"label"[39;49;00m: [33m"Egypt"[39;49;00m,
[94m"count"[39;49;00m: [34m179[39;49;00m,
[94m"slug"[39;49;00m: [33m"egypt"[39;49;00m
},
{
[94m"id"[39;49;00m: [33m"https://opencontext.org/search/Israel?q=roman"[39;49;00m,
[94m"json"[39;49;00m: [33m"https://opencontext.org/search/Israel.json?q=roman"[39;49;00m,
[94m"rdfs:isDefinedBy"[39;49;00m: [33m"http://opencontext.org/subjects/3_Global_Israel"[39;49;00m,
[94m"label"[39;49;00m: [33m"Israel"[39;49;00m,
[94m"count"[39;49;00m: [34m79[39;49;00m,
[94m"slug"[39;49;00m: [33m"israel"[39;49;00m
},
{
[94m"id"[39;49;00m: [33m"https://opencontext.org/search/Palestinian+Authority?q=roman"[39;49;00m,
[94m"json"[39;49;00m: [33m"https://opencontext.org/search/Palestinian+Authority.json?q=roman"[39;49;00m,
[94m"rdfs:isDefinedBy"[39;49;00m: [33m"http://opencontext.org/subjects/4_global_Palestine"[39;49;00m,
[94m"label"[39;49;00m: [33m"Palestinian Authority"[39;49;00m,
[94m"count"[39;49;00m: [34m39[39;49;00m,
[94m"slug"[39;49;00m: [33m"palestinian-authority"[39;49;00m
},
{
[94m"id"[39;49;00m: [33m"https://opencontext.org/search/Poland?q=roman"[39;49;00m,
[94m"json"[39;49;00m: [33m"https://opencontext.org/search/Poland.json?q=roman"[39;49;00m,
[94m"rdfs:isDefinedBy"[39;49;00m: [33m"http://opencontext.org/subjects/a2f952b7-baef-49d8-9cfb-c32a630ac64e"[39;49;00m,
[94m"label"[39;49;00m: [33m"Poland"[39;49;00m,
[94m"count"[39;49;00m: [34m39[39;49;00m,
[94m"slug"[39;49;00m: [33m"poland"[39;49;00m
},
{
[94m"id"[39;49;00m: [33m"https://opencontext.org/search/Spain?q=roman"[39;49;00m,
[94m"json"[39;49;00m: [33m"https://opencontext.org/search/Spain.json?q=roman"[39;49;00m,
[94m"rdfs:isDefinedBy"[39;49;00m: [33m"http://opencontext.org/subjects/3776e3e7-91ea-4c35-9481-0b1fae3afa9a"[39;49;00m,
[94m"label"[39;49;00m: [33m"Spain"[39;49;00m,
[94m"count"[39;49;00m: [34m28[39;49;00m,
[94m"slug"[39;49;00m: [33m"spain"[39;49;00m
},
{
[94m"id"[39;49;00m: [33m"https://opencontext.org/search/United+States?q=roman"[39;49;00m,
[94m"json"[39;49;00m: [33m"https://opencontext.org/search/United+States.json?q=roman"[39;49;00m,
[94m"rdfs:isDefinedBy"[39;49;00m: [33m"http://opencontext.org/subjects/2A1B75E6-8C79-49B9-873A-A2E006669691"[39;49;00m,
[94m"label"[39;49;00m: [33m"United States"[39;49;00m,
[94m"count"[39;49;00m: [34m12[39;49;00m,
[94m"slug"[39;49;00m: [33m"united-states"[39;49;00m
},
{
[94m"id"[39;49;00m: [33m"https://opencontext.org/search/Mexico?q=roman"[39;49;00m,
[94m"json"[39;49;00m: [33m"https://opencontext.org/search/Mexico.json?q=roman"[39;49;00m,
[94m"rdfs:isDefinedBy"[39;49;00m: [33m"http://opencontext.org/subjects/ee431393-7ab1-4d7a-abec-bb05f53babda"[39;49;00m,
[94m"label"[39;49;00m: [33m"Mexico"[39;49;00m,
[94m"count"[39;49;00m: [34m1[39;49;00m,
[94m"slug"[39;49;00m: [33m"mexico"[39;49;00m
}
]
},
        {
            "id": "#facet-item-type",
            "rdfs:isDefinedBy": "oc-api:facet-item-type",
            "label": "Open Context Type",
            "data-type": "id",
            "type": "oc-api:facet-item-type",
            "oc-api:has-id-options": [
                {
                    "id": "https://opencontext.org/search/?q=roman&type=subjects",
                    "json": "https://opencontext.org/search/.json?q=roman&type=subjects",
                    "rdfs:isDefinedBy": "oc-gen:subjects",
                    "label": "Subjects",
                    "count": 57603,
                    "slug": "subjects"
                },
                {
                    "id": "https://opencontext.org/search/?q=roman&type=media",
                    "json": "https://opencontext.org/search/.json?q=roman&type=media",
                    "rdfs:isDefinedBy": "oc-gen:media",
                    "label": "Media",
                    "count": 13703,
                    "slug": "media"
                },
                {
                    "id": "https://opencontext.org/search/?q=roman&type=types",
                    "json": "https://opencontext.org/search/.json?q=roman&type=types",
                    "rdfs:isDefinedBy": "oc-gen:types",
                    "label": "Descriptive types",
                    "count": 4341,
                    "slug": "types"
                },
                {
                    "id": "https://opencontext.org/search/?q=roman&type=predicates",
                    "json": "https://opencontext.org/search/.json?q=roman&type=predicates",
                    "rdfs:isDefinedBy": "oc-gen:predicates",
                    "label": "Predicates or properties",
                    "count": 1008,
                    "slug": "predicates"
                },
                {
                    "id": "https://opencontext.org/search/?q=roman&type=documents",
                    "json": "https://opencontext.org/search/.json?q=roman&type=documents",
                    "rdfs:isDefinedBy": "oc-gen:documents",
                    "label": "Documents",
                    "count": 526,
                    "slug": "documents"
                },
                {
                    "id": "https://opencontext.org/search/?q=roman&type=persons",
                    "json": "https://opencontext.org/search/.json?q=roman&type=persons",
                    "rdfs:isDefinedBy": "oc-gen:persons",
                    "label": "Persons or Organizations",
                    "count": 261,
                    "slug": "persons"
                },
                {
                    "id": "https://opencontext.org/search/?q=roman&type=projects",
                    "json": "https://opencontext.org/search/.json?q=roman&type=projects",
                    "rdfs:isDefinedBy": "oc-gen:projects",
                    "label": "Projects or Collections",
                    "count": 16,
                    "slug": "projects"
                },
                {
                    "id": "https://opencontext.org/search/?q=roman&type=tables",
                    "json": "https://opencontext.org/search/.json?q=roman&type=tables",
                    "rdfs:isDefinedBy": "oc-gen:tables",
                    "label": "Tables",
                    "count": 2,
                    "slug": "tables"
                }
            ]
        },
        {
            "id": "#facet-prop-oc-gen-subjects",
            "rdfs:isDefinedBy": "http://opencontext.org/vocabularies/oc-general/subjects",
            "label": "(Related) Subjects",
            "slug": "rel--oc-gen-subjects",
            "data-type": "id",
            "type": "oc-api:facet-prop",
            "oc-api:has-id-options": [
                {
                    "id": "https://opencontext.org/search/?prop=rel--oc-gen-cat-loc-or-context&q=roman",
                    "json": "https://opencontext.org/search/.json?prop=rel--oc-gen-cat-loc-or-context&q=roman",
                    "rdfs:isDefinedBy": "http://opencontext.org/vocabularies/oc-general/cat-loc-or-context",
                    "label": "Location or Context",
                    "count": 7858,
                    "slug": "rel--oc-gen-cat-loc-or-context"
                },
                {
                    "id": "https://opencontext.org/search/?prop=rel--oc-gen-cat-object&q=roman",
                    "json": "https://opencontext.org/search/.json?prop=rel--oc-gen-cat-object&q=roman",
                    "rdfs:isDefinedBy": "http://opencontext.org/vocabularies/oc-general/cat-object",
                    "label": "Object",
                    "count": 3255,
                    "slug": "rel--oc-gen-cat-object"
                },
                {
                    "id": "https://opencontext.org/search/?prop=rel--oc-gen-cat-sample-col&q=roman",
                    "json": "https://opencontext.org/search/.json?prop=rel--oc-gen-cat-sample-col&q=roman",
                    "rdfs:isDefinedBy": "http://opencontext.org/vocabularies/oc-general/cat-sample-col",
                    "label": "Sample, Collection, or Aggregation",
                    "count": 2090,
                    "slug": "rel--oc-gen-cat-sample-col"
                }
            ]
        },
        {
            "id": "#facet-project",
            "rdfs:isDefinedBy": "oc-api:facet-project",
            "label": "Project",
            "data-type": "id",
            "type": "oc-api:facet-project",
            "oc-api:has-id-options": [
                {
                    "id": "https://opencontext.org/search/?proj=117-avkat-archaeological-project&q=roman",
                    "json": "https://opencontext.org/search/.json?proj=117-avkat-archaeological-project&q=roman",
                    "rdfs:isDefinedBy": "http://opencontext.org/projects/02b55e8c-e9b1-49e5-8edf-0afeea10e2be",
                    "label": "Avkat Archaeological Project",
                    "count": 51985,
                    "slug": "117-avkat-archaeological-project"
                },
                {
                    "id": "https://opencontext.org/search/?proj=24-murlo&q=roman",
                    "json": "https://opencontext.org/search/.json?proj=24-murlo&q=roman",
                    "rdfs:isDefinedBy": "http://opencontext.org/projects/DF043419-F23B-41DA-7E4D-EE52AF22F92F",
                    "label": "Murlo",
                    "count": 9179,
                    "slug": "24-murlo"
                },
                {
                    "id": "https://opencontext.org/search/?proj=42-pyla-koutsopetria-archaeological-project-i-pedestrian&q=roman",
                    "json": "https://opencontext.org/search/.json?proj=42-pyla-koutsopetria-archaeological-project-i-pedestrian&q=roman",
                    "rdfs:isDefinedBy": "http://opencontext.org/projects/3F6DCD13-A476-488E-ED10-47D25513FCB2",
                    "label": "Pyla-Koutsopetria Archaeological Project I: Pedestrian Survey",
                    "count": 6199,
                    "slug": "42-pyla-koutsopetria-archaeological-project-i-pedestrian"
                },
                {
                    "id": "https://opencontext.org/search/?proj=10-petra-great-temple-excavations&q=roman",
                    "json": "https://opencontext.org/search/.json?proj=10-petra-great-temple-excavations&q=roman",
                    "rdfs:isDefinedBy": "http://opencontext.org/projects/A5DDBEA2-B3C8-43F9-8151-33343CBDC857",
                    "label": "Petra Great Temple Excavations",
                    "count": 2939,
                    "slug": "10-petra-great-temple-excavations"
                },
                {
                    "id": "https://opencontext.org/search/?proj=125-pyla-koutsopetria-archaeological-project-ii-geophysics&q=roman",
                    "json": "https://opencontext.org/search/.json?proj=125-pyla-koutsopetria-archaeological-project-ii-geophysics&q=roman",
                    "rdfs:isDefinedBy": "http://opencontext.org/projects/b9472eec-e622-4838-b6d8-5a2958b9d4d3",
                    "label": "Pyla-Koutsopetria Archaeological Project II: Geophysics and Excavation",
                    "count": 2583,
                    "slug": "125-pyla-koutsopetria-archaeological-project-ii-geophysics"
                },
                {
                    "id": "https://opencontext.org/search/?proj=104-the-gabii-project&q=roman",
                    "json": "https://opencontext.org/search/.json?proj=104-the-gabii-project&q=roman",
                    "rdfs:isDefinedBy": "http://opencontext.org/projects/3585b372-8d2d-436c-9a4c-b5c10fce3ccd",
                    "label": "The Gabii Project",
                    "count": 2437,
                    "slug": "104-the-gabii-project"
                },
                {
                    "id": "https://opencontext.org/search/?proj=11-aegean-archaeomalacology&q=roman",
                    "json": "https://opencontext.org/search/.json?proj=11-aegean-archaeomalacology&q=roman",
                    "rdfs:isDefinedBy": "http://opencontext.org/projects/B1DAC335-4DC6-4A57-622E-75BF28BA598D",
                    "label": "Aegean Archaeomalacology",
                    "count": 977,
                    "slug": "11-aegean-archaeomalacology"
                },
                {
                    "id": "https://opencontext.org/search/?proj=65-biometrical-database-of-european-aurochs-and-domestic-c&q=roman",
                    "json": "https://opencontext.org/search/.json?proj=65-biometrical-database-of-european-aurochs-and-domestic-c&q=roman",
                    "rdfs:isDefinedBy": "http://opencontext.org/projects/1816A043-92E2-471D-A23D-AAC58695D0D3",
                    "label": "Biometrical Database of European Aurochs and Domestic Cattle",
                    "count": 526,
                    "slug": "65-biometrical-database-of-european-aurochs-and-domestic-c"
                },
                {
                    "id": "https://opencontext.org/search/?proj=101-arce-sphinx-project-1979-1983-archive&q=roman",
                    "json": "https://opencontext.org/search/.json?proj=101-arce-sphinx-project-1979-1983-archive&q=roman",
                    "rdfs:isDefinedBy": "http://opencontext.org/projects/141e814a-ba2d-4560-879f-80f1afb019e9",
                    "label": "ARCE Sphinx Project 1979-1983 Archive",
                    "count": 184,
                    "slug": "101-arce-sphinx-project-1979-1983-archive"
                },
                {
                    "id": "https://opencontext.org/search/?proj=63-excavations-at-polis&q=roman",
                    "json": "https://opencontext.org/search/.json?proj=63-excavations-at-polis&q=roman",
                    "rdfs:isDefinedBy": "http://opencontext.org/projects/766d9fd5-2175-41e3-b7c9-7eba6777f1f0",
                    "label": "Excavations at Polis",
                    "count": 138,
                    "slug": "63-excavations-at-polis"
                },
                {
                    "id": "https://opencontext.org/search/?proj=35-catalhoyuk-area-tp-zooarchaeology&q=roman",
                    "json": "https://opencontext.org/search/.json?proj=35-catalhoyuk-area-tp-zooarchaeology&q=roman",
                    "rdfs:isDefinedBy": "http://opencontext.org/projects/02594C48-7497-40D7-11AE-AB942DC513B8",
                    "label": "\u00c7atalh\u00f6y\u00fck Area TP Zooarchaeology",
                    "count": 123,
                    "slug": "35-catalhoyuk-area-tp-zooarchaeology"
                },
                {
                    "id": "https://opencontext.org/search/?proj=112-tel-dor-area-g-report&q=roman",
                    "json": "https://opencontext.org/search/.json?proj=112-tel-dor-area-g-report&q=roman",
                    "rdfs:isDefinedBy": "http://opencontext.org/projects/b028c160-092e-4e2d-9738-a40367ad543b",
                    "label": "Tel Dor, Area G Report",
                    "count": 65,
                    "slug": "112-tel-dor-area-g-report"
                },
                {
                    "id": "https://opencontext.org/search/?proj=14-bade-museum&q=roman",
                    "json": "https://opencontext.org/search/.json?proj=14-bade-museum&q=roman",
                    "rdfs:isDefinedBy": "http://opencontext.org/projects/B4345F6A-F926-4062-144E-3FBC175CC7B6",
                    "label": "Bade Museum",
                    "count": 46,
                    "slug": "14-bade-museum"
                },
                {
                    "id": "https://opencontext.org/search/?proj=22-kenan-tepe&q=roman",
                    "json": "https://opencontext.org/search/.json?proj=22-kenan-tepe&q=roman",
                    "rdfs:isDefinedBy": "http://opencontext.org/projects/3DE4CD9C-259E-4C14-9B03-8B10454BA66E",
                    "label": "Kenan Tepe",
                    "count": 15,
                    "slug": "22-kenan-tepe"
                },
                {
                    "id": "https://opencontext.org/search/?proj=93-biometry-of-iron-age-ii-and-hellenistic-period-dog-bur&q=roman",
                    "json": "https://opencontext.org/search/.json?proj=93-biometry-of-iron-age-ii-and-hellenistic-period-dog-bur&q=roman",
                    "rdfs:isDefinedBy": "http://opencontext.org/projects/646b7034-07b7-4971-8d89-ebe37dda4cd2",
                    "label": "Biometry of Iron Age II and Hellenistic Period Dog 'Burials' from Tell Gezer and Other Sites",
                    "count": 15,
                    "slug": "93-biometry-of-iron-age-ii-and-hellenistic-period-dog-bur"
                },
                {
                    "id": "https://opencontext.org/search/?proj=1-domuztepe-excavations&q=roman",
                    "json": "https://opencontext.org/search/.json?proj=1-domuztepe-excavations&q=roman",
                    "rdfs:isDefinedBy": "http://opencontext.org/projects/3",
                    "label": "Domuztepe Excavations",
                    "count": 14,
                    "slug": "1-domuztepe-excavations"
                },
                {
                    "id": "https://opencontext.org/search/?proj=52-digital-index-of-north-american-archaeology-dinaa&q=roman",
                    "json": "https://opencontext.org/search/.json?proj=52-digital-index-of-north-american-archaeology-dinaa&q=roman",
                    "rdfs:isDefinedBy": "http://opencontext.org/projects/416A274C-CF88-4471-3E31-93DB825E9E4A",
                    "label": "Digital Index of North American Archaeology (DINAA)",
                    "count": 7,
                    "slug": "52-digital-index-of-north-american-archaeology-dinaa"
                },
                {
                    "id": "https://opencontext.org/search/?proj=91-historic-fort-snelling&q=roman",
                    "json": "https://opencontext.org/search/.json?proj=91-historic-fort-snelling&q=roman",
                    "rdfs:isDefinedBy": "http://opencontext.org/projects/fab0532a-2953-4f13-aa97-8a9d7e992dbe",
                    "label": "Historic Fort Snelling",
                    "count": 6,
                    "slug": "91-historic-fort-snelling"
                },
                {
                    "id": "https://opencontext.org/search/?proj=99-idalion&q=roman",
                    "json": "https://opencontext.org/search/.json?proj=99-idalion&q=roman",
                    "rdfs:isDefinedBy": "http://opencontext.org/projects/df8e4efd-e06b-4b0b-a056-08b6fd199536",
                    "label": "Idalion",
                    "count": 6,
                    "slug": "99-idalion"
                },
                {
                    "id": "https://opencontext.org/search/?proj=39-ceramics-trade-provenience-and-geology-cyprus-in-the&q=roman",
                    "json": "https://opencontext.org/search/.json?proj=39-ceramics-trade-provenience-and-geology-cyprus-in-the&q=roman",
                    "rdfs:isDefinedBy": "http://opencontext.org/projects/ABABD13C-A69F-499E-CA7F-5118F3684E4D",
                    "label": "Ceramics, Trade, Provenience and Geology: Cyprus in the Late Bronze Age",
                    "count": 5,
                    "slug": "39-ceramics-trade-provenience-and-geology-cyprus-in-the"
                },
                {
                    "id": "https://opencontext.org/search/?proj=109-differentiating-local-from-nonlocal-ceramic-production&q=roman",
                    "json": "https://opencontext.org/search/.json?proj=109-differentiating-local-from-nonlocal-ceramic-production&q=roman",
                    "rdfs:isDefinedBy": "http://opencontext.org/projects/cbd24bbb-c6fc-44ed-bd67-6f844f120ad5",
                    "label": "Differentiating local from nonlocal ceramic production at Iron Age Sardis using NAA",
                    "count": 3,
                    "slug": "109-differentiating-local-from-nonlocal-ceramic-production"
                },
                {
                    "id": "https://opencontext.org/search/?proj=100-paleoindian-database-of-the-americas-pidba&q=roman",
                    "json": "https://opencontext.org/search/.json?proj=100-paleoindian-database-of-the-americas-pidba&q=roman",
                    "rdfs:isDefinedBy": "http://opencontext.org/projects/d75ef68a-0e23-458c-976d-286d83c13013",
                    "label": "Paleoindian Database of the Americas (PIDBA)",
                    "count": 1,
                    "slug": "100-paleoindian-database-of-the-americas-pidba"
                },
                {
                    "id": "https://opencontext.org/search/?proj=103-architecture-and-urbanism-at-seyitomer-hoyuk-turkey&q=roman",
                    "json": "https://opencontext.org/search/.json?proj=103-architecture-and-urbanism-at-seyitomer-hoyuk-turkey&q=roman",
                    "rdfs:isDefinedBy": "http://opencontext.org/projects/347286db-b6c6-4fd2-b3bd-b50316b0cb9f",
                    "label": "Architecture and Urbanism at Seyit\u00f6mer H\u00f6y\u00fck, Turkey",
                    "count": 1,
                    "slug": "103-architecture-and-urbanism-at-seyitomer-hoyuk-turkey"
                },
                {
                    "id": "https://opencontext.org/search/?proj=20-dhiban-excavation-and-development-project&q=roman",
                    "json": "https://opencontext.org/search/.json?proj=20-dhiban-excavation-and-development-project&q=roman",
                    "rdfs:isDefinedBy": "http://opencontext.org/projects/01D080DF-2F6B-4F59-BCF0-87543AC89574",
                    "label": "Dhiban Excavation and Development Project",
                    "count": 1,
                    "slug": "20-dhiban-excavation-and-development-project"
                },
                {
                    "id": "https://opencontext.org/search/?proj=21-rough-cilicia&q=roman",
                    "json": "https://opencontext.org/search/.json?proj=21-rough-cilicia&q=roman",
                    "rdfs:isDefinedBy": "http://opencontext.org/projects/295B5BF4-0F44-4698-80CD-7A39CB6F133D",
                    "label": "Rough Cilicia",
                    "count": 1,
                    "slug": "21-rough-cilicia"
                },
                {
                    "id": "https://opencontext.org/search/?proj=26-zooarchaeology-of-karain-cave-b&q=roman",
                    "json": "https://opencontext.org/search/.json?proj=26-zooarchaeology-of-karain-cave-b&q=roman",
                    "rdfs:isDefinedBy": "http://opencontext.org/projects/731B0670-CE2A-414A-8EF6-9C050A1C60F5",
                    "label": "Zooarchaeology of Karain Cave B",
                    "count": 1,
                    "slug": "26-zooarchaeology-of-karain-cave-b"
                },
                {
                    "id": "https://opencontext.org/search/?proj=31-barcin-hoyuk-zooarchaeology&q=roman",
                    "json": "https://opencontext.org/search/.json?proj=31-barcin-hoyuk-zooarchaeology&q=roman",
                    "rdfs:isDefinedBy": "http://opencontext.org/projects/74749949-4FD4-4C3E-C830-5AA75703E08E",
                    "label": "Bar\u00e7\u0131n H\u00f6y\u00fck Zooarchaeology",
                    "count": 1,
                    "slug": "31-barcin-hoyuk-zooarchaeology"
                },
                {
                    "id": "https://opencontext.org/search/?proj=36-ilipinar-zooarchaeology&q=roman",
                    "json": "https://opencontext.org/search/.json?proj=36-ilipinar-zooarchaeology&q=roman",
                    "rdfs:isDefinedBy": "http://opencontext.org/projects/D297CD29-50CA-4B2C-4A07-498ADF3AF487",
                    "label": "Il\u0131p\u0131nar Zooarchaeology",
                    "count": 1,
                    "slug": "36-ilipinar-zooarchaeology"
                },
                {
                    "id": "https://opencontext.org/search/?proj=9-iraq-heritage-program&q=roman",
                    "json": "https://opencontext.org/search/.json?proj=9-iraq-heritage-program&q=roman",
                    "rdfs:isDefinedBy": "http://opencontext.org/projects/GHF1PRJ0000000025",
                    "label": "Iraq Heritage Program",
                    "count": 1,
                    "slug": "9-iraq-heritage-program"
                }
            ]
        },
        {
            "id": "#facet-prop-ld",
            "rdfs:isDefinedBy": "oc-api:facet-prop-ld",
            "label": "Descriptions (Common Standards)",
            "data-type": "id",
            "type": "oc-api:facet-prop",
            "oc-api:has-id-options": [
                {
                    "id": "https://opencontext.org/search/?prop=dc-terms-temporal&q=roman",
                    "json": "https://opencontext.org/search/.json?prop=dc-terms-temporal&q=roman",
                    "rdfs:isDefinedBy": "http://purl.org/dc/terms/temporal",
                    "label": "Temporal Coverage",
                    "count": 63787,
                    "slug": "dc-terms-temporal"
                },
                {
                    "id": "https://opencontext.org/search/?prop=cidoc-crm-p2-has-type&q=roman",
                    "json": "https://opencontext.org/search/.json?prop=cidoc-crm-p2-has-type&q=roman",
                    "rdfs:isDefinedBy": "http://erlangen-crm.org/current/P2_has_type",
                    "label": "Has type",
                    "count": 7824,
                    "slug": "cidoc-crm-p2-has-type"
                },
                {
                    "id": "https://opencontext.org/search/?prop=skos-related&q=roman",
                    "json": "https://opencontext.org/search/.json?prop=skos-related&q=roman",
                    "rdfs:isDefinedBy": "http://www.w3.org/2004/02/skos/core#related",
                    "label": "Related",
                    "count": 4410,
                    "slug": "skos-related"
                },
                {
                    "id": "https://opencontext.org/search/?prop=rel--cidoc-crm-p2-has-type&q=roman",
                    "json": "https://opencontext.org/search/.json?prop=rel--cidoc-crm-p2-has-type&q=roman",
                    "rdfs:isDefinedBy": "http://erlangen-crm.org/current/P2_has_type",
                    "label": "Has type",
                    "count": 3550,
                    "slug": "rel--cidoc-crm-p2-has-type"
                },
                {
                    "id": "https://opencontext.org/search/?prop=cidoc-crm-p45-consists-of&q=roman",
                    "json": "https://opencontext.org/search/.json?prop=cidoc-crm-p45-consists-of&q=roman",
                    "rdfs:isDefinedBy": "http://erlangen-crm.org/current/P45_consists_of",
                    "label": "Consists of",
                    "count": 2933,
                    "slug": "cidoc-crm-p45-consists-of"
                },
                {
                    "id": "https://opencontext.org/search/?prop=rel--cidoc-crm-p45-consists-of&q=roman",
                    "json": "https://opencontext.org/search/.json?prop=rel--cidoc-crm-p45-consists-of&q=roman",
                    "rdfs:isDefinedBy": "http://erlangen-crm.org/current/P45_consists_of",
                    "label": "Consists of",
                    "count": 1455,
                    "slug": "rel--cidoc-crm-p45-consists-of"
                },
                {
                    "id": "https://opencontext.org/search/?prop=oc-zoo-has-anat-id&q=roman",
                    "json": "https://opencontext.org/search/.json?prop=oc-zoo-has-anat-id&q=roman",
                    "rdfs:isDefinedBy": "http://opencontext.org/vocabularies/open-context-zooarch/has-anat-id",
                    "label": "Has anatomical identification",
                    "count": 988,
                    "slug": "oc-zoo-has-anat-id"
                },
                {
                    "id": "https://opencontext.org/search/?prop=biol-term-hastaxonomy&q=roman",
                    "json": "https://opencontext.org/search/.json?prop=biol-term-hastaxonomy&q=roman",
                    "rdfs:isDefinedBy": "http://purl.org/NET/biol/ns#term_hasTaxonomy",
                    "label": "Has Biological Taxonomy",
                    "count": 964,
                    "slug": "biol-term-hastaxonomy"
                },
                {
                    "id": "https://opencontext.org/search/?prop=oc-zoo-anatomical-meas&q=roman",
                    "json": "https://opencontext.org/search/.json?prop=oc-zoo-anatomical-meas&q=roman",
                    "rdfs:isDefinedBy": "http://opencontext.org/vocabularies/open-context-zooarch/anatomical-meas",
                    "label": "Anatomical measurement",
                    "count": 542,
                    "slug": "oc-zoo-anatomical-meas"
                },
                {
                    "id": "https://opencontext.org/search/?prop=dc-terms-references&q=roman",
                    "json": "https://opencontext.org/search/.json?prop=dc-terms-references&q=roman",
                    "rdfs:isDefinedBy": "http://purl.org/dc/terms/references",
                    "label": "References",
                    "count": 527,
                    "slug": "dc-terms-references"
                },
                {
                    "id": "https://opencontext.org/search/?prop=oc-zoo-has-fusion-char&q=roman",
                    "json": "https://opencontext.org/search/.json?prop=oc-zoo-has-fusion-char&q=roman",
                    "rdfs:isDefinedBy": "http://opencontext.org/vocabularies/open-context-zooarch/has-fusion-char",
                    "label": "Has fusion character",
                    "count": 521,
                    "slug": "oc-zoo-has-fusion-char"
                },
                {
                    "id": "https://opencontext.org/search/?prop=dc-terms-ispartof&q=roman",
                    "json": "https://opencontext.org/search/.json?prop=dc-terms-ispartof&q=roman",
                    "rdfs:isDefinedBy": "http://purl.org/dc/terms/isPartOf",
                    "label": "Is Part Of",
                    "count": 491,
                    "slug": "dc-terms-ispartof"
                },
                {
                    "id": "https://opencontext.org/search/?prop=skos-closematch&q=roman",
                    "json": "https://opencontext.org/search/.json?prop=skos-closematch&q=roman",
                    "rdfs:isDefinedBy": "http://www.w3.org/2004/02/skos/core#closeMatch",
                    "label": "Close Match",
                    "count": 429,
                    "slug": "skos-closematch"
                },
                {
                    "id": "https://opencontext.org/search/?prop=rel--gawd-origin&q=roman",
                    "json": "https://opencontext.org/search/.json?prop=rel--gawd-origin&q=roman",
                    "rdfs:isDefinedBy": "http://gawd.atlantides.org/terms/origin",
                    "label": "Origin place",
                    "count": 137,
                    "slug": "rel--gawd-origin"
                },
                {
                    "id": "https://opencontext.org/search/?prop=rel--dc-terms-subject&q=roman",
                    "json": "https://opencontext.org/search/.json?prop=rel--dc-terms-subject&q=roman",
                    "rdfs:isDefinedBy": "http://purl.org/dc/terms/subject",
                    "label": "Subject",
                    "count": 125,
                    "slug": "rel--dc-terms-subject"
                },
                {
                    "id": "https://opencontext.org/search/?prop=gawd-origin&q=roman",
                    "json": "https://opencontext.org/search/.json?prop=gawd-origin&q=roman",
                    "rdfs:isDefinedBy": "http://gawd.atlantides.org/terms/origin",
                    "label": "Origin place",
                    "count": 70,
                    "slug": "gawd-origin"
                },
                {
                    "id": "https://opencontext.org/search/?prop=dc-terms-subject&q=roman",
                    "json": "https://opencontext.org/search/.json?prop=dc-terms-subject&q=roman",
                    "rdfs:isDefinedBy": "http://purl.org/dc/terms/subject",
                    "label": "Subject",
                    "count": 50,
                    "slug": "dc-terms-subject"
                },
                {
                    "id": "https://opencontext.org/search/?prop=dc-terms-isreferencedby&q=roman",
                    "json": "https://opencontext.org/search/.json?prop=dc-terms-isreferencedby&q=roman",
                    "rdfs:isDefinedBy": "http://purl.org/dc/terms/isReferencedBy",
                    "label": "Is Referenced By",
                    "count": 18,
                    "slug": "dc-terms-isreferencedby"
                },
                {
                    "id": "https://opencontext.org/search/?prop=dc-terms-haspart&q=roman",
                    "json": "https://opencontext.org/search/.json?prop=dc-terms-haspart&q=roman",
                    "rdfs:isDefinedBy": "http://purl.org/dc/terms/hasPart",
                    "label": "Has Part",
                    "count": 17,
                    "slug": "dc-terms-haspart"
                },
                {
                    "id": "https://opencontext.org/search/?prop=bibo-status&q=roman",
                    "json": "https://opencontext.org/search/.json?prop=bibo-status&q=roman",
                    "rdfs:isDefinedBy": "http://purl.org/ontology/bibo/status",
                    "label": "Status",
                    "count": 16,
                    "slug": "bibo-status"
                },
                {
                    "id": "https://opencontext.org/search/?prop=dc-terms-spatial&q=roman",
                    "json": "https://opencontext.org/search/.json?prop=dc-terms-spatial&q=roman",
                    "rdfs:isDefinedBy": "http://purl.org/dc/terms/spatial",
                    "label": "Spatial",
                    "count": 14,
                    "slug": "dc-terms-spatial"
                },
                {
                    "id": "https://opencontext.org/search/?prop=dc-terms-coverage&q=roman",
                    "json": "https://opencontext.org/search/.json?prop=dc-terms-coverage&q=roman",
                    "rdfs:isDefinedBy": "http://purl.org/dc/terms/coverage",
                    "label": "Coverage",
                    "count": 11,
                    "slug": "dc-terms-coverage"
                },
                {
                    "id": "https://opencontext.org/search/?prop=oc-zoo-has-phys-sex-det&q=roman",
                    "json": "https://opencontext.org/search/.json?prop=oc-zoo-has-phys-sex-det&q=roman",
                    "rdfs:isDefinedBy": "http://opencontext.org/vocabularies/open-context-zooarch/has-phys-sex-det",
                    "label": "Has physiological sex determination",
                    "count": 8,
                    "slug": "oc-zoo-has-phys-sex-det"
                },
                {
                    "id": "https://opencontext.org/search/?prop=dinaa-00001&q=roman",
                    "json": "https://opencontext.org/search/.json?prop=dinaa-00001&q=roman",
                    "rdfs:isDefinedBy": "http://opencontext.org/vocabularies/dinaa/00001",
                    "label": "Has period",
                    "count": 6,
                    "slug": "dinaa-00001"
                },
                {
                    "id": "https://opencontext.org/search/?prop=rel--dc-terms-references&q=roman",
                    "json": "https://opencontext.org/search/.json?prop=rel--dc-terms-references&q=roman",
                    "rdfs:isDefinedBy": "http://purl.org/dc/terms/references",
                    "label": "References",
                    "count": 4,
                    "slug": "rel--dc-terms-references"
                }
            ],
            "oc-api:has-text-options": [
                {
                    "id": "https://opencontext.org/search/?prop=rel--skos-altlabel&q=roman",
                    "json": "https://opencontext.org/search/.json?prop=rel--skos-altlabel&q=roman",
                    "rdfs:isDefinedBy": "http://www.w3.org/2004/02/skos/core#altLabel",
                    "label": "Alternative label",
                    "count": 133,
                    "slug": "rel--skos-altlabel"
                },
                {
                    "id": "https://opencontext.org/search/?prop=skos-altlabel&q=roman",
                    "json": "https://opencontext.org/search/.json?prop=skos-altlabel&q=roman",
                    "rdfs:isDefinedBy": "http://www.w3.org/2004/02/skos/core#altLabel",
                    "label": "Alternative label",
                    "count": 6,
                    "slug": "skos-altlabel"
                },
                {
                    "id": "https://opencontext.org/search/?prop=dc-terms-identifier&q=roman",
                    "json": "https://opencontext.org/search/.json?prop=dc-terms-identifier&q=roman",
                    "rdfs:isDefinedBy": "http://purl.org/dc/terms/identifier",
                    "label": "Identifier",
                    "count": 3,
                    "slug": "dc-terms-identifier"
                }
            ]
        },
        {
            "id": "#related-media",
            "label": "Has Related Media",
            "oc-api:has-rel-media-options": [
                {
                    "id": "https://opencontext.org/search/?images=1&q=roman",
                    "json": "https://opencontext.org/search/.json?images=1&q=roman",
                    "label": "Linked with images",
                    "count": 3348
                },
                {
                    "id": "https://opencontext.org/search/?other-media=1&q=roman",
                    "json": "https://opencontext.org/search/.json?other-media=1&q=roman",
                    "label": "Linked with media (non-image)",
                    "count": 108
                },
                {
                    "id": "https://opencontext.org/search/?documents=1&q=roman",
                    "json": "https://opencontext.org/search/.json?documents=1&q=roman",
                    "label": "Linked with documents",
                    "count": 19
                }
            ]
        }
    ],
    "type": "FeatureCollection",
    "features": [
        {
            "id": "https://opencontext.org/search/?disc-geotile=021333&q=roman",
            "json": "https://opencontext.org/search/.json?disc-geotile=021333&q=roman",
            "count": 6,
            "type": "Feature",
            "category": "oc-api:geo-facet",
            "when": {
                "id": "#event-021333",
                "type": "oc-gen:formation-use-life",
                "start": "-336999",
                "stop": "1950"
            },
            "geometry": {
                "id": "#geo-disc-tile-geom-021333",
                "type": "Polygon",
                "coordinates": [
                    [
                        [
                            -95.625,
                            40.979898069620155
                        ],
                        [
                            -90.0,
                            40.979898069620155
                        ],
                        [
                            -90.0,
                            45.089035564831015
                        ],
                        [
                            -95.625,
                            45.089035564831015
                        ],
                        [
                            -95.625,
                            40.979898069620155
                        ]
                    ]
                ]
            },
            "properties": {
                "id": "#geo-disc-tile-021333",
                "href": "https://opencontext.org/search/?disc-geotile=021333&q=roman",
                "label": "Discovery region (1)",
                "feature-type": "discovery region (facet)",
                "count": 6,
                "early bce/ce": -337000,
                "late bce/ce": 1950
            }
        },
        {
            "id": "https://opencontext.org/search/?disc-geotile=023131&q=roman",
            "json": "https://opencontext.org/search/.json?disc-geotile=023131&q=roman",
            "count": 3,
            "type": "Feature",
            "category": "oc-api:geo-facet",
            "when": {
                "id": "#event-023131",
                "type": "oc-gen:formation-use-life",
                "start": "-336999",
                "stop": "1950"
            },
            "geometry": {
                "id": "#geo-disc-tile-geom-023131",
                "type": "Polygon",
                "coordinates": [
                    [
                        [
                            -95.625,
                            27.059125784374068
                        ],
                        [
                            -90.0,
                            27.059125784374068
                        ],
                        [
                            -90.0,
                            31.952162238024975
                        ],
                        [
                            -95.625,
                            31.952162238024975
                        ],
                        [
                            -95.625,
                            27.059125784374068
                        ]
                    ]
                ]
            },
            "properties": {
                "id": "#geo-disc-tile-023131",
                "href": "https://opencontext.org/search/?disc-geotile=023131&q=roman",
                "label": "Discovery region (2)",
                "feature-type": "discovery region (facet)",
                "count": 3,
                "early bce/ce": -337000,
                "late bce/ce": 1950
            }
        },
        {
            "id": "https://opencontext.org/search/?disc-geotile=023301&q=roman",
            "json": "https://opencontext.org/search/.json?disc-geotile=023301&q=roman",
            "count": 1,
            "type": "Feature",
            "category": "oc-api:geo-facet",
            "when": {
                "id": "#event-023301",
                "type": "oc-gen:formation-use-life",
                "start": "-336999",
                "stop": "1950"
            },
            "geometry": {
                "id": "#geo-disc-tile-geom-023301",
                "type": "Polygon",
                "coordinates": [
                    [
                        [
                            -106.875,
                            16.636191878397664
                        ],
                        [
                            -101.25,
                            16.636191878397664
                        ],
                        [
                            -101.25,
                            21.943045533438166
                        ],
                        [
                            -106.875,
                            21.943045533438166
                        ],
                        [
                            -106.875,
                            16.636191878397664
                        ]
                    ]
                ]
            },
            "properties": {
                "id": "#geo-disc-tile-023301",
                "href": "https://opencontext.org/search/?disc-geotile=023301&q=roman",
                "label": "Discovery region (3)",
                "feature-type": "discovery region (facet)",
                "count": 1,
                "early bce/ce": -337000,
                "late bce/ce": 1950
            }
        },
        {
            "id": "https://opencontext.org/search/?disc-geotile=030222&q=roman",
            "json": "https://opencontext.org/search/.json?disc-geotile=030222&q=roman",
            "count": 1,
            "type": "Feature",
            "category": "oc-api:geo-facet",
            "when": {
                "id": "#event-030222",
                "type": "oc-gen:formation-use-life",
                "start": "-336999",
                "stop": "1950"
            },
            "geometry": {
                "id": "#geo-disc-tile-geom-030222",
                "type": "Polygon",
                "coordinates": [
                    [
                        [
                            -90.0,
                            40.979898069620155
                        ],
                        [
                            -84.375,
                            40.979898069620155
                        ],
                        [
                            -84.375,
                            45.089035564831015
                        ],
                        [
                            -90.0,
                            45.089035564831015
                        ],
                        [
                            -90.0,
                            40.979898069620155
                        ]
                    ]
                ]
            },
            "properties": {
                "id": "#geo-disc-tile-030222",
                "href": "https://opencontext.org/search/?disc-geotile=030222&q=roman",
                "label": "Discovery region (4)",
                "feature-type": "discovery region (facet)",
                "count": 1,
                "early bce/ce": -337000,
                "late bce/ce": 1950
            }
        },
        {
            "id": "https://opencontext.org/search/?disc-geotile=032000&q=roman",
            "json": "https://opencontext.org/search/.json?disc-geotile=032000&q=roman",
            "count": 1,
            "type": "Feature",
            "category": "oc-api:geo-facet",
            "when": {
                "id": "#event-032000",
                "type": "oc-gen:formation-use-life",
                "start": "-336999",
                "stop": "1950"
            },
            "geometry": {
                "id": "#geo-disc-tile-geom-032000",
                "type": "Polygon",
                "coordinates": [
                    [
                        [
                            -90.0,
                            36.5978891330702
                        ],
                        [
                            -84.375,
                            36.5978891330702
                        ],
                        [
                            -84.375,
                            40.979898069620155
                        ],
                        [
                            -90.0,
                            40.979898069620155
                        ],
                        [
                            -90.0,
                            36.5978891330702
                        ]
                    ]
                ]
            },
            "properties": {
                "id": "#geo-disc-tile-032000",
                "href": "https://opencontext.org/search/?disc-geotile=032000&q=roman",
                "label": "Discovery region (5)",
                "feature-type": "discovery region (facet)",
                "count": 1,
                "early bce/ce": -337000,
                "late bce/ce": 1950
            }
        },
        {
            "id": "https://opencontext.org/search/?disc-geotile=032021&q=roman",
            "json": "https://opencontext.org/search/.json?disc-geotile=032021&q=roman",
            "count": 2,
            "type": "Feature",
            "category": "oc-api:geo-facet",
            "when": {
                "id": "#event-032021",
                "type": "oc-gen:formation-use-life",
                "start": "-336999",
                "stop": "1950"
            },
            "geometry": {
                "id": "#geo-disc-tile-geom-032021",
                "type": "Polygon",
                "coordinates": [
                    [
                        [
                            -84.375,
                            27.059125784374068
                        ],
                        [
                            -78.75000000000001,
                            27.059125784374068
                        ],
                        [
                            -78.75000000000001,
                            31.952162238024975
                        ],
                        [
                            -84.375,
                            31.952162238024975
                        ],
                        [
                            -84.375,
                            27.059125784374068
                        ]
                    ]
                ]
            },
            "properties": {
                "id": "#geo-disc-tile-032021",
                "href": "https://opencontext.org/search/?disc-geotile=032021&q=roman",
                "label": "Discovery region (6)",
                "feature-type": "discovery region (facet)",
                "count": 2,
                "early bce/ce": -337000,
                "late bce/ce": 1950
            }
        },
        {
            "id": "https://opencontext.org/search/?disc-geotile=033111&q=roman",
            "json": "https://opencontext.org/search/.json?disc-geotile=033111&q=roman",
            "count": 28,
            "type": "Feature",
            "category": "oc-api:geo-facet",
            "when": {
                "id": "#event-033111",
                "type": "oc-gen:formation-use-life",
                "start": "-336999",
                "stop": "1950"
            },
            "geometry": {
                "id": "#geo-disc-tile-geom-033111",
                "type": "Polygon",
                "coordinates": [
                    [
                        [
                            -5.625000000000013,
                            36.5978891330702
                        ],
                        [
                            0.0,
                            36.5978891330702
                        ],
                        [
                            0.0,
                            40.979898069620155
                        ],
                        [
                            -5.625000000000013,
                            40.979898069620155
                        ],
                        [
                            -5.625000000000013,
                            36.5978891330702
                        ]
                    ]
                ]
            },
            "properties": {
                "id": "#geo-disc-tile-033111",
                "href": "https://opencontext.org/search/?disc-geotile=033111&q=roman",
                "label": "Discovery region (7)",
                "feature-type": "discovery region (facet)",
                "count": 28,
                "early bce/ce": -337000,
                "late bce/ce": 1950
            }
        },
        {
            "id": "https://opencontext.org/search/?disc-geotile=120201&q=roman",
            "json": "https://opencontext.org/search/.json?disc-geotile=120201&q=roman",
            "count": 127,
            "type": "Feature",
            "category": "oc-api:geo-facet",
            "when": {
                "id": "#event-120201",
                "type": "oc-gen:formation-use-life",
                "start": "-336999",
                "stop": "1950"
            },
            "geometry": {
                "id": "#geo-disc-tile-geom-120201",
                "type": "Polygon",
                "coordinates": [
                    [
                        [
                            5.625000000000013,
                            52.482780222078226
                        ],
                        [
                            11.249999999999993,
                            52.482780222078226
                        ],
                        [
                            11.249999999999993,
                            55.7765730186677
                        ],
                        [
                            5.625000000000013,
                            55.7765730186677
                        ],
                        [
                            5.625000000000013,
                            52.482780222078226
                        ]
                    ]
                ]
            },
            "properties": {
                "id": "#geo-disc-tile-120201",
                "href": "https://opencontext.org/search/?disc-geotile=120201&q=roman",
                "label": "Discovery region (8)",
                "feature-type": "discovery region (facet)",
                "count": 127,
                "early bce/ce": -337000,
                "late bce/ce": 1950
            }
        },
        {
            "id": "https://opencontext.org/search/?disc-geotile=120203&q=roman",
            "json": "https://opencontext.org/search/.json?disc-geotile=120203&q=roman",
            "count": 263,
            "type": "Feature",
            "category": "oc-api:geo-facet",
            "when": {
                "id": "#event-120203",
                "type": "oc-gen:formation-use-life",
                "start": "-336999",
                "stop": "1950"
            },
            "geometry": {
                "id": "#geo-disc-tile-geom-120203",
                "type": "Polygon",
                "coordinates": [
                    [
                        [
                            5.625000000000013,
                            48.922499263758255
                        ],
                        [
                            11.249999999999993,
                            48.922499263758255
                        ],
                        [
                            11.249999999999993,
                            52.482780222078226
                        ],
                        [
                            5.625000000000013,
                            52.482780222078226
                        ],
                        [
                            5.625000000000013,
                            48.922499263758255
                        ]
                    ]
                ]
            },
            "properties": {
                "id": "#geo-disc-tile-120203",
                "href": "https://opencontext.org/search/?disc-geotile=120203&q=roman",
                "label": "Discovery region (9)",
                "feature-type": "discovery region (facet)",
                "count": 263,
                "early bce/ce": -337000,
                "late bce/ce": 1950
            }
        },
        {
            "id": "https://opencontext.org/search/?disc-geotile=120210&q=roman",
            "json": "https://opencontext.org/search/.json?disc-geotile=120210&q=roman",
            "count": 29,
            "type": "Feature",
            "category": "oc-api:geo-facet",
            "when": {
                "id": "#event-120210",
                "type": "oc-gen:formation-use-life",
                "start": "-336999",
                "stop": "1950"
            },
            "geometry": {
                "id": "#geo-disc-tile-geom-120210",
                "type": "Polygon",
                "coordinates": [
                    [
                        [
                            11.249999999999993,
                            52.482780222078226
                        ],
                        [
                            16.875000000000007,
                            52.482780222078226
],
[
[34m16.875000000000007[39;49;00m,
[34m55.7765730186677[39;49;00m
],
[
[34m11.249999999999993[39;49;00m,
[34m55.7765730186677[39;49;00m
],
[
[34m11.249999999999993[39;49;00m,
[34m52.482780222078226[39;49;00m
]
]
]
},
[94m"properties"[39;49;00m: {
[94m"id"[39;49;00m: [33m"#geo-disc-tile-120210"[39;49;00m,
[94m"href"[39;49;00m: [33m"https://opencontext.org/search/?disc-geotile=120210&q=roman"[39;49;00m,
[94m"label"[39;49;00m: [33m"Discovery region (10)"[39;49;00m,
[94m"feature-type"[39;49;00m: [33m"discovery region (facet)"[39;49;00m,
[94m"count"[39;49;00m: [34m29[39;49;00m,
[94m"early bce/ce"[39;49;00m: [34m-337000[39;49;00m,
[94m"late bce/ce"[39;49;00m: [34m1950[39;49;00m
}
},
{
[94m"id"[39;49;00m: [33m"https://opencontext.org/search/?disc-geotile=120211&q=roman"[39;49;00m,
[94m"json"[39;49;00m: [33m"https://opencontext.org/search/.json?disc-geotile=120211&q=roman"[39;49;00m,
[94m"count"[39;49;00m: [34m10[39;49;00m,
[94m"type"[39;49;00m: [33m"Feature"[39;49;00m,
[94m"category"[39;49;00m: [33m"oc-api:geo-facet"[39;49;00m,
[94m"when"[39;49;00m: {
[94m"id"[39;49;00m: [33m"#event-120211"[39;49;00m,
[94m"type"[39;49;00m: [33m"oc-gen:formation-use-life"[39;49;00m,
[94m"start"[39;49;00m: [33m"-336999"[39;49;00m,
[94m"stop"[39;49;00m: [33m"1950"[39;49;00m
},
[94m"geometry"[39;49;00m: {
[94m"id"[39;49;00m: [33m"#geo-disc-tile-geom-120211"[39;49;00m,
[94m"type"[39;49;00m: [33m"Polygon"[39;49;00m,
[94m"coordinates"[39;49;00m: [
[
[
[34m16.875000000000007[39;49;00m,
[34m52.482780222078226[39;49;00m
],
[
[34m22.499999999999986[39;49;00m,
[34m52.482780222078226[39;49;00m
],
[
[34m22.499999999999986[39;49;00m,
[34m55.7765730186677[39;49;00m
],
[
[34m16.875000000000007[39;49;00m,
[34m55.7765730186677[39;49;00m
],
[
[34m16.875000000000007[39;49;00m,
[34m52.482780222078226[39;49;00m
]
]
]
},
[94m"properties"[39;49;00m: {
[94m"id"[39;49;00m: [33m"#geo-disc-tile-120211"[39;49;00m,
[94m"href"[39;49;00m: [33m"https://opencontext.org/search/?disc-geotile=120211&q=roman"[39;49;00m,
[94m"label"[39;49;00m: [33m"Discovery region (11)"[39;49;00m,
[94m"feature-type"[39;49;00m: [33m"discovery region (facet)"[39;49;00m,
[94m"count"[39;49;00m: [34m10[39;49;00m,
[94m"early bce/ce"[39;49;00m: [34m-337000[39;49;00m,
[94m"late bce/ce"[39;49;00m: [34m1950[39;49;00m
}
},
{
[94m"id"[39;49;00m: [33m"https://opencontext.org/search/?disc-geotile=120212&q=roman"[39;49;00m,
[94m"json"[39;49;00m: [33m"https://opencontext.org/search/.json?disc-geotile=120212&q=roman"[39;49;00m,
[94m"count"[39;49;00m: [34m56[39;49;00m,
[94m"type"[39;49;00m: [33m"Feature"[39;49;00m,
[94m"category"[39;49;00m: [33m"oc-api:geo-facet"[39;49;00m,
[94m"when"[39;49;00m: {
[94m"id"[39;49;00m: [33m"#event-120212"[39;49;00m,
[94m"type"[39;49;00m: [33m"oc-gen:formation-use-life"[39;49;00m,
[94m"start"[39;49;00m: [33m"-336999"[39;49;00m,
[94m"stop"[39;49;00m: [33m"1950"[39;49;00m
},
[94m"geometry"[39;49;00m: {
[94m"id"[39;49;00m: [33m"#geo-disc-tile-geom-120212"[39;49;00m,
[94m"type"[39;49;00m: [33m"Polygon"[39;49;00m,
[94m"coordinates"[39;49;00m: [
[
[
[34m11.249999999999993[39;49;00m,
[34m48.922499263758255[39;49;00m
],
[
[34m16.875000000000007[39;49;00m,
[34m48.922499263758255[39;49;00m
],
[
[34m16.875000000000007[39;49;00m,
[34m52.482780222078226[39;49;00m
],
[
[34m11.249999999999993[39;49;00m,
[34m52.482780222078226[39;49;00m
],
[
[34m11.249999999999993[39;49;00m,
[34m48.922499263758255[39;49;00m
]
]
]
},
[94m"properties"[39;49;00m: {
[94m"id"[39;49;00m: [33m"#geo-disc-tile-120212"[39;49;00m,
[94m"href"[39;49;00m: [33m"https://opencontext.org/search/?disc-geotile=120212&q=roman"[39;49;00m,
[94m"label"[39;49;00m: [33m"Discovery region (12)"[39;49;00m,
[94m"feature-type"[39;49;00m: [33m"discovery region (facet)"[39;49;00m,
[94m"count"[39;49;00m: [34m56[39;49;00m,
[94m"early bce/ce"[39;49;00m: [34m-337000[39;49;00m,
[94m"late bce/ce"[39;49;00m: [34m1950[39;49;00m
}
},
{
[94m"id"[39;49;00m: [33m"https://opencontext.org/search/?disc-geotile=120221&q=roman"[39;49;00m,
[94m"json"[39;49;00m: [33m"https://opencontext.org/search/.json?disc-geotile=120221&q=roman"[39;49;00m,
[94m"count"[39;49;00m: [34m13[39;49;00m,
[94m"type"[39;49;00m: [33m"Feature"[39;49;00m,
[94m"category"[39;49;00m: [33m"oc-api:geo-facet"[39;49;00m,
[94m"when"[39;49;00m: {
[94m"id"[39;49;00m: [33m"#event-120221"[39;49;00m,
[94m"type"[39;49;00m: [33m"oc-gen:formation-use-life"[39;49;00m,
[94m"start"[39;49;00m: [33m"-336999"[39;49;00m,
[94m"stop"[39;49;00m: [33m"1950"[39;49;00m
},
[94m"geometry"[39;49;00m: {
[94m"id"[39;49;00m: [33m"#geo-disc-tile-geom-120221"[39;49;00m,
[94m"type"[39;49;00m: [33m"Polygon"[39;49;00m,
[94m"coordinates"[39;49;00m: [
[
[
[34m5.625000000000013[39;49;00m,
[34m45.089035564831015[39;49;00m
],
[
[34m11.249999999999993[39;49;00m,
[34m45.089035564831015[39;49;00m
],
[
[34m11.249999999999993[39;49;00m,
[34m48.922499263758255[39;49;00m
],
[
[34m5.625000000000013[39;49;00m,
[34m48.922499263758255[39;49;00m
],
[
[34m5.625000000000013[39;49;00m,
[34m45.089035564831015[39;49;00m
]
]
]
},
[94m"properties"[39;49;00m: {
[94m"id"[39;49;00m: [33m"#geo-disc-tile-120221"[39;49;00m,
[94m"href"[39;49;00m: [33m"https://opencontext.org/search/?disc-geotile=120221&q=roman"[39;49;00m,
[94m"label"[39;49;00m: [33m"Discovery region (13)"[39;49;00m,
[94m"feature-type"[39;49;00m: [33m"discovery region (facet)"[39;49;00m,
[94m"count"[39;49;00m: [34m13[39;49;00m,
[94m"early bce/ce"[39;49;00m: [34m-337000[39;49;00m,
[94m"late bce/ce"[39;49;00m: [34m1950[39;49;00m
}
},
{
[94m"id"[39;49;00m: [33m"https://opencontext.org/search/?disc-geotile=120232&q=roman"[39;49;00m,
[94m"json"[39;49;00m: [33m"https://opencontext.org/search/.json?disc-geotile=120232&q=roman"[39;49;00m,
[94m"count"[39;49;00m: [34m9965[39;49;00m,
[94m"type"[39;49;00m: [33m"Feature"[39;49;00m,
[94m"category"[39;49;00m: [33m"oc-api:geo-facet"[39;49;00m,
[94m"when"[39;49;00m: {
[94m"id"[39;49;00m: [33m"#event-120232"[39;49;00m,
[94m"type"[39;49;00m: [33m"oc-gen:formation-use-life"[39;49;00m,
[94m"start"[39;49;00m: [33m"-336999"[39;49;00m,
[94m"stop"[39;49;00m: [33m"1950"[39;49;00m
},
[94m"geometry"[39;49;00m: {
[94m"id"[39;49;00m: [33m"#geo-disc-tile-geom-120232"[39;49;00m,
[94m"type"[39;49;00m: [33m"Polygon"[39;49;00m,
[94m"coordinates"[39;49;00m: [
[
[
[34m11.249999999999993[39;49;00m,
[34m40.979898069620155[39;49;00m
],
[
[34m16.875000000000007[39;49;00m,
[34m40.979898069620155[39;49;00m
],
[
[34m16.875000000000007[39;49;00m,
[34m45.089035564831015[39;49;00m
],
[
[34m11.249999999999993[39;49;00m,
[34m45.089035564831015[39;49;00m
],
[
[34m11.249999999999993[39;49;00m,
[34m40.979898069620155[39;49;00m
]
]
]
},
[94m"properties"[39;49;00m: {
[94m"id"[39;49;00m: [33m"#geo-disc-tile-120232"[39;49;00m,
[94m"href"[39;49;00m: [33m"https://opencontext.org/search/?disc-geotile=120232&q=roman"[39;49;00m,
[94m"label"[39;49;00m: [33m"Discovery region (14)"[39;49;00m,
[94m"feature-type"[39;49;00m: [33m"discovery region (facet)"[39;49;00m,
[94m"count"[39;49;00m: [34m9965[39;49;00m,
[94m"early bce/ce"[39;49;00m: [34m-337000[39;49;00m,
[94m"late bce/ce"[39;49;00m: [34m1950[39;49;00m
}
},
{
[94m"id"[39;49;00m: [33m"https://opencontext.org/search/?disc-geotile=122100&q=roman"[39;49;00m,
[94m"json"[39;49;00m: [33m"https://opencontext.org/search/.json?disc-geotile=122100&q=roman"[39;49;00m,
[94m"count"[39;49;00m: [34m980[39;49;00m,
[94m"type"[39;49;00m: [33m"Feature"[39;49;00m,
[94m"category"[39;49;00m: [33m"oc-api:geo-facet"[39;49;00m,
[94m"when"[39;49;00m: {
[94m"id"[39;49;00m: [33m"#event-122100"[39;49;00m,
[94m"type"[39;49;00m: [33m"oc-gen:formation-use-life"[39;49;00m,
[94m"start"[39;49;00m: [33m"-336999"[39;49;00m,
[94m"stop"[39;49;00m: [33m"1950"[39;49;00m
},
[94m"geometry"[39;49;00m: {
[94m"id"[39;49;00m: [33m"#geo-disc-tile-geom-122100"[39;49;00m,
[94m"type"[39;49;00m: [33m"Polygon"[39;49;00m,
[94m"coordinates"[39;49;00m: [
[
[
[34m22.499999999999986[39;49;00m,
[34m36.5978891330702[39;49;00m
],
[
[34m28.125[39;49;00m,
[34m36.5978891330702[39;49;00m
],
[
[34m28.125[39;49;00m,
[34m40.979898069620155[39;49;00m
],
[
[34m22.499999999999986[39;49;00m,
[34m40.979898069620155[39;49;00m
],
[
[34m22.499999999999986[39;49;00m,
[34m36.5978891330702[39;49;00m
]
]
]
},
[94m"properties"[39;49;00m: {
[94m"id"[39;49;00m: [33m"#geo-disc-tile-122100"[39;49;00m,
[94m"href"[39;49;00m: [33m"https://opencontext.org/search/?disc-geotile=122100&q=roman"[39;49;00m,
[94m"label"[39;49;00m: [33m"Discovery region (15)"[39;49;00m,
[94m"feature-type"[39;49;00m: [33m"discovery region (facet)"[39;49;00m,
[94m"count"[39;49;00m: [34m980[39;49;00m,
[94m"early bce/ce"[39;49;00m: [34m-337000[39;49;00m,
[94m"late bce/ce"[39;49;00m: [34m1950[39;49;00m
}
},
{
[94m"id"[39;49;00m: [33m"https://opencontext.org/search/?disc-geotile=122101&q=roman"[39;49;00m,
[94m"json"[39;49;00m: [33m"https://opencontext.org/search/.json?disc-geotile=122101&q=roman"[39;49;00m,
[94m"count"[39;49;00m: [34m127[39;49;00m,
[94m"type"[39;49;00m: [33m"Feature"[39;49;00m,
[94m"category"[39;49;00m: [33m"oc-api:geo-facet"[39;49;00m,
[94m"when"[39;49;00m: {
[94m"id"[39;49;00m: [33m"#event-122101"[39;49;00m,
[94m"type"[39;49;00m: [33m"oc-gen:formation-use-life"[39;49;00m,
[94m"start"[39;49;00m: [33m"-336999"[39;49;00m,
[94m"stop"[39;49;00m: [33m"1950"[39;49;00m
},
[94m"geometry"[39;49;00m: {
[94m"id"[39;49;00m: [33m"#geo-disc-tile-geom-122101"[39;49;00m,
[94m"type"[39;49;00m: [33m"Polygon"[39;49;00m,
[94m"coordinates"[39;49;00m: [
[
[
[34m28.125[39;49;00m,
[34m36.5978891330702[39;49;00m
],
[
[34m33.750000000000014[39;49;00m,
[34m36.5978891330702[39;49;00m
],
[
[34m33.750000000000014[39;49;00m,
[34m40.979898069620155[39;49;00m
],
[
[34m28.125[39;49;00m,
[34m40.979898069620155[39;49;00m
],
[
[34m28.125[39;49;00m,
[34m36.5978891330702[39;49;00m
]
]
]
},
[94m"properties"[39;49;00m: {
[94m"id"[39;49;00m: [33m"#geo-disc-tile-122101"[39;49;00m,
[94m"href"[39;49;00m: [33m"https://opencontext.org/search/?disc-geotile=122101&q=roman"[39;49;00m,
[94m"label"[39;49;00m: [33m"Discovery region (16)"[39;49;00m,
[94m"feature-type"[39;49;00m: [33m"discovery region (facet)"[39;49;00m,
[94m"count"[39;49;00m: [34m127[39;49;00m,
[94m"early bce/ce"[39;49;00m: [34m-337000[39;49;00m,
[94m"late bce/ce"[39;49;00m: [34m1950[39;49;00m
}
},
{
[94m"id"[39;49;00m: [33m"https://opencontext.org/search/?disc-geotile=122103&q=roman"[39;49;00m,
[94m"json"[39;49;00m: [33m"https://opencontext.org/search/.json?disc-geotile=122103&q=roman"[39;49;00m,
[94m"count"[39;49;00m: [34m8932[39;49;00m,
[94m"type"[39;49;00m: [33m"Feature"[39;49;00m,
[94m"category"[39;49;00m: [33m"oc-api:geo-facet"[39;49;00m,
[94m"when"[39;49;00m: {
[94m"id"[39;49;00m: [33m"#event-122103"[39;49;00m,
[94m"type"[39;49;00m: [33m"oc-gen:formation-use-life"[39;49;00m,
[94m"start"[39;49;00m: [33m"-336999"[39;49;00m,
[94m"stop"[39;49;00m: [33m"1950"[39;49;00m
},
[94m"geometry"[39;49;00m: {
[94m"id"[39;49;00m: [33m"#geo-disc-tile-geom-122103"[39;49;00m,
[94m"type"[39;49;00m: [33m"Polygon"[39;49;00m,
[94m"coordinates"[39;49;00m: [
[
[
[34m28.125[39;49;00m,
[34m31.952162238024975[39;49;00m
],
[
[34m33.750000000000014[39;49;00m,
[34m31.952162238024975[39;49;00m
],
[
[34m33.750000000000014[39;49;00m,
[34m36.5978891330702[39;49;00m
],
[
[34m28.125[39;49;00m,
[34m36.5978891330702[39;49;00m
],
[
[34m28.125[39;49;00m,
[34m31.952162238024975[39;49;00m
]
]
]
},
[94m"properties"[39;49;00m: {
[94m"id"[39;49;00m: [33m"#geo-disc-tile-122103"[39;49;00m,
[94m"href"[39;49;00m: [33m"https://opencontext.org/search/?disc-geotile=122103&q=roman"[39;49;00m,
[94m"label"[39;49;00m: [33m"Discovery region (17)"[39;49;00m,
[94m"feature-type"[39;49;00m: [33m"discovery region (facet)"[39;49;00m,
[94m"count"[39;49;00m: [34m8932[39;49;00m,
[94m"early bce/ce"[39;49;00m: [34m-337000[39;49;00m,
[94m"late bce/ce"[39;49;00m: [34m1950[39;49;00m
}
},
{
[94m"id"[39;49;00m: [33m"https://opencontext.org/search/?disc-geotile=122110&q=roman"[39;49;00m,
[94m"json"[39;49;00m: [33m"https://opencontext.org/search/.json?disc-geotile=122110&q=roman"[39;49;00m,
[94m"count"[39;49;00m: [34m51932[39;49;00m,
[94m"type"[39;49;00m: [33m"Feature"[39;49;00m,
[94m"category"[39;49;00m: [33m"oc-api:geo-facet"[39;49;00m,
[94m"when"[39;49;00m: {
[94m"id"[39;49;00m: [33m"#event-122110"[39;49;00m,
[94m"type"[39;49;00m: [33m"oc-gen:formation-use-life"[39;49;00m,
[94m"start"[39;49;00m: [33m"-336999"[39;49;00m,
[94m"stop"[39;49;00m: [33m"1950"[39;49;00m
},
[94m"geometry"[39;49;00m: {
[94m"id"[39;49;00m: [33m"#geo-disc-tile-geom-122110"[39;49;00m,
[94m"type"[39;49;00m: [33m"Polygon"[39;49;00m,
[94m"coordinates"[39;49;00m: [
[
[
[34m33.750000000000014[39;49;00m,
[34m36.5978891330702[39;49;00m
],
[
[34m39.37499999999999[39;49;00m,
[34m36.5978891330702[39;49;00m
],
[
[34m39.37499999999999[39;49;00m,
[34m40.979898069620155[39;49;00m
],
[
[34m33.750000000000014[39;49;00m,
[34m40.979898069620155[39;49;00m
],
[
[34m33.750000000000014[39;49;00m,
[34m36.5978891330702[39;49;00m
]
]
]
},
[94m"properties"[39;49;00m: {
[94m"id"[39;49;00m: [33m"#geo-disc-tile-122110"[39;49;00m,
[94m"href"[39;49;00m: [33m"https://opencontext.org/search/?disc-geotile=122110&q=roman"[39;49;00m,
[94m"label"[39;49;00m: [33m"Discovery region (18)"[39;49;00m,
[94m"feature-type"[39;49;00m: [33m"discovery region (facet)"[39;49;00m,
[94m"count"[39;49;00m: [34m51932[39;49;00m,
[94m"early bce/ce"[39;49;00m: [34m-337000[39;49;00m,
[94m"late bce/ce"[39;49;00m: [34m1950[39;49;00m
}
},
{
[94m"id"[39;49;00m: [33m"https://opencontext.org/search/?disc-geotile=122111&q=roman"[39;49;00m,
[94m"json"[39;49;00m: [33m"https://opencontext.org/search/.json?disc-geotile=122111&q=roman"[39;49;00m,
[94m"count"[39;49;00m: [34m15[39;49;00m,
[94m"type"[39;49;00m: [33m"Feature"[39;49;00m,
[94m"category"[39;49;00m: [33m"oc-api:geo-facet"[39;49;00m,
[94m"when"[39;49;00m: {
[94m"id"[39;49;00m: [33m"#event-122111"[39;49;00m,
[94m"type"[39;49;00m: [33m"oc-gen:formation-use-life"[39;49;00m,
[94m"start"[39;49;00m: [33m"-336999"[39;49;00m,
[94m"stop"[39;49;00m: [33m"1950"[39;49;00m
},
[94m"geometry"[39;49;00m: {
[94m"id"[39;49;00m: [33m"#geo-disc-tile-geom-122111"[39;49;00m,
[94m"type"[39;49;00m: [33m"Polygon"[39;49;00m,
[94m"coordinates"[39;49;00m: [
[
[
[34m39.37499999999999[39;49;00m,
[34m36.5978891330702[39;49;00m
],
[
[34m45.00000000000001[39;49;00m,
[34m36.5978891330702[39;49;00m
],
[
[34m45.00000000000001[39;49;00m,
[34m40.979898069620155[39;49;00m
],
[
[34m39.37499999999999[39;49;00m,
[34m40.979898069620155[39;49;00m
],
[
[34m39.37499999999999[39;49;00m,
[34m36.5978891330702[39;49;00m
]
]
]
},
[94m"properties"[39;49;00m: {
[94m"id"[39;49;00m: [33m"#geo-disc-tile-122111"[39;49;00m,
[94m"href"[39;49;00m: [33m"https://opencontext.org/search/?disc-geotile=122111&q=roman"[39;49;00m,
[94m"label"[39;49;00m: [33m"Discovery region (19)"[39;49;00m,
[94m"feature-type"[39;49;00m: [33m"discovery region (facet)"[39;49;00m,
[94m"count"[39;49;00m: [34m15[39;49;00m,
[94m"early bce/ce"[39;49;00m: [34m-337000[39;49;00m,
[94m"late bce/ce"[39;49;00m: [34m1950[39;49;00m
}
},
{
[94m"id"[39;49;00m: [33m"https://opencontext.org/search/?disc-geotile=122112&q=roman"[39;49;00m,
[94m"json"[39;49;00m: [33m"https://opencontext.org/search/.json?disc-geotile=122112&q=roman"[39;49;00m,
[94m"count"[39;49;00m: [34m78[39;49;00m,
[94m"type"[39;49;00m: [33m"Feature"[39;49;00m,
[94m"category"[39;49;00m: [33m"oc-api:geo-facet"[39;49;00m,
[94m"when"[39;49;00m: {
[94m"id"[39;49;00m: [33m"#event-122112"[39;49;00m,
[94m"type"[39;49;00m: [33m"oc-gen:formation-use-life"[39;49;00m,
[94m"start"[39;49;00m: [33m"-336999"[39;49;00m,
[94m"stop"[39;49;00m: [33m"1950"[39;49;00m
},
[94m"geometry"[39;49;00m: {
[94m"id"[39;49;00m: [33m"#geo-disc-tile-geom-122112"[39;49;00m,
[94m"type"[39;49;00m: [33m"Polygon"[39;49;00m,
[94m"coordinates"[39;49;00m: [
[
[
[34m33.750000000000014[39;49;00m,
[34m31.952162238024975[39;49;00m
],
[
[34m39.37499999999999[39;49;00m,
[34m31.952162238024975[39;49;00m
],
[
[34m39.37499999999999[39;49;00m,
[34m36.5978891330702[39;49;00m
],
[
[34m33.750000000000014[39;49;00m,
[34m36.5978891330702[39;49;00m
],
[
[34m33.750000000000014[39;49;00m,
[34m31.952162238024975[39;49;00m
]
]
]
},
[94m"properties"[39;49;00m: {
[94m"id"[39;49;00m: [33m"#geo-disc-tile-122112"[39;49;00m,
[94m"href"[39;49;00m: [33m"https://opencontext.org/search/?disc-geotile=122112&q=roman"[39;49;00m,
[94m"label"[39;49;00m: [33m"Discovery region (20)"[39;49;00m,
[94m"feature-type"[39;49;00m: [33m"discovery region (facet)"[39;49;00m,
[94m"count"[39;49;00m: [34m78[39;49;00m,
[94m"early bce/ce"[39;49;00m: [34m-337000[39;49;00m,
[94m"late bce/ce"[39;49;00m: [34m1950[39;49;00m
}
},
{
[94m"id"[39;49;00m: [33m"https://opencontext.org/search/?disc-geotile=122121&q=roman"[39;49;00m,
[94m"json"[39;49;00m: [33m"https://opencontext.org/search/.json?disc-geotile=122121&q=roman"[39;49;00m,
[94m"count"[39;49;00m: [34m181[39;49;00m,
[94m"type"[39;49;00m: [33m"Feature"[39;49;00m,
[94m"category"[39;49;00m: [33m"oc-api:geo-facet"[39;49;00m,
[94m"when"[39;49;00m: {
[94m"id"[39;49;00m: [33m"#event-122121"[39;49;00m,
[94m"type"[39;49;00m: [33m"oc-gen:formation-use-life"[39;49;00m,
[94m"start"[39;49;00m: [33m"-336999"[39;49;00m,
[94m"stop"[39;49;00m: [33m"1950"[39;49;00m
},
[94m"geometry"[39;49;00m: {
[94m"id"[39;49;00m: [33m"#geo-disc-tile-geom-122121"[39;49;00m,
[94m"type"[39;49;00m: [33m"Polygon"[39;49;00m,
[94m"coordinates"[39;49;00m: [
[
[
[34m28.125[39;49;00m,
[34m27.059125784374068[39;49;00m
],
[
[34m33.750000000000014[39;49;00m,
[34m27.059125784374068[39;49;00m
],
[
[34m33.750000000000014[39;49;00m,
[34m31.952162238024975[39;49;00m
],
[
[34m28.125[39;49;00m,
[34m31.952162238024975[39;49;00m
],
[
[34m28.125[39;49;00m,
[34m27.059125784374068[39;49;00m
]
]
]
},
[94m"properties"[39;49;00m: {
[94m"id"[39;49;00m: [33m"#geo-disc-tile-122121"[39;49;00m,
[94m"href"[39;49;00m: [33m"https://opencontext.org/search/?disc-geotile=122121&q=roman"[39;49;00m,
[94m"label"[39;49;00m: [33m"Discovery region (21)"[39;49;00m,
[94m"feature-type"[39;49;00m: [33m"discovery region (facet)"[39;49;00m,
[94m"count"[39;49;00m: [34m181[39;49;00m,
[94m"early bce/ce"[39;49;00m: [34m-337000[39;49;00m,
[94m"late bce/ce"[39;49;00m: [34m1950[39;49;00m
}
},
{
[94m"id"[39;49;00m: [33m"https://opencontext.org/search/?disc-geotile=122130&q=roman"[39;49;00m,
[94m"json"[39;49;00m: [33m"https://opencontext.org/search/.json?disc-geotile=122130&q=roman"[39;49;00m,
[94m"count"[39;49;00m: [34m2981[39;49;00m,
[94m"type"[39;49;00m: [33m"Feature"[39;49;00m,
[94m"category"[39;49;00m: [33m"oc-api:geo-facet"[39;49;00m,
[94m"when"[39;49;00m: {
[94m"id"[39;49;00m: [33m"#event-122130"[39;49;00m,
[94m"type"[39;49;00m: [33m"oc-gen:formation-use-life"[39;49;00m,
[94m"start"[39;49;00m: [33m"-336999"[39;49;00m,
[94m"stop"[39;49;00m: [33m"1950"[39;49;00m
},
[94m"geometry"[39;49;00m: {
[94m"id"[39;49;00m: [33m"#geo-disc-tile-geom-122130"[39;49;00m,
[94m"type"[39;49;00m: [33m"Polygon"[39;49;00m,
[94m"coordinates"[39;49;00m: [
[
[
[34m33.750000000000014[39;49;00m,
[34m27.059125784374068[39;49;00m
],
[
[34m39.37499999999999[39;49;00m,
[34m27.059125784374068[39;49;00m
],
[
[34m39.37499999999999[39;49;00m,
[34m31.952162238024975[39;49;00m
],
[
[34m33.750000000000014[39;49;00m,
[34m31.952162238024975[39;49;00m
],
[
[34m33.750000000000014[39;49;00m,
[34m27.059125784374068[39;49;00m
]
]
]
},
[94m"properties"[39;49;00m: {
[94m"id"[39;49;00m: [33m"#geo-disc-tile-122130"[39;49;00m,
[94m"href"[39;49;00m: [33m"https://opencontext.org/search/?disc-geotile=122130&q=roman"[39;49;00m,
[94m"label"[39;49;00m: [33m"Discovery region (22)"[39;49;00m,
[94m"feature-type"[39;49;00m: [33m"discovery region (facet)"[39;49;00m,
[94m"count"[39;49;00m: [34m2981[39;49;00m,
[94m"early bce/ce"[39;49;00m: [34m-337000[39;49;00m,
[94m"late bce/ce"[39;49;00m: [34m1950[39;49;00m
}
},
{
[94m"id"[39;49;00m: [33m"https://opencontext.org/search/?disc-geotile=122131&q=roman"[39;49;00m,
[94m"json"[39;49;00m: [33m"https://opencontext.org/search/.json?disc-geotile=122131&q=roman"[39;49;00m,
[94m"count"[39;49;00m: [34m7[39;49;00m,
[94m"type"[39;49;00m: [33m"Feature"[39;49;00m,
[94m"category"[39;49;00m: [33m"oc-api:geo-facet"[39;49;00m,
[94m"when"[39;49;00m: {
[94m"id"[39;49;00m: [33m"#event-122131"[39;49;00m,
[94m"type"[39;49;00m: [33m"oc-gen:formation-use-life"[39;49;00m,
[94m"start"[39;49;00m: [33m"-336999"[39;49;00m,
[94m"stop"[39;49;00m: [33m"1950"[39;49;00m
},
[94m"geometry"[39;49;00m: {
[94m"id"[39;49;00m: [33m"#geo-disc-tile-geom-122131"[39;49;00m,
[94m"type"[39;49;00m: [33m"Polygon"[39;49;00m,
[94m"coordinates"[39;49;00m: [
[
[
[34m39.37499999999999[39;49;00m,
[34m27.059125784374068[39;49;00m
],
[
[34m45.00000000000001[39;49;00m,
[34m27.059125784374068[39;49;00m
],
[
[34m45.00000000000001[39;49;00m,
[34m31.952162238024975[39;49;00m
],
[
[34m39.37499999999999[39;49;00m,
[34m31.952162238024975[39;49;00m
],
[
[34m39.37499999999999[39;49;00m,
[34m27.059125784374068[39;49;00m
]
]
]
},
[94m"properties"[39;49;00m: {
[94m"id"[39;49;00m: [33m"#geo-disc-tile-122131"[39;49;00m,
[94m"href"[39;49;00m: [33m"https://opencontext.org/search/?disc-geotile=122131&q=roman"[39;49;00m,
[94m"label"[39;49;00m: [33m"Discovery region (23)"[39;49;00m,
[94m"feature-type"[39;49;00m: [33m"discovery region (facet)"[39;49;00m,
[94m"count"[39;49;00m: [34m7[39;49;00m,
[94m"early bce/ce"[39;49;00m: [34m-337000[39;49;00m,
[94m"late bce/ce"[39;49;00m: [34m1950[39;49;00m
}
},
{
[94m"id"[39;49;00m: [33m"https://opencontext.org/search/?disc-geotile=123020&q=roman"[39;49;00m,
[94m"json"[39;49;00m: [33m"https://opencontext.org/search/.json?disc-geotile=123020&q=roman"[39;49;00m,
[94m"count"[39;49;00m: [34m1[39;49;00m,
[94m"type"[39;49;00m: [33m"Feature"[39;49;00m,
[94m"category"[39;49;00m: [33m"oc-api:geo-facet"[39;49;00m,
[94m"when"[39;49;00m: {
[94m"id"[39;49;00m: [33m"#event-123020"[39;49;00m,
[94m"type"[39;49;00m: [33m"oc-gen:formation-use-life"[39;49;00m,
[94m"start"[39;49;00m: [33m"-336999"[39;49;00m,
[94m"stop"[39;49;00m: [33m"1950"[39;49;00m
},
[94m"geometry"[39;49;00m: {
[94m"id"[39;49;00m: [33m"#geo-disc-tile-geom-123020"[39;49;00m,
[94m"type"[39;49;00m: [33m"Polygon"[39;49;00m,
[94m"coordinates"[39;49;00m: [
[
[
[34m45.00000000000001[39;49;00m,
[34m27.059125784374068[39;49;00m
],
[
[34m50.62499999999999[39;49;00m,
[34m27.059125784374068[39;49;00m
],
[
[34m50.62499999999999[39;49;00m,
[34m31.952162238024975[39;49;00m
],
[
[34m45.00000000000001[39;49;00m,
[34m31.952162238024975[39;49;00m
],
[
[34m45.00000000000001[39;49;00m,
[34m27.059125784374068[39;49;00m
]
]
]
},
[94m"properties"[39;49;00m: {
[94m"id"[39;49;00m: [33m"#geo-disc-tile-123020"[39;49;00m,
[94m"href"[39;49;00m: [33m"https://opencontext.org/search/?disc-geotile=123020&q=roman"[39;49;00m,
[94m"label"[39;49;00m: [33m"Discovery region (24)"[39;49;00m,
[94m"feature-type"[39;49;00m: [33m"discovery region (facet)"[39;49;00m,
[94m"count"[39;49;00m: [34m1[39;49;00m,
[94m"early bce/ce"[39;49;00m: [34m-337000[39;49;00m,
[94m"late bce/ce"[39;49;00m: [34m1950[39;49;00m
}
},
{
[94m"id"[39;49;00m: [33m"#record-1-of-77460"[39;49;00m,
[94m"label"[39;49;00m: [33m"Drawing d-nw-004"[39;49;00m,
[94m"rdfs:isDefinedBy"[39;49;00m: [33m"http://opencontext.org/media/433b76a4-d91e-48ec-9821-70dbaf510957"[39;49;00m,
[94m"type"[39;49;00m: [33m"Feature"[39;49;00m,
[94m"category"[39;49;00m: [33m"oc-api:geo-record"[39;49;00m,
[94m"geometry"[39;49;00m: {
[94m"id"[39;49;00m: [33m"#geo-rec-geom-1-of-77460"[39;49;00m,
[94m"type"[39;49;00m: [33m"Point"[39;49;00m,
[94m"coordinates"[39;49;00m: [
[34m31.137304256746678[39;49;00m,
[34m29.975380310025677[39;49;00m
]
},
[94m"properties"[39;49;00m: {
[94m"id"[39;49;00m: [33m"#rec-1-of-77460"[39;49;00m,
[94m"feature-type"[39;49;00m: [33m"item record"[39;49;00m,
[94m"uri"[39;49;00m: [33m"http://opencontext.org/media/433b76a4-d91e-48ec-9821-70dbaf510957"[39;49;00m,
[94m"href"[39;49;00m: [33m"https://opencontext.org/media/433b76a4-d91e-48ec-9821-70dbaf510957"[39;49;00m,
[94m"citation uri"[39;49;00m: [34mfalse[39;49;00m,
[94m"label"[39;49;00m: [33m"Drawing d-nw-004"[39;49;00m,
[94m"project label"[39;49;00m: [33m"ARCE Sphinx Project 1979-1983 Archive"[39;49;00m,
[94m"project href"[39;49;00m: [33m"https://opencontext.org/projects/141e814a-ba2d-4560-879f-80f1afb019e9"[39;49;00m,
[94m"context label"[39;49;00m: [33m"Egypt/Giza/Sphinx Amphitheater/Sphinx Ditch/Sphinx Northwest/Feature FNWa2"[39;49;00m,
[94m"context href"[39;49;00m: [33m"https://opencontext.org/subjects/cecae52e-d854-4798-ba27-03edfb538bd0"[39;49;00m,
[94m"early bce/ce"[39;49;00m: [34mfalse[39;49;00m,
[94m"late bce/ce"[39;49;00m: [34mfalse[39;49;00m,
[94m"item category"[39;49;00m: [33m"Image"[39;49;00m,
[94m"snippet"[39;49;00m: [33m" small (modern and <em>Roman</em>) veneer stones immediately west of the N Large Box (\"caison\"). Profile shows the"[39;49;00m,
[94m"thumbnail"[39;49;00m: [33m"https://artiraq.org/static/opencontext/giza-sphinx/thumbs/Drawings/101-drawing-d-nw-004.jpg"[39;49;00m,
[94m"published"[39;49;00m: [33m"2017-12-23T01:03:45Z"[39;49;00m,
[94m"updated"[39;49;00m: [33m"2018-03-23T01:20:16Z"[39;49;00m
}
},
{
[94m"id"[39;49;00m: [33m"#record-2-of-77460"[39;49;00m,
[94m"label"[39;49;00m: [33m"Drawing d-nw-001"[39;49;00m,
[94m"rdfs:isDefinedBy"[39;49;00m: [33m"http://opencontext.org/media/607284d3-0506-4db5-b491-6a567e0544c0"[39;49;00m,
[94m"type"[39;49;00m: [33m"Feature"[39;49;00m,
[94m"category"[39;49;00m: [33m"oc-api:geo-record"[39;49;00m,
[94m"geometry"[39;49;00m: {
[94m"id"[39;49;00m: [33m"#geo-rec-geom-2-of-77460"[39;49;00m,
[94m"type"[39;49;00m: [33m"Point"[39;49;00m,
[94m"coordinates"[39;49;00m: [
[34m31.137304256746678[39;49;00m,
[34m29.975380310025677[39;49;00m
]
},
[94m"properties"[39;49;00m: {
[94m"id"[39;49;00m: [33m"#rec-2-of-77460"[39;49;00m,
[94m"feature-type"[39;49;00m: [33m"item record"[39;49;00m,
[94m"uri"[39;49;00m: [33m"http://opencontext.org/media/607284d3-0506-4db5-b491-6a567e0544c0"[39;49;00m,
[94m"href"[39;49;00m: [33m"https://opencontext.org/media/607284d3-0506-4db5-b491-6a567e0544c0"[39;49;00m,
[94m"citation uri"[39;49;00m: [34mfalse[39;49;00m,
[94m"label"[39;49;00m: [33m"Drawing d-nw-001"[39;49;00m,
[94m"project label"[39;49;00m: [33m"ARCE Sphinx Project 1979-1983 Archive"[39;49;00m,
[94m"project href"[39;49;00m: [33m"https://opencontext.org/projects/141e814a-ba2d-4560-879f-80f1afb019e9"[39;49;00m,
[94m"context label"[39;49;00m: [33m"Egypt/Giza/Sphinx Amphitheater/Sphinx Ditch/Sphinx Northwest/Feature FNWa2"[39;49;00m,
[94m"context href"[39;49;00m: [33m"https://opencontext.org/subjects/cecae52e-d854-4798-ba27-03edfb538bd0"[39;49;00m,
[94m"early bce/ce"[39;49;00m: [34mfalse[39;49;00m,
[94m"late bce/ce"[39;49;00m: [34mfalse[39;49;00m,
[94m"item category"[39;49;00m: [33m"Image"[39;49;00m,
[94m"snippet"[39;49;00m: [33m" \n Description NWa2: Large fine limestone block showing after removal of small (modern and <em>Roman</em>) veneer stones"[39;49;00m,
[94m"thumbnail"[39;49;00m: [33m"https://artiraq.org/static/opencontext/giza-sphinx/thumbs/Drawings/101-drawing-d-nw-001.jpg"[39;49;00m,
[94m"published"[39;49;00m: [33m"2017-12-23T01:03:41Z"[39;49;00m,
[94m"updated"[39;49;00m: [33m"2018-03-23T01:22:16Z"[39;49;00m
}
},
{
[94m"id"[39;49;00m: [33m"#record-3-of-77460"[39;49;00m,
[94m"label"[39;49;00m: [33m"Drawing d-n-008"[39;49;00m,
[94m"rdfs:isDefinedBy"[39;49;00m: [33m"http://opencontext.org/media/99478565-701f-4329-be42-dce415b9b68a"[39;49;00m,
[94m"type"[39;49;00m: [33m"Feature"[39;49;00m,
[94m"category"[39;49;00m: [33m"oc-api:geo-record"[39;49;00m,
[94m"geometry"[39;49;00m: {
[94m"id"[39;49;00m: [33m"#geo-rec-geom-3-of-77460"[39;49;00m,
[94m"type"[39;49;00m: [33m"Point"[39;49;00m,
[94m"coordinates"[39;49;00m: [
[34m31.137937830616877[39;49;00m,
[34m29.97526657483614[39;49;00m
]
},
[94m"properties"[39;49;00m: {
[94m"id"[39;49;00m: [33m"#rec-3-of-77460"[39;49;00m,
[94m"feature-type"[39;49;00m: [33m"item record"[39;49;00m,
[94m"uri"[39;49;00m: [33m"http://opencontext.org/media/99478565-701f-4329-be42-dce415b9b68a"[39;49;00m,
[94m"href"[39;49;00m: [33m"https://opencontext.org/media/99478565-701f-4329-be42-dce415b9b68a"[39;49;00m,
[94m"citation uri"[39;49;00m: [34mfalse[39;49;00m,
[94m"label"[39;49;00m: [33m"Drawing d-n-008"[39;49;00m,
[94m"project label"[39;49;00m: [33m"ARCE Sphinx Project 1979-1983 Archive"[39;49;00m,
[94m"project href"[39;49;00m: [33m"https://opencontext.org/projects/141e814a-ba2d-4560-879f-80f1afb019e9"[39;49;00m,
[94m"context label"[39;49;00m: [33m"Egypt/Giza/Sphinx Amphitheater/Sphinx Ditch/Sphinx East/Removal 14"[39;49;00m,
[94m"context href"[39;49;00m: [33m"https://opencontext.org/subjects/37cbe2aa-1877-4eba-b3a5-45c43d13abc9"[39;49;00m,
[94m"early bce/ce"[39;49;00m: [34mfalse[39;49;00m,
[94m"late bce/ce"[39;49;00m: [34mfalse[39;49;00m,
[94m"item category"[39;49;00m: [33m"Image"[39;49;00m,
[94m"snippet"[39;49;00m: [33m" R14 was the removal of a small amount of deposits immediately south of the <em>Roman</em> Stairs, between the"[39;49;00m,
[94m"thumbnail"[39;49;00m: [33m"https://artiraq.org/static/opencontext/giza-sphinx/thumbs/Drawings/101-drawing-d-n-008.jpg"[39;49;00m,
[94m"published"[39;49;00m: [33m"2017-12-23T01:02:26Z"[39;49;00m,
[94m"updated"[39;49;00m: [33m"2018-03-23T01:20:12Z"[39;49;00m
}
},
{
[94m"id"[39;49;00m: [33m"#record-4-of-77460"[39;49;00m,
[94m"label"[39;49;00m: [33m"Drawing d-e-016"[39;49;00m,
[94m"rdfs:isDefinedBy"[39;49;00m: [33m"http://opencontext.org/media/27d3136b-3023-45e4-ae86-901fa8ad8342"[39;49;00m,
[94m"type"[39;49;00m: [33m"Feature"[39;49;00m,
[94m"category"[39;49;00m: [33m"oc-api:geo-record"[39;49;00m,
[94m"geometry"[39;49;00m: {
[94m"id"[39;49;00m: [33m"#geo-rec-geom-4-of-77460"[39;49;00m,
[94m"type"[39;49;00m: [33m"Point"[39;49;00m,
[94m"coordinates"[39;49;00m: [
[34m31.137937830616877[39;49;00m,
[34m29.97526657483614[39;49;00m
]
},
[94m"properties"[39;49;00m: {
[94m"id"[39;49;00m: [33m"#rec-4-of-77460"[39;49;00m,
[94m"feature-type"[39;49;00m: [33m"item record"[39;49;00m,
[94m"uri"[39;49;00m: [33m"http://opencontext.org/media/27d3136b-3023-45e4-ae86-901fa8ad8342"[39;49;00m,
[94m"href"[39;49;00m: [33m"https://opencontext.org/media/27d3136b-3023-45e4-ae86-901fa8ad8342"[39;49;00m,
[94m"citation uri"[39;49;00m: [34mfalse[39;49;00m,
[94m"label"[39;49;00m: [33m"Drawing d-e-016"[39;49;00m,
[94m"project label"[39;49;00m: [33m"ARCE Sphinx Project 1979-1983 Archive"[39;49;00m,
[94m"project href"[39;49;00m: [33m"https://opencontext.org/projects/141e814a-ba2d-4560-879f-80f1afb019e9"[39;49;00m,
[94m"context label"[39;49;00m: [33m"Egypt/Giza/Sphinx Amphitheater/Sphinx Ditch/Sphinx East/Removal 14"[39;49;00m,
[94m"context href"[39;49;00m: [33m"https://opencontext.org/subjects/37cbe2aa-1877-4eba-b3a5-45c43d13abc9"[39;49;00m,
[94m"early bce/ce"[39;49;00m: [34mfalse[39;49;00m,
[94m"late bce/ce"[39;49;00m: [34mfalse[39;49;00m,
[94m"item category"[39;49;00m: [33m"Image"[39;49;00m,
[94m"snippet"[39;49;00m: [33m" immediately south of the <em>Roman</em> Stairs, between the stairs and the Sound and Light box and a core block of the"[39;49;00m,
[94m"thumbnail"[39;49;00m: [33m"https://artiraq.org/static/opencontext/giza-sphinx/thumbs/Drawings/d-e-016.jpg"[39;49;00m,
[94m"published"[39;49;00m: [33m"2017-12-23T01:01:31Z"[39;49;00m,
[94m"updated"[39;49;00m: [33m"2018-03-23T01:05:18Z"[39;49;00m
}
},
{
[94m"id"[39;49;00m: [33m"#record-5-of-77460"[39;49;00m,
[94m"label"[39;49;00m: [33m"Drawing d-e-017"[39;49;00m,
[94m"rdfs:isDefinedBy"[39;49;00m: [33m"http://opencontext.org/media/4f1437de-aef1-4f43-8c0b-47eb8e7fe850"[39;49;00m,
[94m"type"[39;49;00m: [33m"Feature"[39;49;00m,
[94m"category"[39;49;00m: [33m"oc-api:geo-record"[39;49;00m,
[94m"geometry"[39;49;00m: {
[94m"id"[39;49;00m: [33m"#geo-rec-geom-5-of-77460"[39;49;00m,
[94m"type"[39;49;00m: [33m"Point"[39;49;00m,
[94m"coordinates"[39;49;00m: [
[34m31.137937830616877[39;49;00m,
[34m29.97526657483614[39;49;00m
]
},
[94m"properties"[39;49;00m: {
[94m"id"[39;49;00m: [33m"#rec-5-of-77460"[39;49;00m,
[94m"feature-type"[39;49;00m: [33m"item record"[39;49;00m,
[94m"uri"[39;49;00m: [33m"http://opencontext.org/media/4f1437de-aef1-4f43-8c0b-47eb8e7fe850"[39;49;00m,
[94m"href"[39;49;00m: [33m"https://opencontext.org/media/4f1437de-aef1-4f43-8c0b-47eb8e7fe850"[39;49;00m,
[94m"citation uri"[39;49;00m: [34mfalse[39;49;00m,
[94m"label"[39;49;00m: [33m"Drawing d-e-017"[39;49;00m,
[94m"project label"[39;49;00m: [33m"ARCE Sphinx Project 1979-1983 Archive"[39;49;00m,
[94m"project href"[39;49;00m: [33m"https://opencontext.org/projects/141e814a-ba2d-4560-879f-80f1afb019e9"[39;49;00m,
[94m"context label"[39;49;00m: [33m"Egypt/Giza/Sphinx Amphitheater/Sphinx Ditch/Sphinx East/Removal 14"[39;49;00m,
[94m"context href"[39;49;00m: [33m"https://opencontext.org/subjects/37cbe2aa-1877-4eba-b3a5-45c43d13abc9"[39;49;00m,
[94m"early bce/ce"[39;49;00m: [34mfalse[39;49;00m,
[94m"late bce/ce"[39;49;00m: [34mfalse[39;49;00m,
[94m"item category"[39;49;00m: [33m"Image"[39;49;00m,
[94m"snippet"[39;49;00m: [33m" <em>Roman</em> Stairs, between the stairs and the Sound and Light box and a core block of the west Sphinx Temple"[39;49;00m,
[94m"thumbnail"[39;49;00m: [33m"https://artiraq.org/static/opencontext/giza-sphinx/thumbs/Drawings/d-e-017.jpg"[39;49;00m,
[94m"published"[39;49;00m: [33m"2017-12-23T01:01:31Z"[39;49;00m,
[94m"updated"[39;49;00m: [33m"2018-03-23T01:05:51Z"[39;49;00m
}
},
{
[94m"id"[39;49;00m: [33m"#record-6-of-77460"[39;49;00m,
[94m"label"[39;49;00m: [33m"Drawing d-e-019"[39;49;00m,
[94m"rdfs:isDefinedBy"[39;49;00m: [33m"http://opencontext.org/media/846687bc-2e8c-4b44-876c-4873e4278250"[39;49;00m,
[94m"type"[39;49;00m: [33m"Feature"[39;49;00m,
[94m"category"[39;49;00m: [33m"oc-api:geo-record"[39;49;00m,
[94m"geometry"[39;49;00m: {
[94m"id"[39;49;00m: [33m"#geo-rec-geom-6-of-77460"[39;49;00m,
[94m"type"[39;49;00m: [33m"Point"[39;49;00m,
[94m"coordinates"[39;49;00m: [
[34m31.137937830616877[39;49;00m,
[34m29.97526657483614[39;49;00m
]
},
[94m"properties"[39;49;00m: {
[94m"id"[39;49;00m: [33m"#rec-6-of-77460"[39;49;00m,
[94m"feature-type"[39;49;00m: [33m"item record"[39;49;00m,
[94m"uri"[39;49;00m: [33m"http://opencontext.org/media/846687bc-2e8c-4b44-876c-4873e4278250"[39;49;00m,
[94m"href"[39;49;00m: [33m"https://opencontext.org/media/846687bc-2e8c-4b44-876c-4873e4278250"[39;49;00m,
[94m"citation uri"[39;49;00m: [34mfalse[39;49;00m,
[94m"label"[39;49;00m: [33m"Drawing d-e-019"[39;49;00m,
[94m"project label"[39;49;00m: [33m"ARCE Sphinx Project 1979-1983 Archive"[39;49;00m,
[94m"project href"[39;49;00m: [33m"https://opencontext.org/projects/141e814a-ba2d-4560-879f-80f1afb019e9"[39;49;00m,
[94m"context label"[39;49;00m: [33m"Egypt/Giza/Sphinx Amphitheater/Sphinx Ditch/Sphinx East/Removal 14"[39;49;00m,
[94m"context href"[39;49;00m: [33m"https://opencontext.org/subjects/37cbe2aa-1877-4eba-b3a5-45c43d13abc9"[39;49;00m,
[94m"early bce/ce"[39;49;00m: [34mfalse[39;49;00m,
[94m"late bce/ce"[39;49;00m: [34mfalse[39;49;00m,
[94m"item category"[39;49;00m: [33m"Image"[39;49;00m,
[94m"snippet"[39;49;00m: [33m" R14 was the removal of a small amount of deposits immediately south of the <em>Roman</em> Stairs, between the"[39;49;00m,
[94m"thumbnail"[39;49;00m: [33m"https://artiraq.org/static/opencontext/giza-sphinx/thumbs/Drawings/d-e-019.jpg"[39;49;00m,
[94m"published"[39;49;00m: [33m"2017-12-23T01:01:33Z"[39;49;00m,
[94m"updated"[39;49;00m: [33m"2018-03-23T01:02:11Z"[39;49;00m
}
},
{
[94m"id"[39;49;00m: [33m"#record-7-of-77460"[39;49;00m,
[94m"label"[39;49;00m: [33m"Drawing d-e-022"[39;49;00m,
[94m"rdfs:isDefinedBy"[39;49;00m: [33m"http://opencontext.org/media/b9a02c98-e11d-47c7-81c4-db140c1712ed"[39;49;00m,
[94m"type"[39;49;00m: [33m"Feature"[39;49;00m,
[94m"category"[39;49;00m: [33m"oc-api:geo-record"[39;49;00m,
[94m"geometry"[39;49;00m: {
[94m"id"[39;49;00m: [33m"#geo-rec-geom-7-of-77460"[39;49;00m,
[94m"type"[39;49;00m: [33m"Point"[39;49;00m,
[94m"coordinates"[39;49;00m: [
[34m31.137937830616877[39;49;00m,
[34m29.97526657483614[39;49;00m
]
},
[94m"properties"[39;49;00m: {
[94m"id"[39;49;00m: [33m"#rec-7-of-77460"[39;49;00m,
[94m"feature-type"[39;49;00m: [33m"item record"[39;49;00m,
[94m"uri"[39;49;00m: [33m"http://opencontext.org/media/b9a02c98-e11d-47c7-81c4-db140c1712ed"[39;49;00m,
[94m"href"[39;49;00m: [33m"https://opencontext.org/media/b9a02c98-e11d-47c7-81c4-db140c1712ed"[39;49;00m,
[94m"citation uri"[39;49;00m: [34mfalse[39;49;00m,
[94m"label"[39;49;00m: [33m"Drawing d-e-022"[39;49;00m,
[94m"project label"[39;49;00m: [33m"ARCE Sphinx Project 1979-1983 Archive"[39;49;00m,
[94m"project href"[39;49;00m: [33m"https://opencontext.org/projects/141e814a-ba2d-4560-879f-80f1afb019e9"[39;49;00m,
[94m"context label"[39;49;00m: [33m"Egypt/Giza/Sphinx Amphitheater/Sphinx Ditch/Sphinx East/Removal 14"[39;49;00m,
[94m"context href"[39;49;00m: [33m"https://opencontext.org/subjects/37cbe2aa-1877-4eba-b3a5-45c43d13abc9"[39;49;00m,
[94m"early bce/ce"[39;49;00m: [34mfalse[39;49;00m,
[94m"late bce/ce"[39;49;00m: [34mfalse[39;49;00m,
[94m"item category"[39;49;00m: [33m"Image"[39;49;00m,
[94m"snippet"[39;49;00m: [33m" of <em>Roman</em> pavement and SE of South Forepaw \n \n Year \n Resource Type Drawings \n Keywords Depression"[39;49;00m,
[94m"thumbnail"[39;49;00m: [33m"https://artiraq.org/static/opencontext/giza-sphinx/thumbs/Drawings/d-e-022.jpg"[39;49;00m,
[94m"published"[39;49;00m: [33m"2017-12-23T01:01:35Z"[39;49;00m,
[94m"updated"[39;49;00m: [33m"2018-03-23T01:22:18Z"[39;49;00m
}
},
{
[94m"id"[39;49;00m: [33m"#record-8-of-77460"[39;49;00m,
[94m"label"[39;49;00m: [33m"2771"[39;49;00m,
[94m"rdfs:isDefinedBy"[39;49;00m: [33m"http://opencontext.org/media/07c8b5d2-dab1-4b21-ae90-ceb0ffe515b0"[39;49;00m,
[94m"type"[39;49;00m: [33m"Feature"[39;49;00m,
[94m"category"[39;49;00m: [33m"oc-api:geo-record"[39;49;00m,
[94m"geometry"[39;49;00m: {
[94m"id"[39;49;00m: [33m"#geo-rec-geom-8-of-77460"[39;49;00m,
[94m"type"[39;49;00m: [33m"Point"[39;49;00m,
[94m"coordinates"[39;49;00m: [
[34m12.719540897587173[39;49;00m,
[34m41.88768286105927[39;49;00m
]
},
[94m"when"[39;49;00m: {
[94m"id"[39;49;00m: [33m"#event-rec-when-8-of-77460"[39;49;00m,
[94m"type"[39;49;00m: [33m"oc-gen:formation-use-life"[39;49;00m,
[94m"start"[39;49;00m: [33m"-0499"[39;49;00m,
[94m"stop"[39;49;00m: [33m"-0049"[39;49;00m
},
[94m"properties"[39;49;00m: {
[94m"id"[39;49;00m: [33m"#rec-8-of-77460"[39;49;00m,
[94m"feature-type"[39;49;00m: [33m"item record"[39;49;00m,
[94m"uri"[39;49;00m: [33m"http://opencontext.org/media/07c8b5d2-dab1-4b21-ae90-ceb0ffe515b0"[39;49;00m,
[94m"href"[39;49;00m: [33m"https://opencontext.org/media/07c8b5d2-dab1-4b21-ae90-ceb0ffe515b0"[39;49;00m,
[94m"citation uri"[39;49;00m: [34mfalse[39;49;00m,
[94m"label"[39;49;00m: [33m"2771"[39;49;00m,
[94m"project label"[39;49;00m: [33m"The Gabii Project"[39;49;00m,
[94m"project href"[39;49;00m: [33m"https://opencontext.org/projects/3585b372-8d2d-436c-9a4c-b5c10fce3ccd"[39;49;00m,
[94m"context label"[39;49;00m: [33m"Italy/Gabii/Area B/Unit 1162"[39;49;00m,
[94m"context href"[39;49;00m: [33m"https://opencontext.org/subjects/98014789-c3a6-48c6-9da8-8598d1c026eb"[39;49;00m,
[94m"early bce/ce"[39;49;00m: [34m-500.0[39;49;00m,
[94m"late bce/ce"[39;49;00m: [34m-50.0[39;49;00m,
[94m"item category"[39;49;00m: [33m"Image"[39;49;00m,
[94m"snippet"[39;49;00m: [33m" Priority Medium \n File Attachment Filename 2771.pdf \n \n<em>Roman</em> Republican\nhttp://n2t.net/ark:/99152"[39;49;00m,
[94m"thumbnail"[39;49;00m: [33m"https://opencontext.org/static/oc/images/icons/3d-noun-37529.png"[39;49;00m,
[94m"published"[39;49;00m: [33m"2017-06-04T23:27:35Z"[39;49;00m,
[94m"updated"[39;49;00m: [33m"2018-01-31T08:15:43Z"[39;49;00m
}
},
{
[94m"id"[39;49;00m: [33m"#record-9-of-77460"[39;49;00m,
[94m"label"[39;49;00m: [33m"Drawing d-e-020"[39;49;00m,
[94m"rdfs:isDefinedBy"[39;49;00m: [33m"http://opencontext.org/media/254e87ae-8e32-43c9-a13b-71d579213978"[39;49;00m,
[94m"type"[39;49;00m: [33m"Feature"[39;49;00m,
[94m"category"[39;49;00m: [33m"oc-api:geo-record"[39;49;00m,
[94m"geometry"[39;49;00m: {
[94m"id"[39;49;00m: [33m"#geo-rec-geom-9-of-77460"[39;49;00m,
[94m"type"[39;49;00m: [33m"Point"[39;49;00m,
[94m"coordinates"[39;49;00m: [
[34m31.13804786067644[39;49;00m,
[34m29.97526634987221[39;49;00m
]
},
[94m"properties"[39;49;00m: {
[94m"id"[39;49;00m: [33m"#rec-9-of-77460"[39;49;00m,
[94m"feature-type"[39;49;00m: [33m"item record"[39;49;00m,
[94m"uri"[39;49;00m: [33m"http://opencontext.org/media/254e87ae-8e32-43c9-a13b-71d579213978"[39;49;00m,
[94m"href"[39;49;00m: [33m"https://opencontext.org/media/254e87ae-8e32-43c9-a13b-71d579213978"[39;49;00m,
[94m"citation uri"[39;49;00m: [34mfalse[39;49;00m,
[94m"label"[39;49;00m: [33m"Drawing d-e-020"[39;49;00m,
[94m"project label"[39;49;00m: [33m"ARCE Sphinx Project 1979-1983 Archive"[39;49;00m,
[94m"project href"[39;49;00m: [33m"https://opencontext.org/projects/141e814a-ba2d-4560-879f-80f1afb019e9"[39;49;00m,
[94m"context label"[39;49;00m: [33m"Egypt/Giza/Sphinx Amphitheater/Sphinx Ditch/Sphinx East/Roman Pavement/Removal 15"[39;49;00m,
[94m"context href"[39;49;00m: [33m"https://opencontext.org/subjects/c89534ea-27ef-4989-9330-188925c983b8"[39;49;00m,
[94m"early bce/ce"[39;49;00m: [34mfalse[39;49;00m,
[94m"late bce/ce"[39;49;00m: [34mfalse[39;49;00m,
[94m"item category"[39;49;00m: [33m"Image"[39;49;00m,
[94m"snippet"[39;49;00m: [33m"Drawing d-e-020 from Egypt/Giza/Sphinx Amphitheater/Sphinx Ditch/Sphinx East/<em>Roman</em> Pavement/Removal"[39;49;00m,
[94m"thumbnail"[39;49;00m: [33m"https://artiraq.org/static/opencontext/giza-sphinx/thumbs/Drawings/d-e-020.jpg"[39;49;00m,
[94m"published"[39;49;00m: [33m"2017-12-23T01:01:33Z"[39;49;00m,
[94m"updated"[39;49;00m: [33m"2018-03-23T01:04:19Z"[39;49;00m
}
},
{
[94m"id"[39;49;00m: [33m"#record-10-of-77460"[39;49;00m,
[94m"label"[39;49;00m: [33m"Drawing d-e-021"[39;49;00m,
[94m"rdfs:isDefinedBy"[39;49;00m: [33m"http://opencontext.org/media/14851d29-eb3f-431e-84cc-d1d90fc165eb"[39;49;00m,
[94m"type"[39;49;00m: [33m"Feature"[39;49;00m,
[94m"category"[39;49;00m: [33m"oc-api:geo-record"[39;49;00m,
[94m"geometry"[39;49;00m: {
[94m"id"[39;49;00m: [33m"#geo-rec-geom-10-of-77460"[39;49;00m,
[94m"type"[39;49;00m: [33m"Point"[39;49;00m,
[94m"coordinates"[39;49;00m: [
[34m31.13804786067644[39;49;00m,
[34m29.97526634987221[39;49;00m
]
},
[94m"properties"[39;49;00m: {
[94m"id"[39;49;00m: [33m"#rec-10-of-77460"[39;49;00m,
[94m"feature-type"[39;49;00m: [33m"item record"[39;49;00m,
[94m"uri"[39;49;00m: [33m"http://opencontext.org/media/14851d29-eb3f-431e-84cc-d1d90fc165eb"[39;49;00m,
[94m"href"[39;49;00m: [33m"https://opencontext.org/media/14851d29-eb3f-431e-84cc-d1d90fc165eb"[39;49;00m,
[94m"citation uri"[39;49;00m: [34mfalse[39;49;00m,
[94m"label"[39;49;00m: [33m"Drawing d-e-021"[39;49;00m,
[94m"project label"[39;49;00m: [33m"ARCE Sphinx Project 1979-1983 Archive"[39;49;00m,
[94m"project href"[39;49;00m: [33m"https://opencontext.org/projects/141e814a-ba2d-4560-879f-80f1afb019e9"[39;49;00m,
[94m"context label"[39;49;00m: [33m"Egypt/Giza/Sphinx Amphitheater/Sphinx Ditch/Sphinx East/Roman Pavement/Removal 15"[39;49;00m,
[94m"context href"[39;49;00m: [33m"https://opencontext.org/subjects/c89534ea-27ef-4989-9330-188925c983b8"[39;49;00m,
[94m"early bce/ce"[39;49;00m: [34mfalse[39;49;00m,
[94m"late bce/ce"[39;49;00m: [34mfalse[39;49;00m,
[94m"item category"[39;49;00m: [33m"Image"[39;49;00m,
[94m"snippet"[39;49;00m: [33m"Drawing d-e-021 from Egypt/Giza/Sphinx Amphitheater/Sphinx Ditch/Sphinx East/<em>Roman</em> Pavement/Removal"[39;49;00m,
[94m"thumbnail"[39;49;00m: [33m"https://artiraq.org/static/opencontext/giza-sphinx/thumbs/Drawings/d-e-021.jpg"[39;49;00m,
[94m"published"[39;49;00m: [33m"2017-12-23T01:01:34Z"[39;49;00m,
[94m"updated"[39;49;00m: [33m"2018-03-23T01:02:54Z"[39;49;00m
}
},
{
[94m"id"[39;49;00m: [33m"#record-11-of-77460"[39;49;00m,
[94m"label"[39;49;00m: [33m"Drawing d-gen-011"[39;49;00m,
[94m"rdfs:isDefinedBy"[39;49;00m: [33m"http://opencontext.org/media/e4c24bfd-6199-4d66-9af2-392ee138a247"[39;49;00m,
[94m"type"[39;49;00m: [33m"Feature"[39;49;00m,
[94m"category"[39;49;00m: [33m"oc-api:geo-record"[39;49;00m,
[94m"geometry"[39;49;00m: {
[94m"id"[39;49;00m: [33m"#geo-rec-geom-11-of-77460"[39;49;00m,
[94m"type"[39;49;00m: [33m"Point"[39;49;00m,
[94m"coordinates"[39;49;00m: [
[34m31.13832000374021[39;49;00m,
[34m29.975190517527544[39;49;00m
]
},
[94m"properties"[39;49;00m: {
[94m"id"[39;49;00m: [33m"#rec-11-of-77460"[39;49;00m,
[94m"feature-type"[39;49;00m: [33m"item record"[39;49;00m,
[94m"uri"[39;49;00m: [33m"http://opencontext.org/media/e4c24bfd-6199-4d66-9af2-392ee138a247"[39;49;00m,
[94m"href"[39;49;00m: [33m"https://opencontext.org/media/e4c24bfd-6199-4d66-9af2-392ee138a247"[39;49;00m,
[94m"citation uri"[39;49;00m: [34mfalse[39;49;00m,
[94m"label"[39;49;00m: [33m"Drawing d-gen-011"[39;49;00m,
[94m"project label"[39;49;00m: [33m"ARCE Sphinx Project 1979-1983 Archive"[39;49;00m,
[94m"project href"[39;49;00m: [33m"https://opencontext.org/projects/141e814a-ba2d-4560-879f-80f1afb019e9"[39;49;00m,
[94m"context label"[39;49;00m: [33m"Egypt/Giza/Terrace 1/Sphinx Temple"[39;49;00m,
[94m"context href"[39;49;00m: [33m"https://opencontext.org/subjects/1a934b66-8bd6-476e-94c1-fdb51c456aa9"[39;49;00m,
[94m"early bce/ce"[39;49;00m: [34mfalse[39;49;00m,
[94m"late bce/ce"[39;49;00m: [34mfalse[39;49;00m,
[94m"item category"[39;49;00m: [33m"Image"[39;49;00m,
[94m"snippet"[39;49;00m: [33m" including: Sphinx Temple NW Corner, Sphinx North Forepaw, Pavement, <em>Roman</em> Stairs, North Ledge, Amenhotep II"[39;49;00m,
[94m"thumbnail"[39;49;00m: [33m"https://artiraq.org/static/opencontext/giza-sphinx/thumbs/Drawings-B/d-gen-011.jpg"[39;49;00m,
[94m"published"[39;49;00m: [33m"2017-12-23T01:02:55Z"[39;49;00m,
[94m"updated"[39;49;00m: [33m"2018-03-23T01:20:43Z"[39;49;00m
}
},
{
[94m"id"[39;49;00m: [33m"#record-12-of-77460"[39;49;00m,
[94m"label"[39;49;00m: [33m"Drawing d-e-024"[39;49;00m,
[94m"rdfs:isDefinedBy"[39;49;00m: [33m"http://opencontext.org/media/af4246af-16a1-4770-b00a-f16f774dacf9"[39;49;00m,
[94m"type"[39;49;00m: [33m"Feature"[39;49;00m,
[94m"category"[39;49;00m: [33m"oc-api:geo-record"[39;49;00m,
[94m"geometry"[39;49;00m: {
[94m"id"[39;49;00m: [33m"#geo-rec-geom-12-of-77460"[39;49;00m,
[94m"type"[39;49;00m: [33m"Point"[39;49;00m,
[94m"coordinates"[39;49;00m: [
[34m31.137937830616877[39;49;00m,
[34m29.97526657483614[39;49;00m
]
},
[94m"properties"[39;49;00m: {
[94m"id"[39;49;00m: [33m"#rec-12-of-77460"[39;49;00m,
[94m"feature-type"[39;49;00m: [33m"item record"[39;49;00m,
[94m"uri"[39;49;00m: [33m"http://opencontext.org/media/af4246af-16a1-4770-b00a-f16f774dacf9"[39;49;00m,
[94m"href"[39;49;00m: [33m"https://opencontext.org/media/af4246af-16a1-4770-b00a-f16f774dacf9"[39;49;00m,
[94m"citation uri"[39;49;00m: [34mfalse[39;49;00m,
[94m"label"[39;49;00m: [33m"Drawing d-e-024"[39;49;00m,
[94m"project label"[39;49;00m: [33m"ARCE Sphinx Project 1979-1983 Archive"[39;49;00m,
[94m"project href"[39;49;00m: [33m"https://opencontext.org/projects/141e814a-ba2d-4560-879f-80f1afb019e9"[39;49;00m,
[94m"context label"[39;49;00m: [33m"Egypt/Giza/Sphinx Amphitheater/Sphinx Ditch/Sphinx East/Removal 14"[39;49;00m,
[94m"context href"[39;49;00m: [33m"https://opencontext.org/subjects/37cbe2aa-1877-4eba-b3a5-45c43d13abc9"[39;49;00m,
[94m"early bce/ce"[39;49;00m: [34mfalse[39;49;00m,
[94m"late bce/ce"[39;49;00m: [34mfalse[39;49;00m,
[94m"item category"[39;49;00m: [33m"Image"[39;49;00m,
[94m"snippet"[39;49;00m: [33m" amount of deposits immediately south of the <em>Roman</em> Stairs, between the stairs and the Sound and Light box"[39;49;00m,
[94m"thumbnail"[39;49;00m: [33m"https://artiraq.org/static/opencontext/giza-sphinx/thumbs/Drawings/d-e-024.jpg"[39;49;00m,
[94m"published"[39;49;00m: [33m"2017-12-23T01:01:36Z"[39;49;00m,
[94m"updated"[39;49;00m: [33m"2018-03-23T01:02:37Z"[39;49;00m
}
},
{
[94m"id"[39;49;00m: [33m"#record-13-of-77460"[39;49;00m,
[94m"label"[39;49;00m: [33m"2672"[39;49;00m,
[94m"rdfs:isDefinedBy"[39;49;00m: [33m"http://opencontext.org/media/99496876-67e8-408f-8da6-212f515ef67f"[39;49;00m,
[94m"type"[39;49;00m: [33m"Feature"[39;49;00m,
[94m"category"[39;49;00m: [33m"oc-api:geo-record"[39;49;00m,
[94m"geometry"[39;49;00m: {
[94m"id"[39;49;00m: [33m"#geo-rec-geom-13-of-77460"[39;49;00m,
[94m"type"[39;49;00m: [33m"Point"[39;49;00m,
[94m"coordinates"[39;49;00m: [
[34m12.719486146888293[39;49;00m,
[34m41.88774442934475[39;49;00m
]
},
[94m"when"[39;49;00m: {
[94m"id"[39;49;00m: [33m"#event-rec-when-13-of-77460"[39;49;00m,
[94m"type"[39;49;00m: [33m"oc-gen:formation-use-life"[39;49;00m,
[94m"start"[39;49;00m: [33m"-0674"[39;49;00m,
[94m"stop"[39;49;00m: [33m"0001"[39;49;00m
},
[94m"properties"[39;49;00m: {
[94m"id"[39;49;00m: [33m"#rec-13-of-77460"[39;49;00m,
[94m"feature-type"[39;49;00m: [33m"item record"[39;49;00m,
[94m"uri"[39;49;00m: [33m"http://opencontext.org/media/99496876-67e8-408f-8da6-212f515ef67f"[39;49;00m,
[94m"href"[39;49;00m: [33m"https://opencontext.org/media/99496876-67e8-408f-8da6-212f515ef67f"[39;49;00m,
[94m"citation uri"[39;49;00m: [34mfalse[39;49;00m,
[94m"label"[39;49;00m: [33m"2672"[39;49;00m,
[94m"project label"[39;49;00m: [33m"The Gabii Project"[39;49;00m,
[94m"project href"[39;49;00m: [33m"https://opencontext.org/projects/3585b372-8d2d-436c-9a4c-b5c10fce3ccd"[39;49;00m,
[94m"context label"[39;49;00m: [33m"Italy/Gabii/Area B/Unit 1242"[39;49;00m,
[94m"context href"[39;49;00m: [33m"https://opencontext.org/subjects/4d680e71-b622-42eb-9577-bd0eec2a8251"[39;49;00m,
[94m"early bce/ce"[39;49;00m: [34m-675.0[39;49;00m,
[94m"late bce/ce"[39;49;00m: [34m1.0[39;49;00m,
[94m"item category"[39;49;00m: [33m"Image"[39;49;00m,
[94m"snippet"[39;49;00m: [33m" Priority Medium \n File Attachment Filename 2672.pdf \n \n<em>Roman</em> Republican\nhttp://n2t.net/ark:/99152"[39;49;00m,
[94m"thumbnail"[39;49;00m: [33m"https://opencontext.org/static/oc/images/icons/3d-noun-37529.png"[39;49;00m,
[94m"published"[39;49;00m: [33m"2017-06-04T23:26:17Z"[39;49;00m,
[94m"updated"[39;49;00m: [33m"2018-01-31T08:20:27Z"[39;49;00m
}
},
{
[94m"id"[39;49;00m: [33m"#record-14-of-77460"[39;49;00m,
[94m"label"[39;49;00m: [33m"2770"[39;49;00m,
[94m"rdfs:isDefinedBy"[39;49;00m: [33m"http://opencontext.org/media/9d2ca9a6-127a-475f-8698-6acaf47f3967"[39;49;00m,
[94m"type"[39;49;00m: [33m"Feature"[39;49;00m,
[94m"category"[39;49;00m: [33m"oc-api:geo-record"[39;49;00m,
[94m"geometry"[39;49;00m: {
[94m"id"[39;49;00m: [33m"#geo-rec-geom-14-of-77460"[39;49;00m,
[94m"type"[39;49;00m: [33m"Point"[39;49;00m,
[94m"coordinates"[39;49;00m: [
[34m12.719685707676424[39;49;00m,
[34m41.88771672158477[39;49;00m
]
},
[94m"when"[39;49;00m: {
[94m"id"[39;49;00m: [33m"#event-rec-when-14-of-77460"[39;49;00m,
[94m"type"[39;49;00m: [33m"oc-gen:formation-use-life"[39;49;00m,
[94m"start"[39;49;00m: [33m"-0899"[39;49;00m,
[94m"stop"[39;49;00m: [33m"0200"[39;49;00m
},
[94m"properties"[39;49;00m: {
[94m"id"[39;49;00m: [33m"#rec-14-of-77460"[39;49;00m,
[94m"feature-type"[39;49;00m: [33m"item record"[39;49;00m,
[94m"uri"[39;49;00m: [33m"http://opencontext.org/media/9d2ca9a6-127a-475f-8698-6acaf47f3967"[39;49;00m,
[94m"href"[39;49;00m: [33m"https://opencontext.org/media/9d2ca9a6-127a-475f-8698-6acaf47f3967"[39;49;00m,
[94m"citation uri"[39;49;00m: [34mfalse[39;49;00m,
[94m"label"[39;49;00m: [33m"2770"[39;49;00m,
[94m"project label"[39;49;00m: [33m"The Gabii Project"[39;49;00m,
[94m"project href"[39;49;00m: [33m"https://opencontext.org/projects/3585b372-8d2d-436c-9a4c-b5c10fce3ccd"[39;49;00m,
[94m"context label"[39;49;00m: [33m"Italy/Gabii/Area B/Unit 1386"[39;49;00m,
[94m"context href"[39;49;00m: [33m"https://opencontext.org/subjects/321b8a74-9782-450b-93e2-82d963cb2f8f"[39;49;00m,
[94m"early bce/ce"[39;49;00m: [34m-900.0[39;49;00m,
[94m"late bce/ce"[39;49;00m: [34m200.0[39;49;00m,
[94m"item category"[39;49;00m: [33m"Image"[39;49;00m,
[94m"snippet"[39;49;00m: [33m" Medium \n File Attachment Filename 2770.pdf \n \n<em>Roman</em> Republican\nhttp://n2t.net/ark:/99152/p0qhb66mvjk"[39;49;00m,
[94m"thumbnail"[39;49;00m: [33m"https://opencontext.org/static/oc/images/icons/3d-noun-37529.png"[39;49;00m,
[94m"published"[39;49;00m: [33m"2017-06-04T23:32:24Z"[39;49;00m,
[94m"updated"[39;49;00m: [33m"2018-01-31T08:18:05Z"[39;49;00m
}
},
{
[94m"id"[39;49;00m: [33m"#record-15-of-77460"[39;49;00m,
[94m"label"[39;49;00m: [33m"Drawing d-e-042"[39;49;00m,
[94m"rdfs:isDefinedBy"[39;49;00m: [33m"http://opencontext.org/media/f9d43eb2-12bd-4f8a-9f78-4ecba5406e07"[39;49;00m,
[94m"type"[39;49;00m: [33m"Feature"[39;49;00m,
[94m"category"[39;49;00m: [33m"oc-api:geo-record"[39;49;00m,
[94m"geometry"[39;49;00m: {
[94m"id"[39;49;00m: [33m"#geo-rec-geom-15-of-77460"[39;49;00m,
[94m"type"[39;49;00m: [33m"Point"[39;49;00m,
[94m"coordinates"[39;49;00m: [
[34m31.137937830616877[39;49;00m,
[34m29.97526657483614[39;49;00m
]
},
[94m"properties"[39;49;00m: {
[94m"id"[39;49;00m: [33m"#rec-15-of-77460"[39;49;00m,
[94m"feature-type"[39;49;00m: [33m"item record"[39;49;00m,
[94m"uri"[39;49;00m: [33m"http://opencontext.org/media/f9d43eb2-12bd-4f8a-9f78-4ecba5406e07"[39;49;00m,
[94m"href"[39;49;00m: [33m"https://opencontext.org/media/f9d43eb2-12bd-4f8a-9f78-4ecba5406e07"[39;49;00m,
[94m"citation uri"[39;49;00m: [34mfalse[39;49;00m,
[94m"label"[39;49;00m: [33m"Drawing d-e-042"[39;49;00m,
[94m"project label"[39;49;00m: [33m"ARCE Sphinx Project 1979-1983 Archive"[39;49;00m,
[94m"project href"[39;49;00m: [33m"https://opencontext.org/projects/141e814a-ba2d-4560-879f-80f1afb019e9"[39;49;00m,
[94m"context label"[39;49;00m: [33m"Egypt/Giza/Sphinx Amphitheater/Sphinx Ditch/Sphinx East"[39;49;00m,
[94m"context href"[39;49;00m: [33m"https://opencontext.org/subjects/5714b49e-a383-4738-9235-29841bfaaba0"[39;49;00m,
[94m"early bce/ce"[39;49;00m: [34mfalse[39;49;00m,
[94m"late bce/ce"[39;49;00m: [34mfalse[39;49;00m,
[94m"item category"[39;49;00m: [33m"Image"[39;49;00m,
[94m"snippet"[39;49;00m: [33m" \n Resource Type Drawings \n Keywords Chapel Forepaws Masonry <em>Roman</em> Pavement \n Scale 1:50 \n \n Creator"[39;49;00m,
[94m"thumbnail"[39;49;00m: [33m"https://artiraq.org/static/opencontext/giza-sphinx/thumbs/Drawings-B/d-e-042.jpg"[39;49;00m,
[94m"published"[39;49;00m: [33m"2017-12-23T01:01:46Z"[39;49;00m,
[94m"updated"[39;49;00m: [33m"2018-03-23T01:06:48Z"[39;49;00m
}
},
{
[94m"id"[39;49;00m: [33m"#record-16-of-77460"[39;49;00m,
[94m"label"[39;49;00m: [33m"Drawing d-e-066"[39;49;00m,
[94m"rdfs:isDefinedBy"[39;49;00m: [33m"http://opencontext.org/media/8b650f2e-2196-45d1-aa17-535943293d23"[39;49;00m,
[94m"type"[39;49;00m: [33m"Feature"[39;49;00m,
[94m"category"[39;49;00m: [33m"oc-api:geo-record"[39;49;00m,
[94m"geometry"[39;49;00m: {
[94m"id"[39;49;00m: [33m"#geo-rec-geom-16-of-77460"[39;49;00m,
[94m"type"[39;49;00m: [33m"Point"[39;49;00m,
[94m"coordinates"[39;49;00m: [
[34m31.137937830616877[39;49;00m,
[34m29.97526657483614[39;49;00m
]
},
[94m"properties"[39;49;00m: {
[94m"id"[39;49;00m: [33m"#rec-16-of-77460"[39;49;00m,
[94m"feature-type"[39;49;00m: [33m"item record"[39;49;00m,
[94m"uri"[39;49;00m: [33m"http://opencontext.org/media/8b650f2e-2196-45d1-aa17-535943293d23"[39;49;00m,
[94m"href"[39;49;00m: [33m"https://opencontext.org/media/8b650f2e-2196-45d1-aa17-535943293d23"[39;49;00m,
[94m"citation uri"[39;49;00m: [33m"http://n2t.net/ark:/28722/k2nv9pv9c"[39;49;00m,
[94m"label"[39;49;00m: [33m"Drawing d-e-066"[39;49;00m,
[94m"project label"[39;49;00m: [33m"ARCE Sphinx Project 1979-1983 Archive"[39;49;00m,
[94m"project href"[39;49;00m: [33m"https://opencontext.org/projects/141e814a-ba2d-4560-879f-80f1afb019e9"[39;49;00m,
[94m"context label"[39;49;00m: [33m"Egypt/Giza/Sphinx Amphitheater/Sphinx Ditch/Sphinx East/Removal 14"[39;49;00m,
[94m"context href"[39;49;00m: [33m"https://opencontext.org/subjects/37cbe2aa-1877-4eba-b3a5-45c43d13abc9"[39;49;00m,
[94m"early bce/ce"[39;49;00m: [34mfalse[39;49;00m,
[94m"late bce/ce"[39;49;00m: [34mfalse[39;49;00m,
[94m"item category"[39;49;00m: [33m"Image"[39;49;00m,
[94m"snippet"[39;49;00m: [33m") R14 \n \n Description R14 was the removal of a small amount of deposits immediately south of the <em>Roman</em>"[39;49;00m,
[94m"thumbnail"[39;49;00m: [33m"https://artiraq.org/static/opencontext/giza-sphinx/thumbs/Drawings-B/d-e-066.jpg"[39;49;00m,
[94m"published"[39;49;00m: [33m"2017-12-23T01:02:02Z"[39;49;00m,
[94m"updated"[39;49;00m: [33m"2018-03-23T01:04:33Z"[39;49;00m
}
},
{
[94m"id"[39;49;00m: [33m"#record-17-of-77460"[39;49;00m,
[94m"label"[39;49;00m: [33m"Drawing d-e-064"[39;49;00m,
[94m"rdfs:isDefinedBy"[39;49;00m: [33m"http://opencontext.org/media/0bc963d4-40da-406c-b395-696ea87618be"[39;49;00m,
[94m"type"[39;49;00m: [33m"Feature"[39;49;00m,
[94m"category"[39;49;00m: [33m"oc-api:geo-record"[39;49;00m,
[94m"geometry"[39;49;00m: {
[94m"id"[39;49;00m: [33m"#geo-rec-geom-17-of-77460"[39;49;00m,
[94m"type"[39;49;00m: [33m"Point"[39;49;00m,
[94m"coordinates"[39;49;00m: [
[34m31.137937830616877[39;49;00m,
[34m29.97526657483614[39;49;00m
]
},
[94m"properties"[39;49;00m: {
[94m"id"[39;49;00m: [33m"#rec-17-of-77460"[39;49;00m,
[94m"feature-type"[39;49;00m: [33m"item record"[39;49;00m,
[94m"uri"[39;49;00m: [33m"http://opencontext.org/media/0bc963d4-40da-406c-b395-696ea87618be"[39;49;00m,
[94m"href"[39;49;00m: [33m"https://opencontext.org/media/0bc963d4-40da-406c-b395-696ea87618be"[39;49;00m,
[94m"citation uri"[39;49;00m: [33m"http://n2t.net/ark:/28722/k2sn0dp45"[39;49;00m,
[94m"label"[39;49;00m: [33m"Drawing d-e-064"[39;49;00m,
[94m"project label"[39;49;00m: [33m"ARCE Sphinx Project 1979-1983 Archive"[39;49;00m,
[94m"project href"[39;49;00m: [33m"https://opencontext.org/projects/141e814a-ba2d-4560-879f-80f1afb019e9"[39;49;00m,
[94m"context label"[39;49;00m: [33m"Egypt/Giza/Sphinx Amphitheater/Sphinx Ditch/Sphinx East/Removal 14"[39;49;00m,
[94m"context href"[39;49;00m: [33m"https://opencontext.org/subjects/37cbe2aa-1877-4eba-b3a5-45c43d13abc9"[39;49;00m,
[94m"early bce/ce"[39;49;00m: [34mfalse[39;49;00m,
[94m"late bce/ce"[39;49;00m: [34mfalse[39;49;00m,
[94m"item category"[39;49;00m: [33m"Image"[39;49;00m,
[94m"snippet"[39;49;00m: [33m" was the removal of a small amount of deposits immediately south of the <em>Roman</em> Stairs, between the"[39;49;00m,
[94m"thumbnail"[39;49;00m: [33m"https://artiraq.org/static/opencontext/giza-sphinx/thumbs/Drawings-B/d-e-064.jpg"[39;49;00m,
[94m"published"[39;49;00m: [33m"2017-12-23T01:02:01Z"[39;49;00m,
[94m"updated"[39;49;00m: [33m"2018-03-23T01:02:22Z"[39;49;00m
}
},
{
[94m"id"[39;49;00m: [33m"#record-18-of-77460"[39;49;00m,
[94m"label"[39;49;00m: [33m"2843"[39;49;00m,
[94m"rdfs:isDefinedBy"[39;49;00m: [33m"http://opencontext.org/media/9409bf05-af66-4314-b78c-dec7d3a58c77"[39;49;00m,
[94m"type"[39;49;00m: [33m"Feature"[39;49;00m,
[94m"category"[39;49;00m: [33m"oc-api:geo-record"[39;49;00m,
[94m"geometry"[39;49;00m: {
[94m"id"[39;49;00m: [33m"#geo-rec-geom-18-of-77460"[39;49;00m,
[94m"type"[39;49;00m: [33m"Point"[39;49;00m,
[94m"coordinates"[39;49;00m: [
[34m12.719678174242606[39;49;00m,
[34m41.88768863730048[39;49;00m
]
},
[94m"properties"[39;49;00m: {
[94m"id"[39;49;00m: [33m"#rec-18-of-77460"[39;49;00m,
[94m"feature-type"[39;49;00m: [33m"item record"[39;49;00m,
[94m"uri"[39;49;00m: [33m"http://opencontext.org/media/9409bf05-af66-4314-b78c-dec7d3a58c77"[39;49;00m,
[94m"href"[39;49;00m: [33m"https://opencontext.org/media/9409bf05-af66-4314-b78c-dec7d3a58c77"[39;49;00m,
[94m"citation uri"[39;49;00m: [34mfalse[39;49;00m,
[94m"label"[39;49;00m: [33m"2843"[39;49;00m,
[94m"project label"[39;49;00m: [33m"The Gabii Project"[39;49;00m,
[94m"project href"[39;49;00m: [33m"https://opencontext.org/projects/3585b372-8d2d-436c-9a4c-b5c10fce3ccd"[39;49;00m,
[94m"context label"[39;49;00m: [33m"Italy/Gabii/Area B/Unit 1468"[39;49;00m,
[94m"context href"[39;49;00m: [33m"https://opencontext.org/subjects/fadbab7d-868a-4b20-b9b9-b54f5544d156"[39;49;00m,
[94m"early bce/ce"[39;49;00m: [34mfalse[39;49;00m,
[94m"late bce/ce"[39;49;00m: [34mfalse[39;49;00m,
[94m"item category"[39;49;00m: [33m"Image"[39;49;00m,
[94m"snippet"[39;49;00m: [33m" Attachment Priority High \n File Attachment Filename 2843.pdf \n \n<em>Roman</em> Republican\nhttp://n2t.net/ark:/99152"[39;49;00m,
[94m"thumbnail"[39;49;00m: [33m"https://opencontext.org/static/oc/images/icons/3d-noun-37529.png"[39;49;00m,
[94m"published"[39;49;00m: [33m"2017-06-04T23:33:47Z"[39;49;00m,
[94m"updated"[39;49;00m: [33m"2018-01-31T08:17:31Z"[39;49;00m
}
},
{
[94m"id"[39;49;00m: [33m"#record-19-of-77460"[39;49;00m,
[94m"label"[39;49;00m: [33m"2709"[39;49;00m,
[94m"rdfs:isDefinedBy"[39;49;00m: [33m"http://opencontext.org/media/eeee731e-4394-452b-871d-8b82add85ae2"[39;49;00m,
[94m"type"[39;49;00m: [33m"Feature"[39;49;00m,
[94m"category"[39;49;00m: [33m"oc-api:geo-record"[39;49;00m,
[94m"geometry"[39;49;00m: {
[94m"id"[39;49;00m: [33m"#geo-rec-geom-19-of-77460"[39;49;00m,
[94m"type"[39;49;00m: [33m"Point"[39;49;00m,
[94m"coordinates"[39;49;00m: [
[34m12.719596721093325[39;49;00m,
[34m41.88778964097052[39;49;00m
]
},
[94m"when"[39;49;00m: {
[94m"id"[39;49;00m: [33m"#event-rec-when-19-of-77460"[39;49;00m,
[94m"type"[39;49;00m: [33m"oc-gen:formation-use-life"[39;49;00m,
[94m"start"[39;49;00m: [33m"-0399"[39;49;00m,
[94m"stop"[39;49;00m: [33m"0900"[39;49;00m
},
[94m"properties"[39;49;00m: {
[94m"id"[39;49;00m: [33m"#rec-19-of-77460"[39;49;00m,
[94m"feature-type"[39;49;00m: [33m"item record"[39;49;00m,
[94m"uri"[39;49;00m: [33m"http://opencontext.org/media/eeee731e-4394-452b-871d-8b82add85ae2"[39;49;00m,
[94m"href"[39;49;00m: [33m"https://opencontext.org/media/eeee731e-4394-452b-871d-8b82add85ae2"[39;49;00m,
[94m"citation uri"[39;49;00m: [34mfalse[39;49;00m,
[94m"label"[39;49;00m: [33m"2709"[39;49;00m,
[94m"project label"[39;49;00m: [33m"The Gabii Project"[39;49;00m,
[94m"project href"[39;49;00m: [33m"https://opencontext.org/projects/3585b372-8d2d-436c-9a4c-b5c10fce3ccd"[39;49;00m,
[94m"context label"[39;49;00m: [33m"Italy/Gabii/Area B/Unit 1321"[39;49;00m,
[94m"context href"[39;49;00m: [33m"https://opencontext.org/subjects/afe93991-1894-42eb-a1d8-93a3d03dfa6c"[39;49;00m,
[94m"early bce/ce"[39;49;00m: [34m-400.0[39;49;00m,
[94m"late bce/ce"[39;49;00m: [34m900.0[39;49;00m,
[94m"item category"[39;49;00m: [33m"Image"[39;49;00m,
[94m"snippet"[39;49;00m: [33m" \n File Attachment Filename 2709.pdf \n \n<em>Roman</em> Republican\nhttp://n2t.net/ark:/99152/p0qhb66mvjk\nTemporal"[39;49;00m,
[94m"thumbnail"[39;49;00m: [33m"https://opencontext.org/static/oc/images/icons/3d-noun-37529.png"[39;49;00m,
[94m"published"[39;49;00m: [33m"2017-06-04T23:31:38Z"[39;49;00m,
[94m"updated"[39;49;00m: [33m"2018-01-31T08:21:34Z"[39;49;00m
}
},
{
[94m"id"[39;49;00m: [33m"#record-20-of-77460"[39;49;00m,
[94m"label"[39;49;00m: [33m"Drawing d-se-001"[39;49;00m,
[94m"rdfs:isDefinedBy"[39;49;00m: [33m"http://opencontext.org/media/10a6fd7d-e922-407c-91f1-c492b3b1e6ef"[39;49;00m,
[94m"type"[39;49;00m: [33m"Feature"[39;49;00m,
[94m"category"[39;49;00m: [33m"oc-api:geo-record"[39;49;00m,
[94m"geometry"[39;49;00m: {
[94m"id"[39;49;00m: [33m"#geo-rec-geom-20-of-77460"[39;49;00m,
[94m"type"[39;49;00m: [33m"Point"[39;49;00m,
[94m"coordinates"[39;49;00m: [
[34m31.137930070235992[39;49;00m,
[34m29.97510631209676[39;49;00m
]
},
[94m"properties"[39;49;00m: {
[94m"id"[39;49;00m: [33m"#rec-20-of-77460"[39;49;00m,
[94m"feature-type"[39;49;00m: [33m"item record"[39;49;00m,
[94m"uri"[39;49;00m: [33m"http://opencontext.org/media/10a6fd7d-e922-407c-91f1-c492b3b1e6ef"[39;49;00m,
[94m"href"[39;49;00m: [33m"https://opencontext.org/media/10a6fd7d-e922-407c-91f1-c492b3b1e6ef"[39;49;00m,
[94m"citation uri"[39;49;00m: [34mfalse[39;49;00m,
[94m"label"[39;49;00m: [33m"Drawing d-se-001"[39;49;00m,
[94m"project label"[39;49;00m: [33m"ARCE Sphinx Project 1979-1983 Archive"[39;49;00m,
[94m"project href"[39;49;00m: [33m"https://opencontext.org/projects/141e814a-ba2d-4560-879f-80f1afb019e9"[39;49;00m,
[94m"context label"[39;49;00m: [33m"Egypt/Giza/Sphinx Amphitheater/Sphinx Ditch/Sphinx Southeast/Feature FSEd3"[39;49;00m,
[94m"context href"[39;49;00m: [33m"https://opencontext.org/subjects/3013a0b2-dfc8-419c-a1ee-23ad344e7d4f"[39;49;00m,
[94m"early bce/ce"[39;49;00m: [34mfalse[39;49;00m,
[94m"late bce/ce"[39;49;00m: [34mfalse[39;49;00m,
[94m"item category"[39;49;00m: [33m"Image"[39;49;00m,
[94m"snippet"[39;49;00m: [33m", south of <em>Roman</em> pavement \n \n Year \n Resource Type Drawings \n Keywords Bedrock Depression Floor"[39;49;00m,
[94m"thumbnail"[39;49;00m: [33m"https://artiraq.org/static/opencontext/giza-sphinx/thumbs/Drawings/d-se-001.jpg"[39;49;00m,
[94m"published"[39;49;00m: [33m"2017-12-23T01:04:03Z"[39;49;00m,
[94m"updated"[39;49;00m: [33m"2018-03-23T01:05:26Z"[39;49;00m
}
}
]
}
###Markdown
11. Finally, export the JSON to a file: `import json`, then `with open('OCromandata.json', 'w') as outfile: json.dump(data, outfile)`.
###Code
import json
with open('OCromandata.json', 'w') as outfile:
json.dump(data, outfile)
###Output
_____no_output_____
|
notebooks/bulldozer-project/milestone-project-2.ipynb
|
###Markdown
🚜 Predicting the Sale Price of Bulldozers using Machine Learning 1. Problem Definition> How well can we predict the future sales price of a bulldozer, given its characteristics and previous sales data for similar bulldozers? 2. DataThe data is downloaded from the Kaggle Blue Book for Bulldozers competition: [Blue Book for Bulldozers](https://www.kaggle.com/c/bluebook-for-bulldozers/overview)The data for this competition is split into three parts:* Train.csv is the training set, which contains data through the end of 2011.* Valid.csv is the validation set, which contains data from January 1, 2012 - April 30, 2012. You make predictions on this set throughout the majority of the competition. Your score on this set is used to create the public leaderboard.* Test.csv is the test set, which won't be released until the last week of the competition. It contains data from May 1, 2012 - November 2012. Your score on the test set determines your final rank for the competition.The key fields in train.csv are:* SalesID: the unique identifier of the sale* MachineID: the unique identifier of a machine. A machine can be sold multiple times* saleprice: what the machine sold for at auction (only provided in train.csv)* saledate: the date of the saleThere are several fields towards the end of the file on the different options a machine can have. The descriptions all start with "machine configuration" in the data dictionary. Some product types do not have a particular option, so all the records for that option variable will be null for that product type. Also, some sources do not provide good option and/or hours data.The machine_appendix.csv file contains the correct year manufactured for a given machine along with the make, model, and product class details. There is one machine id for every machine in all the competition datasets (training, evaluation, etc.). 3. EvaluationThe evaluation metric for this competition is the RMSLE (root mean squared log error) between the actual and predicted auction prices. See [Kaggle](https://www.kaggle.com/c/bluebook-for-bulldozers/overview/evaluation).**Note:** The goal is to minimize the RMSLE. 4. FeaturesKaggle provides a data dictionary for the features.See [Kaggle Data Dictionary](https://www.kaggle.com/c/bluebook-for-bulldozers/data). Imports And Data Exploration
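To make the metric concrete, here is a minimal sketch (toy numbers, not competition data) showing that the RMSLE is just the square root of scikit-learn's `mean_squared_log_error`:

```python
import numpy as np
from sklearn.metrics import mean_squared_log_error

# Toy ground-truth and predicted sale prices (illustrative values only)
y_true = np.array([20000.0, 35000.0, 50000.0])
y_pred = np.array([22000.0, 30000.0, 55000.0])

# RMSLE written out explicitly: sqrt(mean((log(1 + y_pred) - log(1 + y_true))^2))
rmsle_manual = np.sqrt(np.mean((np.log1p(y_pred) - np.log1p(y_true)) ** 2))

# The same quantity via scikit-learn
rmsle_sklearn = np.sqrt(mean_squared_log_error(y_true, y_pred))

print(rmsle_manual, rmsle_sklearn)  # both print the same value
```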
###Code
# Regular EDA (exploratory data analysis) and plotting libraries
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns; sns.set()
%matplotlib inline
# Models from Scikit-Learn
from sklearn.ensemble import RandomForestRegressor
# Model Evaluations
from sklearn.model_selection import cross_val_predict, cross_val_score, train_test_split, RandomizedSearchCV, GridSearchCV
from sklearn.metrics import confusion_matrix, classification_report, mean_absolute_error, mean_squared_error, mean_squared_log_error, r2_score
# import training
df = pd.read_csv('../../data/bulldozers/TrainAndValid.csv', low_memory=False)
df.info()
df.isna().sum()
df.saledate.dtype
fig, ax = plt.subplots(figsize=(10,6))
scatter = ax.scatter(x=df['saledate'][:1000],
y=df['SalePrice'][:1000])
ax.set(title='Sale Date and Sale Price Data')
ax.set_xticklabels([]);
df.SalePrice.plot.hist();
###Output
_____no_output_____
###Markdown
Parsing DatesTime series data: enrich the time & date components as much as possible -> use the `parse_dates` parameter in pandas.
###Code
# import data again, but this time parse dates
df = pd.read_csv('../../data/bulldozers/TrainAndValid.csv',
low_memory=False,
parse_dates=['saledate'])
df.saledate.dtype
df.saledate[:10]
fig, ax = plt.subplots(figsize=(10,6))
scatter = ax.scatter(x=df['saledate'][:1000],
y=df['SalePrice'][:1000])
ax.set(title='Sale Date and Sale Price Data');
df.head()
df.head().T
df.saledate.head(10)
###Output
_____no_output_____
###Markdown
Sort DataFrame by saledateWhen working with time series data, it's a good idea to sort it by date.
###Code
# Sort DataFrame in date order
df.sort_values(by=['saledate'], inplace=True, ascending=True)
df.saledate.head(10)
###Output
_____no_output_____
###Markdown
Make a copy of the original DataFrameWe make a copy of the original dataframe so when we manipulate the copy, we've still got our original data.
###Code
# Make a copy
df_tmp = df.copy()
###Output
_____no_output_____
###Markdown
Add datetime parameters for `saledate` column
###Code
df_tmp['saleYear'] = df_tmp.saledate.dt.year
df_tmp['saleMonth'] = df_tmp.saledate.dt.month
df_tmp['saleDay'] = df_tmp.saledate.dt.day
df_tmp['saleDayOfWeek'] = df_tmp.saledate.dt.dayofweek
df_tmp['saleDayOfYear'] = df_tmp.saledate.dt.dayofyear
df_tmp.head().T
# Now we've enriched the DataFrame with datetime features, we can remove the saleDate column
df_tmp.drop('saledate', axis=1, inplace=True)
# Check the value of different columns
df_tmp.state.value_counts()
df_tmp.head()
###Output
_____no_output_____
###Markdown
Convert strings to categoriesOne way to convert strings into numbers is by converting them into categories.https://pandas.pydata.org/pandas-docs/stable/user_guide/categorical.html
###Code
pd.api.types.is_string_dtype(df_tmp['UsageBand'])
# Find the columns which contain strings
[label for label, content in df_tmp.items() if pd.api.types.is_string_dtype(content)]
# Convert all columns with string values into category values
for label, content in df_tmp.items():
if pd.api.types.is_string_dtype(content):
df_tmp[label] = content.astype('category').cat.as_ordered()
df.info()
df_tmp.isna().sum()
df_tmp.info()
df_tmp.state.cat.categories
df_tmp.state.cat.codes
###Output
_____no_output_____
###Markdown
Now all columns that contained strings have numerical valuesBut there's still missing data...
###Code
# Check missing data
df_tmp.isnull().sum()/len(df_tmp)
###Output
_____no_output_____
###Markdown
Save preprocessed data
###Code
# Export current dmp dataframe
df_tmp.to_csv('../../data/bulldozers/train_and_valid_tmp.csv',
index=False)
# Import preprocessed data
df_tmp = pd.read_csv('../../data/bulldozers/train_and_valid_tmp.csv',
low_memory=False)
df_tmp.head().T
###Output
_____no_output_____
###Markdown
Fill missing values Fill numeric values first
###Code
# Find the columns with numeric type
[label for label, content in df_tmp.items() if pd.api.types.is_numeric_dtype(content)]
# Check for which numeric columns have null values
for label, content in df_tmp.items():
if pd.api.types.is_numeric_dtype(content):
if pd.isnull(content).sum():
print(label)
# Fill numeric null columns with median
for label, content in df_tmp.items():
if pd.api.types.is_numeric_dtype(content):
if pd.isnull(content).sum():
# Add a binary column which tells us if data was missing
df_tmp[label+"_is_missing"] = pd.isnull(content)
# Fill missing numeric values with median
df_tmp[label] = content.fillna(content.median())
# Find the columns with numeric type
[label for label, content in df_tmp.items() if pd.api.types.is_numeric_dtype(content) if pd.isnull(content).sum()]
# Check how many examples were missing
df_tmp.auctioneerID_is_missing.value_counts()
df_tmp.isna().sum()
###Output
_____no_output_____
###Markdown
Turn categorical variables into numbers and fill missing values
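As a quick aside on the `+1` used in the code below: `pd.Categorical` encodes missing values as `-1`, so shifting every code by one reserves `0` for "was missing". A minimal sketch with toy values:

```python
import numpy as np
import pandas as pd

toy = pd.Series(["Low", "High", np.nan, "Low"])

codes = pd.Categorical(toy).codes
print(codes)      # [ 1  0 -1  1] -- NaN is encoded as -1
print(codes + 1)  # [ 2  1  0  2] -- after the shift, 0 marks a missing value
```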
###Code
# Find the columns with non-numeric type which have missing values
[(label, pd.isnull(content).sum()) for label, content in df_tmp.items() if not pd.api.types.is_numeric_dtype(content) if pd.isnull(content).sum()]
df_tmp.info()
# Turn categorical variables into numbers and fill missing
for label, content in df_tmp.items():
if not pd.api.types.is_numeric_dtype(content):
# Add binary column to indicate whether sample had missing value
df_tmp[label+"_is_missing"] = pd.isnull(content)
# Turn categories into numbers and add +1
df_tmp[label] = pd.Categorical(content).codes+1
pd.Categorical(df_tmp.state).codes
pd.Categorical(df_tmp['UsageBand']).codes
df_tmp.info()
df_tmp.isna().sum()
###Output
_____no_output_____
###Markdown
5. ModellingAfter some cursory EDA (you can always do more) let's start to do some model-driven EDA.
###Code
# Split data into training and validation
df_val = df_tmp[df_tmp.saleYear == 2012]
df_train = df_tmp[df_tmp.saleYear != 2012]
# Split data into X & y
X_train, y_train = df_train.drop('SalePrice', axis=1), df_train['SalePrice']
X_valid, y_valid = df_val.drop('SalePrice', axis=1), df_val['SalePrice']
X_train.shape, y_train.shape, X_valid.shape, y_valid.shape
###Output
_____no_output_____
###Markdown
Building an evaluation function
###Code
# Create evaluation function RMSLE
def rmsle(y_true, y_pred):
"""
Calculates root mean squared log error between predictions and
true labels.
"""
return np.sqrt(mean_squared_log_error(y_true, y_pred))
# Create function to evaluate model on a few different levels
def show_scores(model):
"""
Shows scoring metrics for different data sets.
"""
train_preds = model.predict(X_train)
val_preds = model.predict(X_valid)
scores = {'Training MAE': mean_absolute_error(y_train, train_preds),
'Valid MAE': mean_absolute_error(y_valid, val_preds),
'Training RMSLE': rmsle(y_train, train_preds),
'Valid RMSLE': rmsle(y_valid, val_preds),
'Training R^2': r2_score(y_train, train_preds),
'Valid R^2': r2_score(y_valid, val_preds)}
return scores
# Instantiate Random Forest Regressor with subset of data
rf_clf = RandomForestRegressor(n_jobs=-1,
random_state=42,
max_samples=10000)
%%time
rf_clf.fit(X_train, y_train)
rf_clf_scores = show_scores(rf_clf)
rf_clf_scores
###Output
_____no_output_____
###Markdown
Experiment with Reducing Features using PCA
###Code
# Make pipeline with PCA and RandomForestRegressor
from sklearn.decomposition import PCA
from sklearn.pipeline import make_pipeline
pca = PCA(n_components=20, whiten=True, random_state=42)
rf = RandomForestRegressor(n_jobs=-1, random_state=42)
pca_rf_model = make_pipeline(pca, rf)
pca_rf_model
# create hyperparameter grid for RandomForest
rf_param_grid = {'randomforestregressor__n_estimators': np.arange(10, 1000, 50),
'randomforestregressor__max_depth': [None, 3, 5, 10],
'randomforestregressor__min_samples_split': np.arange(2, 20, 2),
'randomforestregressor__min_samples_leaf': np.arange(1, 20, 2)}
rf_grid = RandomizedSearchCV(pca_rf_model, rf_param_grid, cv=5, n_iter=5, verbose=True)
# Fit data to grid
%time
rf_grid.fit(X_train[:1000], y_train[:1000])
rf_grid.best_params_
pca_rf_tuned_model = rf_grid.best_estimator_
pca_rf_tuned_model.score(X_valid, y_valid)
show_scores(pca_rf_tuned_model)
###Output
_____no_output_____
###Markdown
Hyperparameter Tuning with RandomizedSearchCV
###Code
# Instantiate Random Forest Regressor with subset of data
rf_clf = RandomForestRegressor(n_jobs=-1,
random_state=42)
# create hyperparameter grid for RandomForest
rf_param_grid = {'n_estimators': np.arange(10, 1000, 50),
'max_depth': [None, 3, 5, 10],
'min_samples_split': np.arange(2, 20, 2),
'min_samples_leaf': np.arange(1, 20, 2),
'max_features': [0.5, 1, 'sqrt', 'auto'],
'max_samples': [10000]}
rs_rf = RandomizedSearchCV(rf_clf,
rf_param_grid,
cv=2,
n_iter=5,
verbose=True)
# Fit model
%time
rs_rf.fit(X_train, y_train)
# Find the best parameters
rs_rf.best_estimator_, rs_rf.best_params_
# Set up tuned model with best params
rs_rf_best = rs_rf.best_estimator_
rs_rf_best_scores = show_scores(rs_rf_best)
rs_rf_best_scores
###Output
_____no_output_____
###Markdown
Train a Model With The Best (Given) Hyperparameters**Note**: Found after 100 search iterations by the course instructor.
###Code
%%time
# Ideal hyperparameters
ideal_rf = RandomForestRegressor(n_estimators=40,
min_samples_leaf=1,
min_samples_split=14,
max_features=0.5,
n_jobs=-1,
max_samples=None,
random_state=42)
# Fit the model
ideal_rf.fit(X_train, y_train)
# Scores for ideal RandomForest model (trained on all the data)
show_scores(ideal_rf)
# Scores on rs_model (trained on subset of 10,000 rows)
rf_clf_scores
###Output
_____no_output_____
###Markdown
Make predictions on test data
###Code
# Import test data
df_test = pd.read_csv('../../data/bulldozers/Test.csv',
low_memory=False,
parse_dates=['saledate'])
df_test.head().T
df_test.info()
###Output
<class 'pandas.core.frame.DataFrame'>
RangeIndex: 12457 entries, 0 to 12456
Data columns (total 52 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 SalesID 12457 non-null int64
1 MachineID 12457 non-null int64
2 ModelID 12457 non-null int64
3 datasource 12457 non-null int64
4 auctioneerID 12457 non-null int64
5 YearMade 12457 non-null int64
6 MachineHoursCurrentMeter 2129 non-null float64
7 UsageBand 1834 non-null object
8 saledate 12457 non-null datetime64[ns]
9 fiModelDesc 12457 non-null object
10 fiBaseModel 12457 non-null object
11 fiSecondaryDesc 8482 non-null object
12 fiModelSeries 2006 non-null object
13 fiModelDescriptor 3024 non-null object
14 ProductSize 6048 non-null object
15 fiProductClassDesc 12457 non-null object
16 state 12457 non-null object
17 ProductGroup 12457 non-null object
18 ProductGroupDesc 12457 non-null object
19 Drive_System 2759 non-null object
20 Enclosure 12455 non-null object
21 Forks 6308 non-null object
22 Pad_Type 2108 non-null object
23 Ride_Control 4241 non-null object
24 Stick 2108 non-null object
25 Transmission 4818 non-null object
26 Turbocharged 2108 non-null object
27 Blade_Extension 651 non-null object
28 Blade_Width 651 non-null object
29 Enclosure_Type 651 non-null object
30 Engine_Horsepower 651 non-null object
31 Hydraulics 10315 non-null object
32 Pushblock 651 non-null object
33 Ripper 2704 non-null object
34 Scarifier 651 non-null object
35 Tip_Control 651 non-null object
36 Tire_Size 2778 non-null object
37 Coupler 7601 non-null object
38 Coupler_System 2066 non-null object
39 Grouser_Tracks 2066 non-null object
40 Hydraulics_Flow 2066 non-null object
41 Track_Type 3394 non-null object
42 Undercarriage_Pad_Width 3398 non-null object
43 Stick_Length 3394 non-null object
44 Thumb 3395 non-null object
45 Pattern_Changer 3394 non-null object
46 Grouser_Type 3394 non-null object
47 Backhoe_Mounting 2051 non-null object
48 Blade_Type 2058 non-null object
49 Travel_Controls 2058 non-null object
50 Differential_Type 2129 non-null object
51 Steering_Controls 2129 non-null object
dtypes: datetime64[ns](1), float64(1), int64(6), object(44)
memory usage: 4.9+ MB
###Markdown
Preprocessing test dataset
###Code
def preprocess_data(df):
"""
Preprocess a dataset and returns transformed df.
"""
# Add date columns, drop saledate
df['saleYear'] = df.saledate.dt.year
df['saleMonth'] = df.saledate.dt.month
df['saleDay'] = df.saledate.dt.day
df['saleDayOfWeek'] = df.saledate.dt.dayofweek
df['saleDayOfYear'] = df.saledate.dt.dayofyear
df.drop('saledate', axis=1, inplace=True)
# Fill the numeric columns with median
for label, content in df.items():
if pd.api.types.is_numeric_dtype(content):
if pd.isnull(content).sum():
# Add a binary column which tells us if data was missing
df[label+"_is_missing"] = pd.isnull(content)
# Fill missing numeric values with median
df[label] = content.fillna(content.median())
# Fill categorical missing data and turn categories into numbers
if not pd.api.types.is_numeric_dtype(content):
# Add binary column to indicate whether sample had missing value
df[label+"_is_missing"] = pd.isnull(content)
# Turn categories into numbers and add +1
df[label] = pd.Categorical(content).codes+1
return df
# Preprocesses test data
df_test_tmp = preprocess_data(df_test)
df_test_tmp.shape
# Try prediction
y_preds = ideal_rf.predict(df_test_tmp)
# Find different columns using sets
set(X_train.columns) - set(df_test_tmp.columns)
# Manually adjust df_test to have auctioneerID_is_missing_column
# (Test dataset had no missing auctioneerID)
df_test_tmp['auctioneerID_is_missing'] = False
df_test_tmp.head().T
###Output
_____no_output_____
###Markdown
Now the test dataset has the same features as the training dataset
###Code
# Make predictions on test data
test_preds = ideal_rf.predict(df_test_tmp)
test_preds
###Output
_____no_output_____
###Markdown
https://www.kaggle.com/c/bluebook-for-bulldozers/overview/evaluationSubmission files should be formatted as follows:* Have a header: "SalesID,SalePrice"* Contain two columns * SalesID: SalesID for the validation set in sorted order * SalePrice: Your predicted price of the sale
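For reference, the first few lines of a valid submission file would look something like this (the SalesID and SalePrice values below are made up purely for illustration):

```
SalesID,SalePrice
1227829,21000.0
1227844,56000.0
1227847,10500.0
```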
###Code
# Format predictions into required Kaggle format
df_preds = pd.DataFrame()
df_preds['SalesID'] = df_test_tmp['SalesID']
df_preds['SalesPrice'] = test_preds
df_preds
# Export prediction data
df_preds.to_csv('../../data/bulldozers/test_predictions.csv')
###Output
_____no_output_____
###Markdown
Feature ImportanceFeature importance seeks to figure out which attributes of the data were the most important when it comes to predicting the **target variable** (SalePrice).
###Code
# Find feature importance of our best model
len(ideal_rf.feature_importances_)
len(X_train.columns)
# Helper function for plotting feature importance
def plot_features(columns, importances, n=20):
df = (pd.DataFrame({'features': columns,
'feature_importances': importances})
.sort_values('feature_importances', ascending=False)
.reset_index(drop=True))
# Plot dataframe
fig, ax = plt.subplots(figsize=(10,6))
    ax.barh(df.features[:n], df.feature_importances[:n])
ax.set_ylabel('Features')
ax.set_xlabel('Feature Importance')
ax.invert_yaxis();
plot_features(X_train.columns, ideal_rf.feature_importances_)
df['ProductSize'].value_counts()
df['fiSecondaryDesc'].value_counts()
df['Enclosure'].value_counts()
df_tmp['Coupler_System_is_missing'].value_counts()
###Output
_____no_output_____
###Markdown
Correlation Matrix
###Code
# Create correlation matrix
corr_matrix = df_tmp.corr().abs()
# Select upper triangle of matrix
upper = corr_matrix.where(np.triu(np.ones(corr_matrix.shape), k=1).astype(bool))
# Find index of feature columns with correlation greater than 0.95
to_drop = [column for column in upper.columns if any(upper[column] > 0.95)]
# Drop features
df_dropped = df_tmp.drop(df_tmp[to_drop], axis=1)
df_dropped.head()
df_dropped.info()
# Split data into training and validation
df_val = df_dropped[df_dropped.saleYear == 2012]
df_train = df_dropped[df_dropped.saleYear != 2012]
# Split data into X & y
X_train, y_train = df_train.drop('SalePrice', axis=1), df_train['SalePrice']
X_valid, y_valid = df_val.drop('SalePrice', axis=1), df_val['SalePrice']
X_train.shape, y_train.shape, X_valid.shape, y_valid.shape
# Instantiate Random Forest Regressor with subset of data
rf_clf = RandomForestRegressor(n_jobs=-1,
random_state=42)
# create hyperparameter grid for RandomForest
rf_param_grid = {'n_estimators': np.arange(10, 1000, 50),
'max_depth': [None, 3, 5, 10],
'min_samples_split': np.arange(2, 20, 2),
'min_samples_leaf': np.arange(1, 20, 2),
'max_features': [0.5, 1, 'sqrt', 'auto'],
'max_samples': [10000]}
rs_rf_dropped = RandomizedSearchCV(rf_clf,
rf_param_grid,
cv=5,
n_iter=100,
verbose=True)
# Fit model
%time
rs_rf_dropped.fit(X_train, y_train)
rs_rf_dropped.best_params_, rs_rf_dropped.best_estimator_
rs_rf_dropped_scores = show_scores(rs_rf_dropped)
rs_rf_dropped_scores
###Output
_____no_output_____
###Markdown
Preprocess test dataset for dropped correlated features
###Code
# Find different columns using sets
to_drop = list(set(df_test_tmp.columns) - set(X_train.columns))
# Drop those columns from test dataset
df_test_dropped = df_test_tmp.drop(to_drop, axis=1)
df_test_dropped.shape
# Make predictions on test data
test_preds = rs_rf_dropped.predict(df_test_dropped)
test_preds
# Format predictions into required Kaggle format
df_preds_dropped = pd.DataFrame()
df_preds_dropped['SalesID'] = df_test_tmp['SalesID']
df_preds_dropped['SalesPrice'] = test_preds
df_preds_dropped
# Export prediction data
df_preds_dropped.to_csv('../../data/bulldozers/test_predictions_dropped_features.csv')
###Output
_____no_output_____
|
src/processingNets.ipynb
|
###Markdown
Approach 1: threshold + intervals
###Code
import pandas as pd
import numpy as np
#parameters
#thre =np.linspace(0, 1, 11) #
thre= [0.0, 0.1, 0.2, 0.3 ,0.4, 0.5, 0.6, 0.7, 0.8, 0.9 ]
#thre= [0.0, 0.05, 0.1, 0.15, 0.2,0.25, 0.3,0.35 ,0.4,0.45, 0.5, 1.1]
reactions = pd.read_csv('ReactionMetabolites_list/R22_reactions.txt',sep="\t")
metabolites= pd.read_csv('ReactionMetabolites_list/R22_metabolites.txt',sep="\t")
#reading
for class_label in ['normal', 'cancer']:
df = pd.read_csv('weightedNetworks/R22_'+class_label+'.txt',sep="\t").dropna() #remove rows with NaN #R22_normal
patients= df.columns.values
#filtering by threshold
for i in range(0,len(thre)-1):
#selecting colunms
for col in range(2,107):#105 patients
data_thre=df[(df.iloc[:,col] >= thre[i]) & (df.iloc[:,col] <thre[i+1])]
#writing
data_thre.iloc[:,[0,1]].to_csv('approaches/thre_interv/'+ class_label+'/'+patients[col]+'_th='+str(round(thre[i], 3))+'_'+str(round(thre[i+1], 3))+'.txt', sep='\t', index=False,header=False) #, decimal=',')
#Filtering
# part1= dtemp[dtemp['NodeA'].isin(metabolites['ID'].values.tolist())] #columns on left with metabolites only
# part2= dtemp[dtemp['NodeA'].isin(reactions['ID'].values.tolist())] #columns on left with reactions only
# print(part1['NodeA'].size+part2['NodeA'].size)
###Output
_____no_output_____
###Markdown
Approach 2: threshold (less than)
###Code
import pandas as pd
import numpy as np
#parameters
#thre =np.linspace(0, 1, 11) #
thre= [0.1, 0.2, 0.3 ,0.4, 0.5, 0.6, 0.7, 0.8, 0.9 ]
reactions = pd.read_csv('ReactionMetabolites_list/R22_reactions.txt',sep="\t")
metabolites= pd.read_csv('ReactionMetabolites_list/R22_metabolites.txt',sep="\t")
#reading
for class_label in ['normal', 'cancer']:
df = pd.read_csv('weightedNetworks/R22_'+class_label+'.txt',sep="\t").dropna() #remove rows with NaN #R22_normal
patients= df.columns.values
#filtering by threshold
for i in range(0,len(thre)-1):
#selecting colunms
for col in range(2,107):#105 patients
data_thre=df[(df.iloc[:,col] <thre[i])]
#writing
data_thre.iloc[:,[0,1]].to_csv('approaches/thre_lessthan/'+ class_label+'/'+patients[col]+'_th='+str(round(thre[i], 3))+'.txt', sep='\t', index=False,header=False) #, decimal=',')
###Output
_____no_output_____
###Markdown
save weighted networks
###Code
import pandas as pd
import numpy as np
#parameters
reactions = pd.read_csv('ReactionMetabolites_list/R22_reactions.txt',sep="\t")
metabolites= pd.read_csv('ReactionMetabolites_list/R22_metabolites.txt',sep="\t")
#reading
for class_label in ['normal', 'cancer']:
df = pd.read_csv('weightedNetworks/R22_'+class_label+'.txt',sep="\t").dropna() #remove rows with NaN #R22_normal
patients= df.columns.values
#selecting colunms
for col in range(2,107):#105 patients
#writing
df.iloc[:,[0,1,col]].to_csv('approaches/weigth/'+ class_label+'/'+patients[col]+'.txt', sep='\t', index=False,header=False) #, decimal=',')
import matplotlib.pyplot as plt
import networkx as nx
import pandas as pd
import numpy as np
###Output
_____no_output_____
###Markdown
Extracting network measurements
###Code
#!/usr/bin/python3
from functionHierarchical import hierarchical_degree
def tic():
#Homemade version of matlab tic and toc functions
import time
global startTime_for_tictoc
startTime_for_tictoc = time.time()
def toc():
import time
if 'startTime_for_tictoc' in globals():
print("Elapsed time is " + str(time.time() - startTime_for_tictoc) + " seconds.")
else:
print("Toc: start time not set")
def extract_measures(G):
"""Extract a vector of measurements
"""
num_nodes = nx.number_of_nodes(G)
num_edges = nx.number_of_edges(G)
if num_nodes == 0:
raise Exception('graph is empty')
else:
F = {} #features
""" Global """
#F['num_nodes'] = num_nodes
F['num_edges'] = num_edges
#H = hierarchical_degree(G, nivel_max=3)
#F['avg_degree'] = np.average(H[0,:])
#F['avg_hier2'] = np.average(H[1,:])
#F['avg_hier3'] = np.average(H[2,:])
        ##F['avg_clust'] = nx.average_clustering(G) # 0; in this case all the networks returned 0
##tic() #avgpathlenght is expensive
#F['avg_path'] = nx.algorithms.average_shortest_path_length(G)
#F['pearson'] = nx.algorithms.degree_pearson_correlation_coefficient(G) #-0.18656860948991077
#F['avg_leff'] = nx.local_efficiency(G)
#F['avg_geff'] = nx.global_efficiency(G)
#F['avg_trans'] = nx.algorithms.transitivity(G) #0
""" Local measures """
#ecc = nx.algorithms.eccentricity(G) # [0.01408451 0. 0.32394366 0. 0.66197183]
#tic() #betweeness is expensive
#bet = nx.algorithms.betweenness_centrality(G)# [0.97183099 0. 0. 0.01408451 0.01408451]
#katz = nx.algorithms.katz_centrality(G) # [0.67605634 0.29577465 0. 0. 0.02816901]
        #clust = nx.algorithms.clustering(G) #[0. 0. 1. 0. 0.] clustering in these networks gives 0
#sqclust= nx.algorithms.square_clustering(G) #[0.64788732 0.04225352 0.02816901 0.01408451 0.26760563]
#close = nx.algorithms.closeness_centrality(G) #[0.64788732 0.32394366 0. 0. 0.02816901]
#degcen = nx.algorithms.degree_centrality(G) #[0.97183099 0. 0. 0. 0.02816901]
#F['avg_ecc'] = np.average(list(ecc.values()))
#F['avg_bet'] = np.average(list(bet.values()))
#F['avg_katz'] = np.average(list(katz.values()))
#F['avg_sqclust']= np.average(list(sqclust.values()))
#F['avg_close'] = np.average(list(close.values()))
#F['avg_degcen'] = np.average(list(degcen.values()))
return F
# def histo_descriptor(array, bins):
# hist, bin_edges = np.histogram(array, bins = bins)
# return hist/sum(hist) #density_histo
###Output
_____no_output_____
###Markdown
Model as networks
###Code
#reactions = pd.read_csv('UNIMIB_data/ReactionMetabolites_list/R22_reactions.txt',sep="\t")
metabolites= pd.read_csv('UNIMIB_data/ReactionMetabolites_list/R22_metabolites.txt',sep="\t")
def read_network(filename):
""" return complete gcc network"""
df = pd.read_csv(filename,sep="\t")
nodeA= df.iloc[:,0]
nodeB= df.iloc[:,1]
peso = df.iloc[:,2]
    all_nodes = pd.concat([nodeA, nodeB], ignore_index=True)  # ignore_index, i.e. re-indexes
G=nx.Graph()
#add nodes
for m in all_nodes[all_nodes.isin(metabolites['ID'])]:
#G.add_node(m, color='r') #metabolite
G.add_node(m) #metabolite
for r in all_nodes[~all_nodes.isin(metabolites['ID'])]:
#G.add_node(r, color='b') #reaction
G.add_node(r) #reaction
#add edges
for i in range(nodeA.size):
e1, e2, p=nodeA.get(i), nodeB.get(i), peso.get(i)
G.add_edge(e1, e2, peso=float(p))
return G
# largest connected component
#G0 = sorted(nx.connected_component_subgraphs(G), key=len, reverse=True)[0]#0 = the largest network
#return G0
def threshold_network_bigger_than(G, thre):
""" filtering by threshold """
filtered=[(u,v) for (u,v,w) in G.edges(data=True) if (w['peso'] >= thre)]
if(len(list(filtered)) == 0):
raise Exception('filtered graph is empty')
else:
# largest connected component
#Gthre = sorted(nx.connected_component_subgraphs(nx.Graph(filtered)), key=len, reverse=True)[0]
#deprecated in version 2.1
# identify largest connected component version 2.4
g=nx.Graph(filtered)
Gcc = sorted(nx.connected_components(g), key=len, reverse=True)
Gthre = G.subgraph(Gcc[0])
#print('thre= [%.4f, %.4f], nodes= %d'% (thre_min, thre_max, nx.number_of_nodes(Gthre)))
return Gthre
#write
# nx.write_graphml(G0_thre, 'TCGA_A7_A0CE_thre=0.7_0.8.graphml')
###Output
_____no_output_____
###Markdown
Main
###Code
G= read_network('approaches/weigth/normal/TCGA_A7_A0CE.txt')
# nx.write_graphml(G, 'TCGA_A7_A0CE.graphml')
print(nx.number_of_nodes(G))
from networkx.algorithms import bipartite
print(bipartite.is_bipartite(G))
#Example
G= read_network('approaches/weigth/cancer/TCGA_A7_A0CE.txt')
thre= [10**-4, 10**-3, 10**-2, 0.1, 0.2, 0.3 ,0.4, 0.5,0.6, 0.7, 0.8, 0.9]
for i in range(len(thre)):
Gthre=threshold_network_bigger_than(G, thre[i])
#Gthre=threshold_network_less_than(G, thre[i])
#nx.write_graphml(G, 'TCGA_A7_A0CE.graphml')
print('thre= %.4f , nodes= %d'% (thre[i], nx.number_of_nodes(Gthre)))
###Output
thre= 0.0001 , nodes= 8036
thre= 0.0010 , nodes= 7793
thre= 0.0100 , nodes= 7472
thre= 0.1000 , nodes= 5875
thre= 0.2000 , nodes= 3890
thre= 0.3000 , nodes= 2494
thre= 0.4000 , nodes= 1924
thre= 0.5000 , nodes= 1065
thre= 0.6000 , nodes= 504
thre= 0.7000 , nodes= 335
thre= 0.8000 , nodes= 242
thre= 0.9000 , nodes= 211
###Markdown
Edges
###Code
#Example
G= read_network('approaches/weigth/cancer/TCGA_A7_A0CE.txt')
thre= [10**-4, 10**-3, 10**-2, 0.1, 0.2, 0.3 ,0.4, 0.5,0.6, 0.7]
for i in range(len(thre)):
Gthre=threshold_network_bigger_than(G, thre[i])
#Gthre=threshold_network_less_than(G, thre[i])
#nx.write_graphml(G, 'TCGA_A7_A0CE.graphml')
    print('thre= %.4f , edges= %d'% (thre[i], nx.number_of_edges(Gthre)))
from os import listdir
from os.path import isfile, join
# Load network
#left-threshold
# thre= [0.0, 10**-4, 10**-3, 10**-2, 0.1, 0.2, 0.3 ,0.4, 0.5,0.6, 1.1]
# thre= [0.0, 0.1, 0.2, 0.3 ,0.4, 0.5,0.6, 1.1]
# thre= [0.0, 10**-4, 10**-3]
#bigger_than threshold
thre= [10**-4, 10**-3, 10**-2, 0.1, 0.2, 0.3 ,0.4, 0.5,0.6, 0.7]
pacientes= [f for f in listdir('approaches/weigth/cancer') if isfile(join('approaches/weigth/cancer', f))]
#cancer has the same pacientes as normal
path= 'approaches/weigth/'
labels= ['cancer', 'normal']
num_pacientes=len(pacientes)
num_medidas=5 #'avg_degree', 'avg_hier2','avg_hier3','avg_path','pearson','avg_bet'
#num_thre=(len(thre)-1) #Range threshold
num_thre =len(thre)#>= threshold
num_descritores= num_thre * num_medidas
features = np.zeros((num_pacientes*2, num_descritores))
k = list()
str_clase=list()
int_clase=list()
atr_names= ['avg_degree', 'avg_hier2','avg_hier3','avg_path','pearson']*num_thre
for c in range(len(labels)):
print(labels[c])
for p in range(num_pacientes):
G= read_network(path+labels[c]+'/'+ pacientes[p]) #reading
print('{}'.format(pacientes[p].replace('.txt','')),sep = "\t", end="\t")
k.append(pacientes[p])
str_clase.append(labels[c])
int_clase.append(c)
#threshold
#v=len(thre)-1 # #Range threshold
v= len(thre) #>= threshold
for i in range(v):
#Gthre=threshold_network_range(G, thre[i], thre[i+1]) # #Range threshold
Gthre=threshold_network_bigger_than(G, thre[i]) #>= threshold
#Gthre=threshold_network_less_than(G, thre[i]) #<= threshold
f=extract_measures(Gthre) #dict
#print(f)
feats= list(f.values())
#atr_names= list(f.keys())
#features[(c*num_pacientes)+p][i*num_medidas:(i*num_medidas)+num_medidas] = feats[:]
#print('%s, thre= [%.4f, %.4f], %s'% (pacientes[p].replace('.txt',''), thre[i], thre[i+1],feats[:] ))
#printing number of edges
print(*feats, sep = "\t", end="\t")
print()
#data = pd.DataFrame(features, columns=f.keys())
data = pd.DataFrame(features)
data
# print(k)
atr_names= ['avg_degree', 'avg_hier2','avg_hier3','avg_path','pearson']*num_thre
# print(atr_names)
data = pd.DataFrame(features, columns=atr_names)
col1 = pd.DataFrame(k,columns= ['paciente'])
col2 = pd.DataFrame(str_clase,columns= ['name_class'])
col3 = pd.DataFrame(int_clase,columns= ['class'])
df = pd.concat([data, col1['paciente'], col2['name_class'],col3['class']], axis = 1)
df = pd.concat([data, col3['class'] ], axis = 1)
# df
#save dataframe to csv
df = pd.concat([data, col1['paciente'], col2['name_class'],col3['class']], axis = 1)
df.to_csv('Arffs/biggerthan.csv', sep='\t', index=False)
#save dataframe to arff
import arff
df = pd.concat([data, col3['class'] ], axis = 1)
arff.dump('biggerthan.arff', df.values, relation='thre=power', names=df.columns)
###Output
_____no_output_____
|
lectures/ml/clustering/Clustering_Code_Example.ipynb
|
###Markdown
Clustering Documentation Example K-means: k-means is one of the most commonly used clustering algorithms that clusters the data points into a predefined number of clusters. The MLlib implementation includes a parallelized variant of the k-means++ method called kmeans||. KMeans is implemented as an Estimator and generates a KMeansModel as the base model. Input Columns: featuresCol (Vector, default "features"): Feature vector. Output Columns: predictionCol (Int, default "prediction"): Predicted cluster center.
###Code
#Cluster methods Example
from pyspark.sql import SparkSession
spark = SparkSession.builder.appName('cluster').getOrCreate()
from pyspark.ml.clustering import KMeans
# Loads data.
dataset = spark.read.format("libsvm").load("sample_kmeans_data.txt")
dataset.show()
final_data = dataset.select("features")
# Trains a k-means model.
kmeans = KMeans().setK(3).setSeed(1)
model = kmeans.fit(final_data)
from pyspark.ml.evaluation import ClusteringEvaluator
# Evaluate clustering by computing Within Set Sum of Squared Errors.
evaluator = ClusteringEvaluator()
# Compute cost
model.summary.trainingCost
# Shows the result.
centers = model.clusterCenters()
print("Cluster Centers: ")
for center in centers:
print(center)
final_data.show()
###Output
+--------------------+
| features|
+--------------------+
| (3,[],[])|
|(3,[0,1,2],[0.1,0...|
|(3,[0,1,2],[0.2,0...|
|(3,[0,1,2],[9.0,9...|
|(3,[0,1,2],[9.1,9...|
|(3,[0,1,2],[9.2,9...|
+--------------------+
###Markdown
Alright let's code through our own example!
###Code
results = model.transform(final_data)
results.show()
###Output
+--------------------+----------+
| features|prediction|
+--------------------+----------+
| (3,[],[])| 1|
|(3,[0,1,2],[0.1,0...| 1|
|(3,[0,1,2],[0.2,0...| 2|
|(3,[0,1,2],[9.0,9...| 0|
|(3,[0,1,2],[9.1,9...| 0|
|(3,[0,1,2],[9.2,9...| 0|
+--------------------+----------+
|
assignments/04/analysis.ipynb
|
###Markdown
Graph 1: x: Beam size, y1: BLEU, y2: Brevity Penalty
###Code
fig, ax1 = plt.subplots(1)
ax1.plot(alpha0["beam_size"], alpha0["bleu"], color="blue")
ax1.set_xlabel("Beam size")
ax1.set_ylabel("BLEU", color="blue")
ax1.tick_params("y", labelcolor="blue")
ax2 = ax1.twinx()
ax2.plot(alpha0["beam_size"], alpha0["bp"], color="red")
ax2.set_ylabel("Brevity penalty", color="red")
ax2.tick_params("y", labelcolor="red")
alphabeam = df.pivot(index="beam_size", columns="alpha", values="bleu")
alphabeam = alphabeam.transpose().sort_values("alpha", ascending=False)
fig, ax = plt.subplots(figsize=(7,7))
im = ax.imshow(alphabeam, cmap="Blues")
ax.set_xticks(range(len(alphabeam.columns)))
ax.set_xticklabels([str(int(i)) for i in alphabeam.columns])
ax.set_xlabel("Beam size")
ax.set_yticks(range(len(alphabeam.index.values)))
ax.set_yticklabels([str(i) for i in alphabeam.index.values])
ax.set_ylabel("Alpha")
ax.set_title("BLEU for different beam size/alpha configurations")
fig.tight_layout()
plt.colorbar(im, shrink=0.5)
plt.show()
###Output
_____no_output_____
|
notebooks/assignment5_notebook.ipynb
|
###Markdown
Assignment 5: (Un)supervised Machine Learning__Task:__ Train an LDA model on your data to extract structured information that can provide insight into your data. For example, maybe you are interested in seeing how different authors cluster together or how concepts change over time in this dataset.
###Code
# standard library
import sys,os
sys.path.append(os.path.join(".."))
from pprint import pprint
# data and nlp
import pandas as pd
import spacy
nlp = spacy.load("en_core_web_sm", disable=["ner"])
nlp.max_length = 67000000
# visualisation
import pyLDAvis.gensim
pyLDAvis.enable_notebook()
import seaborn as sns
from matplotlib import rcParams
# figure size in inches
rcParams['figure.figsize'] = 20,10
# LDA tools
import gensim
import gensim.corpora as corpora
from gensim.models import CoherenceModel
from utils import lda_utils
# warnings
import logging, warnings
warnings.filterwarnings('ignore')
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.ERROR)
###Output
/home/cds-au617836/cds-language-forked/lang101/lib/python3.6/site-packages/scipy/sparse/sparsetools.py:21: DeprecationWarning: `scipy.sparse.sparsetools` is deprecated!
scipy.sparse.sparsetools is a private module for scipy.sparse, and should not be used.
_deprecated()
[nltk_data] Downloading package stopwords to /home/cds-
[nltk_data] au617836/nltk_data...
[nltk_data] Package stopwords is already up-to-date!
###Markdown
__Load text__
###Code
filename = os.path.join("..", "assignments", "data", "trumptweets.csv")
tweets_df = pd.read_csv(filename,
lineterminator = "\n")
tweets_df = tweets_df.loc[:, ("id", "content", "date")]
tweets_df.head()
###Output
_____no_output_____
###Markdown
__Create chunks of 10 tweets at a time__
###Code
# Create empty list for chunks of tweets
chunks = []
# Loop through the tweets and create chunks of 10 tweets
for i in range(0, len(tweets_df["content"]), 10):
chunks.append(' '.join(tweets_df["content"][i:i+10]))
chunks[:10]
len(chunks)
###Output
_____no_output_____
###Markdown
__Process using ```gensim```__
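As a rough sketch of what the `Phrases`/`Phraser` pair used below does: it learns which token pairs co-occur unusually often and merges them into single tokens. The toy corpus and threshold here are invented for illustration; whether a given pair actually gets merged depends on the corpus statistics:

```python
from gensim.models import Phrases
from gensim.models.phrases import Phraser

# Tiny toy corpus where "witch hunt" co-occurs repeatedly (illustrative only)
toy_docs = [["total", "witch", "hunt"],
            ["the", "witch", "hunt", "continues"],
            ["witch", "hunt", "again"]]

toy_bigram = Phraser(Phrases(toy_docs, min_count=1, threshold=1))

# Pairs scoring above the threshold come back joined with "_"
print(toy_bigram[["another", "witch", "hunt"]])  # e.g. ['another', 'witch_hunt']
```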
###Code
bigram = gensim.models.Phrases(chunks, min_count=3, threshold=50)
trigram = gensim.models.Phrases(bigram[chunks], threshold=50)
bigram_mod = gensim.models.phrases.Phraser(bigram)
trigram_mod = gensim.models.phrases.Phraser(trigram)
# Lemmatizing and part-of-speech tagging
data_processed = lda_utils.process_words(chunks,
nlp,
bigram_mod,
trigram_mod,
allowed_postags=["NOUN"]) # we only keep nouns as part-of-speech
data_processed[:5]
###Output
_____no_output_____
###Markdown
__Create bag of words__
###Code
# Create Dictionary
id2word = corpora.Dictionary(data_processed)
# Create Corpus: Term Document Frequency
corpus = [id2word.doc2bow(text) for text in data_processed]
corpus[:5]
###Output
_____no_output_____
###Markdown
__LDA Model__
###Code
lda_model = gensim.models.LdaMulticore(corpus=corpus,
id2word=id2word,
num_topics=10,
random_state=100,
chunksize=10,
passes=10,
iterations=100,
per_word_topics=True,
minimum_probability=0.0)
###Output
_____no_output_____
###Markdown
__Calculate perplexity and coherence__
###Code
print('\nPerplexity: ', lda_model.log_perplexity(corpus))
coherence_model_lda = CoherenceModel(model=lda_model,
texts=data_processed,
dictionary=id2word,
coherence='c_v')
coherence_lda = coherence_model_lda.get_coherence()
print('\nCoherence Score: ', coherence_lda)
###Output
Perplexity: -7.874119269543192
Coherence Score: 0.4038353135586673
###Markdown
__Inspect topics__
###Code
pprint(lda_model.print_topics())
###Output
[(0,
'0.160*"com" + 0.114*"status" + 0.086*"twitter" + 0.068*"https" + '
'0.062*"pic" + 0.013*"scam" + 0.008*"tonight" + 0.008*"crime" + '
'0.008*"seanhannity" + 0.008*"realdonaldtrump"'),
(1,
'0.035*"source" + 0.027*"email" + 0.024*"agent" + 0.021*"charge" + '
'0.021*"police" + 0.020*"crime" + 0.015*"doubt" + 0.014*"information" + '
'0.012*"paper" + 0.011*"vision"'),
(2,
'0.034*"people" + 0.032*"country" + 0.020*"job" + 0.020*"border" + '
'0.017*"time" + 0.017*"year" + 0.015*"vote" + 0.014*"news" + 0.012*"deal" + '
'0.011*"today"'),
(3,
'0.062*"thank" + 0.023*"realdonaldtrump" + 0.023*"bit" + 0.014*"love" + '
'0.014*"http" + 0.014*"course" + 0.014*"show" + 0.013*"time" + 0.013*"trump" '
'+ 0.013*"work"'),
(4,
'0.082*"vet" + 0.037*"decade" + 0.026*"voting" + 0.019*"representative" + '
'0.018*"luck" + 0.017*"stop" + 0.015*"pol" + 0.015*"inspiration" + '
'0.015*"let" + 0.012*"duty"'),
(5,
'0.084*"crowd" + 0.030*"celebapprentice" + 0.028*"intelligence" + '
'0.025*"charity" + 0.024*"gang" + 0.023*"witness" + 0.023*"side" + '
'0.020*"storm" + 0.020*"legislation" + 0.019*"loophole"'),
(6,
'0.105*"dem" + 0.088*"crime" + 0.042*"post" + 0.034*"impeachment" + '
'0.028*"member" + 0.023*"phone" + 0.019*"conversation" + 0.018*"officer" + '
'0.014*"voter" + 0.009*"amendment"'),
(7,
'0.033*"medium" + 0.019*"year" + 0.017*"people" + 0.015*"today" + '
'0.014*"immigration" + 0.014*"time" + 0.014*"http" + 0.014*"bit" + '
'0.012*"news" + 0.011*"job"'),
(8,
'0.098*"witch" + 0.072*"hunt" + 0.027*"reform" + 0.027*"court" + '
'0.025*"mind" + 0.018*"obstruction" + 0.017*"mueller" + 0.017*"view" + '
'0.015*"side" + 0.015*"healthcare"'),
(9,
'0.153*"com" + 0.090*"www" + 0.055*"http" + 0.043*"trump" + 0.021*"schedule" '
'+ 0.021*"video" + 0.016*"hater" + 0.014*"article" + 0.014*"room" + '
'0.014*"matter"')]
###Markdown
__Run model multiple times to find best fit__
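Once the sweep below has filled `model_list` and `coherence_values`, a quick plot of coherence against topic count makes it easier to pick the best-fitting model. A minimal sketch, assuming `compute_coherence_values` returns one coherence score per topic count, in the order of the start/limit/step range passed below:

```python
import numpy as np
import matplotlib.pyplot as plt

topic_range = list(range(5, 40, 5))  # must match the start/limit/step used below
plt.plot(topic_range, coherence_values, marker="o")
plt.xlabel("Number of topics")
plt.ylabel("Coherence score (c_v)")
plt.title("Choosing the number of topics")
plt.show()

# Keep the model with the highest coherence
best_model = model_list[int(np.argmax(coherence_values))]
```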
###Code
model_list, coherence_values = lda_utils.compute_coherence_values(texts=data_processed,
corpus=corpus,
dictionary=id2word,
start=5,
limit=40,
step=5)
###Output
_____no_output_____
###Markdown
__Most dominant topic per chunk__
###Code
df_topic_keywords = lda_utils.format_topics_sentences(ldamodel=lda_model,
corpus=corpus,
texts=data_processed)
# Format
df_dominant_topic = df_topic_keywords.reset_index()
df_dominant_topic.columns = ['Chunk_No', 'Dominant_Topic', 'Topic_Perc_Contrib', 'Keywords', 'Text']
df_dominant_topic.head(10)
# Display setting to show more characters in column
pd.options.display.max_colwidth = 100
sent_topics_sorteddf = pd.DataFrame()
sent_topics_outdf_grpd = df_topic_keywords.groupby('Dominant_Topic')
for i, grp in sent_topics_outdf_grpd:
sent_topics_sorteddf = pd.concat([sent_topics_sorteddf,
grp.sort_values(['Perc_Contribution'], ascending=False).head(1)],
axis=0)
# Reset Index
sent_topics_sorteddf.reset_index(drop=True, inplace=True)
# Format
sent_topics_sorteddf.columns = ['Topic_Num', "Topic_Perc_Contrib", "Keywords", "Representative Text"]
# Show
sent_topics_sorteddf.head(10)
###Output
_____no_output_____
###Markdown
__Visualizations with pyLDAvis__
###Code
vis = pyLDAvis.gensim.prepare(lda_model, corpus, dictionary=lda_model.id2word)
vis
###Output
_____no_output_____
###Markdown
__Visualizations with seaborn__
###Code
# Preprocessing
values = list(lda_model.get_document_topics(corpus))
split = []
for entry in values:
topic_prevelance = []
for topic in entry:
topic_prevelance.append(topic[1])
split.append(topic_prevelance)
df = pd.DataFrame(map(list,zip(*split)))
sns.lineplot(data=df.T.rolling(50).mean())
###Output
_____no_output_____
|
nb/timer.ipynb
|
###Markdown
output
###Code
import common  # assumes the repo-local helper module that provides save_and_export_notebook
common.save_and_export_notebook('timer')
###Output
_____no_output_____
|
Cap09/01_Conhecendo_o_Plotly.ipynb
|
###Markdown
___ Chapter 9 - Data Visualization with Plotly Section 1 - Getting to Know Plotly___ Setting up Plotly inside Jupyter
###Code
! pip install cufflinks
import pandas as pd
import cufflinks as cf
import plotly.tools as tls
tls.set_credentials_file(username='jtemporal', api_key='44QHB1wF7FdepdHM7AIx')
caminho = 'C:/Users/booth13-mgr2/Desktop/roteiros/dados/mtcars.csv'
carros = pd.read_csv(caminho)
carros.columns = ['car_names','mpg','cyl','disp', 'hp', 'drat', 'wt', 'qsec', 'vs', 'am', 'gear', 'carb']
df = carros[['cyl', 'wt','mpg']]
###Output
_____no_output_____
###Markdown
Creating a line chart with a Pandas DataFrame
###Code
layout = dict(title='Gráfico de um DataFrame Pandas',
xaxis=dict(title='eixo-x'),
yaxis=dict(title='eixo-y'))
df.iplot(filename='grafico-de-linha', layout=layout)
###Output
_____no_output_____
|
FeatureEngineering_DataScience/Demo180_RareCategories_OnePredominantCategory.ipynb
|
###Markdown
Rare Labels - Values present in only a small percentage of observations - Usually present in less than 5% of observations - Concept of cardinality Rare label consequences - May add information in low cardinality - May add noise in high cardinality Engineering Rare Labels - Replacing them with the most frequent label - Grouping all rare labels together Categorical variables can have: - One predominant category - A small number of categories - High cardinality
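The cells below focus on variables with one predominant category; for completeness, here is a minimal sketch of the "group all rare labels together" idea listed above, using an illustrative 5% cutoff (the column name in the commented usage is just an example from this dataset):

```python
import pandas as pd

def group_rare_labels(series: pd.Series, cutoff: float = 0.05) -> pd.Series:
    """Replace categories rarer than `cutoff` (as a fraction of rows) with 'Rare'."""
    freq = series.value_counts(normalize=True)
    rare = freq[freq < cutoff].index
    return series.where(~series.isin(rare), other="Rare")

# Hypothetical usage on the housing data loaded below:
# data["Neighborhood_grouped"] = group_rare_labels(data["Neighborhood"], cutoff=0.05)
```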
###Code
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
from google.colab import drive
drive.mount('/content/gdrive')
data = pd.read_csv("gdrive/My Drive/Colab Notebooks/FeatureEngineering/trainh.csv")
data.head()
data.columns
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(data[data.columns[:-1]], data.SalePrice,
test_size=0.2)
X_train.shape, X_test.shape
def label_encoder(X_train, X_test, columns, na_flag = False):
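    # Build an integer code for each category seen in X_train; categories that only appear in X_test map to NaN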
for col in columns:
mapper = {k:i for i, k in enumerate(X_train[col].unique(), 0)}
if na_flag:
mapper[np.nan] = np.nan
X_train.loc[:, col] = X_train.loc[:, col].map(mapper)
X_test.loc[:, col] = X_test.loc[:, col].map(mapper)
X_train['Street'].head()
print(X_train['Street'].value_counts())
X_train['Street'].isnull().sum()
label_encoder(X_train, X_test, ['Street'])
X_train['Street'].head()
def regressor(X_train, y_train, X_test, y_test, cols, model):
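    # Fit the supplied model on the selected columns and print the test-set mean squared error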
from sklearn.metrics import mean_squared_error
model.fit(X_train[cols],y_train)
y_pred = model.predict(X_test[cols])
print(mean_squared_error(y_test, y_pred))
from sklearn.linear_model import LinearRegression
model = LinearRegression()
regressor(X_train, y_train, X_test, y_test, ['Street'], model)
from sklearn.linear_model import RidgeCV
model = RidgeCV()
regressor(X_train, y_train, X_test, y_test, ['Street'], model)
from sklearn.linear_model import Ridge
model = Ridge()
regressor(X_train, y_train, X_test, y_test, ['Street'], model)
from sklearn.svm import SVR
model = SVR()
regressor(X_train, y_train, X_test, y_test, ['Street'], model)
from sklearn.neural_network import MLPRegressor
model = MLPRegressor()
regressor(X_train, y_train, X_test, y_test, ['Street'], model)
from sklearn.svm import LinearSVR
model = LinearSVR()
regressor(X_train, y_train, X_test, y_test, ['Street'], model)
from sklearn.ensemble import RandomForestRegressor
model = RandomForestRegressor()
regressor(X_train, y_train, X_test, y_test, ['Street'], model)
from sklearn.tree import DecisionTreeRegressor
model = DecisionTreeRegressor()
regressor(X_train, y_train, X_test, y_test, ['Street'], model)
from sklearn.linear_model import SGDRegressor
model = SGDRegressor()
regressor(X_train, y_train, X_test, y_test, ['Street'], model)
# get number of categories in variables
categoricals = []
for col in data.columns:
if data[col].dtypes =='O':
print('{} categories : {} '.format(col, len(data[col].unique())))
categoricals.append(col)
# Get variables with less than n categories
n = 4
low_cardinals = []
for col in data.columns:
if data[col].dtypes =='O':
if len(data[col].unique())<n:
print('{} categories : {} '.format(col, len(data[col].unique())))
low_cardinals.append(col)
for col in low_cardinals:
if data[col].dtypes =='O': # if the variable is categorical
print(100*data.groupby(col)[col].count()/np.float(len(data)))
print()
label_encoder(X_train, X_test, low_cardinals)
for col in low_cardinals:
print(100*X_train.groupby(col)[col].count()/float(len(X_train)))
print()
from sklearn.linear_model import LinearRegression
model = LinearRegression()
regressor(X_train, y_train, X_test, y_test, low_cardinals, model)
from sklearn.linear_model import RidgeCV
model = RidgeCV()
regressor(X_train, y_train, X_test, y_test, low_cardinals, model)
from sklearn.linear_model import Ridge
model = Ridge()
regressor(X_train, y_train, X_test, y_test, low_cardinals, model)
from sklearn.svm import SVR
model = SVR()
regressor(X_train, y_train, X_test, y_test, low_cardinals, model)
from sklearn.neural_network import MLPRegressor
model = MLPRegressor()
regressor(X_train, y_train, X_test, y_test, low_cardinals, model)
from sklearn.svm import LinearSVR
model = LinearSVR()
regressor(X_train, y_train, X_test, y_test, low_cardinals, model)
from sklearn.ensemble import RandomForestRegressor
model = RandomForestRegressor()
regressor(X_train, y_train, X_test, y_test, low_cardinals, model)
from sklearn.tree import DecisionTreeRegressor
model = DecisionTreeRegressor()
regressor(X_train, y_train, X_test, y_test, low_cardinals, model)
from sklearn.linear_model import SGDRegressor
model = SGDRegressor()
regressor(X_train, y_train, X_test, y_test, low_cardinals, model)
###Output
5322207820.247289
|
(codes NN + CNN)/Part 1 Fake News Classifier NN & CNN.ipynb
|
###Markdown
###Code
from google.colab import drive
drive.mount('/content/drive')
#Importing the libraries which are required.
import pandas as pd
import numpy as np
import string
import matplotlib.pyplot as plt
%matplotlib inline
import seaborn as sns
import pickle
import warnings
warnings.filterwarnings("ignore")
import keras
from keras.utils.np_utils import to_categorical
from sklearn import metrics
from sklearn.metrics import classification_report
li = [5, 7, 25]
f = list(filter(lambda x: x*2 , li))
f
# memory footprint support libraries/code
!ln -sf /opt/bin/nvidia-smi /usr/bin/nvidia-smi
!pip install gputil
!pip install psutil
!pip install humanize
import psutil
import humanize
import os
import GPUtil as GPU
GPUs = GPU.getGPUs()
# XXX: only one GPU on Colab and isn’t guaranteed
gpu = GPUs[0]
def printm():
process = psutil.Process(os.getpid())
print("Gen RAM Free: " + humanize.naturalsize( psutil.virtual_memory().available ), " | Proc size: " + humanize.naturalsize( process.memory_info().rss))
print("GPU RAM Free: {0:.0f}MB | Used: {1:.0f}MB | Util {2:3.0f}% | Total {3:.0f}MB".format(gpu.memoryFree, gpu.memoryUsed, gpu.memoryUtil*100, gpu.memoryTotal))
printm()
import pandas as pd
header = ['text','SECTION_CLEANED','label']
df = pd.read_csv('/content/drive/My Drive/Machine Learning Projects/FAKE VS REAL NEWS CATEGORY CLASSIFIER PROJECT/SOURCE CODES AND DATASETS/FAKE VS REAL NEWS - FINAL DATASETS/dfcleanedascsv.csv', usecols = header)
df = pd.DataFrame(df)
df.head()
print(df.shape)
df.tail()
df.dropna(inplace=True)
print(df.shape)
label_count = df.label.value_counts()
print('Class 0:', label_count[0])
print('Class 1:', label_count[1])
print('Proportion:', round(label_count[0] / label_count[1], 2), ': 1')
label_count.plot(kind='bar', title='Count (label)')
# Summarize number of words
print("Number of words: ")
print(len(np.unique(np.hstack(df['SECTION_CLEANED'].values))))
df['SECTION_CLEANED'] = df['SECTION_CLEANED'].astype(str)
df['label'] = df['label'].replace(to_replace = ['FAKE', 'REAL'], value = [0,1])
df.head()
###Output
_____no_output_____
###Markdown
If you get any stripes in the next figure, then you have some missing values in the dataset.
###Code
import seaborn as sns
sns.heatmap(df.isnull(),yticklabels=False,cbar=False,cmap='YlGnBu')
from sklearn.model_selection import train_test_split
sentences = df['SECTION_CLEANED'].values
y = df['label'].values
sentences_train, sentences_test, y_train, y_test = train_test_split(sentences, y, test_size=0.20, random_state=1000)
print(sentences_train.shape, sentences_test.shape)
print(y_train.shape, y_test.shape)
# Summarize number of words
print("Number of words: ")
print(len(np.unique(np.hstack(sentences_train))))
X = np.concatenate((sentences_train, sentences_test), axis=0)
len(X[0])
# Summarize review length
print("Review length: ")
result = list(map(len,X))
print("Mean %.2f words (%f)" % (np.mean(result), np.std(result)))
#print("Mean {} words {}".format(np.mean(result), np.std(result)))
# plot review length as a boxplot and histogram
plt.figure(figsize=(15,8))
plt.subplot(211)
plt.boxplot(result)
plt.subplot(212)
plt.hist(result)
plt.show()
# Import TfidfVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer
vectorizer = TfidfVectorizer()
vectorizer.fit(sentences_train)
X_train = vectorizer.transform(sentences_train)
X_test = vectorizer.transform(sentences_test)
print(X_train.shape)
print(y_train.shape)
print(X_test.shape)
print(y_test.shape)
y_train
print(y_train[0:5])
y_train = keras.utils.to_categorical(y_train, num_classes=2, dtype='int32')
y_test = keras.utils.to_categorical(y_test, num_classes=2, dtype='int32')
y_train[0:5]
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
import tensorflow as tf
try:
from tensorflow.python.util import module_wrapper as deprecation
except ImportError:
from tensorflow.python.util import deprecation_wrapper as deprecation
deprecation._PER_MODULE_WARNING_LIMIT = 0
from tensorflow.python.util import deprecation
deprecation._PRINT_DEPRECATION_WARNINGS = False
import matplotlib.pyplot as plt
plt.style.use('ggplot')
def plot_history(history):
acc = history.history['acc']
val_acc = history.history['val_acc']
loss = history.history['loss']
val_loss = history.history['val_loss']
x = range(1, len(acc) + 1)
plt.figure(figsize=(12, 5))
plt.subplot(1, 2, 1)
plt.plot(x, acc, 'b', label='Training acc')
plt.plot(x, val_acc, 'r', label='Validation acc')
plt.title('Training and validation accuracy')
plt.legend()
plt.subplot(1, 2, 2)
plt.plot(x, loss, 'b', label='Training loss')
plt.plot(x, val_loss, 'r', label='Validation loss')
plt.title('Training and validation loss')
plt.legend()
###Output
_____no_output_____
###Markdown
A Simple Neural Network
###Code
from keras.models import Sequential
from keras import layers
from keras.layers import Dropout, BatchNormalization, Dense
from keras import *
input_dim = X_train.shape[1] # Number of features
print(input_dim)
model = Sequential()
model.add(layers.Dense(96, input_dim=input_dim, activation='relu'))
model.add(BatchNormalization())
model.add(layers.Dropout(0.4))
model.add(layers.Dense(32,activation='relu'))
model.add(BatchNormalization())
model.add(layers.Dropout(0.6))
model.add(layers.Dense(2, activation='softmax')) #initially 1 , sigmoid
model.compile(loss='binary_crossentropy',optimizer='adam',metrics=['accuracy'])
model.summary()
%%time
history = model.fit(X_train, y_train,epochs=11,verbose=1,validation_data=(X_test, y_test))
from sklearn import metrics
loss, accuracy = model.evaluate(X_train, y_train, verbose=False)
print("Training Accuracy: {:.4f}".format(accuracy))
loss, accuracy = model.evaluate(X_test, y_test, verbose=False)
print("Testing Accuracy: {:.4f}".format(accuracy))
print("CONFUSION MATRIX")
pred = model.predict(X_test)
cm = metrics.confusion_matrix(np.argmax(y_test, axis= 1),np.argmax(pred, axis = 1), labels=[0,1]) # 'FAKE','REAL'
print(cm)
plot_history(history)
# Save the weights
model.save_weights('/content/drive/My Drive/Machine Learning Projects/FAKE VS REAL NEWS CATEGORY CLASSIFIER PROJECT/SOURCE CODES AND DATASETS/PACKAGE 2 - SOURCE CODES AND FILES/Pretrained Models/fakenews_NN_model_weights.h5')
# Save the model architecture
with open('/content/drive/My Drive/Machine Learning Projects/FAKE VS REAL NEWS CATEGORY CLASSIFIER PROJECT/SOURCE CODES AND DATASETS/PACKAGE 2 - SOURCE CODES AND FILES/Pretrained Models/fakenews_NN_model_architecture.json', 'w') as f:
f.write(model.to_json())
###Output
_____no_output_____
###Markdown
A Convolutional Neural Network
###Code
from keras.preprocessing.text import Tokenizer
tokenizer = Tokenizer(num_words=5000, lower=True)
tokenizer.fit_on_texts(sentences_train)
X_train = tokenizer.texts_to_sequences(sentences_train)
X_test = tokenizer.texts_to_sequences(sentences_test)
vocab_size = len(tokenizer.word_index) + 1
with open('/content/drive/My Drive/Machine Learning Projects/FAKE VS REAL NEWS CATEGORY CLASSIFIER PROJECT/SOURCE CODES AND DATASETS/PACKAGE 2 - SOURCE CODES AND FILES/Pretrained Models/CNN_tokenizer.pkl', 'wb') as handle:
pickle.dump(tokenizer, handle, protocol=pickle.HIGHEST_PROTOCOL)
from keras.preprocessing.sequence import pad_sequences
maxlen = 1000 # 800
X_train = pad_sequences(X_train, padding='post', maxlen=maxlen)
X_test = pad_sequences(X_test, padding='post', maxlen=maxlen)
print(X_train[200])
from tensorflow.keras.models import Sequential
from tensorflow.keras import layers
from keras.layers import Dense, Flatten, Conv1D, Dropout, BatchNormalization
from keras.layers import MaxPooling1D
from keras.layers.embeddings import Embedding
from keras.preprocessing import sequence
from keras import *
from keras.callbacks import *
embedding_dim = 500
model = Sequential()
model.add(layers.Embedding(input_dim = vocab_size , output_dim = embedding_dim ,input_length = maxlen))
model.add(layers.Conv1D(filters=32, kernel_size=4, padding = 'valid' ,activation= 'relu' ))
model.add(layers.MaxPooling1D(pool_size=2))
model.add(layers.Conv1D(filters=64, kernel_size=5, padding = 'valid' ,activation= 'relu' ))
model.add(layers.MaxPooling1D(pool_size=2))
model.add(layers.Flatten())
model.add(layers.BatchNormalization())
model.add(layers.Dropout(0.4))
model.add(layers.Dense(512, activation= 'relu' ))
model.add(layers.BatchNormalization())
model.add(layers.Dropout(0.4))
model.add(layers.Dense(2, activation= 'softmax' ))
# Important Note - About the next line, needs to be taken care of.
#model.load_weights("/content/drive/My Drive/Machine Learning Projects/FAKE VS REAL NEWS CATEGORY CLASSIFIER PROJECT/SOURCE CODES AND DATASETS/PACKAGE 2 - SOURCE CODES AND FILES/Pretrained Models/reloader_weights.h5")
model.compile(optimizer='adam',loss='binary_crossentropy',metrics=['accuracy'])
model.summary()
%%time
#red_lr= ReduceLROnPlateau(monitor='val_loss', patience = 3 , verbose = 2 , factor = 0.5 , min_delta = 0.01 )
check=ModelCheckpoint(filepath=r'/content/drive/My Drive/Machine Learning Projects/FAKE VS REAL NEWS CATEGORY CLASSIFIER PROJECT/SOURCE CODES AND DATASETS/PACKAGE 2 - SOURCE CODES AND FILES/Pretrained Models/FAKENEWS_CNN_MODEL.h5', verbose=1 , monitor='val_acc',save_best_only=True, mode='max')
history = model.fit(X_train, y_train , epochs = 20 , verbose = 1 , callbacks = [check], validation_data=(X_test, y_test))
model.save_weights("/content/drive/My Drive/Machine Learning Projects/FAKE VS REAL NEWS CATEGORY CLASSIFIER PROJECT/SOURCE CODES AND DATASETS/PACKAGE 2 - SOURCE CODES AND FILES/Pretrained Models/reloader_weights.h5")
print("Saved model to disk")
from sklearn import metrics
loss, accuracy = model.evaluate(X_train, y_train, verbose=False)
print("Training Accuracy: {:.4f}".format(accuracy))
loss, accuracy = model.evaluate(X_test, y_test, verbose=False)
print("Testing Accuracy: {:.4f}".format(accuracy))
print("CONFUSION MATRIX")
pred = model.predict(X_test)
cm = metrics.confusion_matrix(np.argmax(y_test, axis= 1),np.argmax(pred, axis = 1), labels=[0,1]) # 'FAKE','REAL'
print(cm)
#CONFUSION MATRIX
#[578 77]
#[ 72 663]
print(classification_report(y_test.argmax(axis=1),pred.argmax(axis=1), target_names=['FAKE','REAL']))
plot_history(history)
###Output
Training Accuracy: 0.9849
Testing Accuracy: 0.8763
CONFUSION MATRIX
[[585 70]
[102 633]]
precision recall f1-score support
FAKE 0.85 0.89 0.87 655
REAL 0.90 0.86 0.88 735
accuracy 0.88 1390
macro avg 0.88 0.88 0.88 1390
weighted avg 0.88 0.88 0.88 1390
###Markdown
CONCATTED CONVOLUTIONAL MULTI MODEL ARCHITECTURE
###Code
# The multi-channel model definitions below must be active (not wrapped in a string) for the model_arc1 call further down to work.
from tensorflow.keras.models import Sequential
from tensorflow.keras import layers
from keras.layers import Dense, Flatten, Conv1D, Dropout, BatchNormalization
from keras.layers import MaxPooling1D
from keras.layers.embeddings import Embedding
from keras.preprocessing import sequence
from keras.utils.vis_utils import plot_model
from keras.layers.merge import concatenate
from keras import *
from keras.callbacks import *
def model_arc1(length, vocab_size):
embedding_dim = 500
# channel 1
inputs1 = Input(shape=(length,))
embedding1 = Embedding(input_dim = vocab_size, output_dim = embedding_dim)(inputs1)
conv1 = Conv1D(filters=32, kernel_size=3, activation='relu')(embedding1)
pool1 = MaxPooling1D(pool_size=2)(conv1)
flat1 = Flatten()(pool1)
# channel 2
inputs2 = Input(shape=(length,))
embedding2 = Embedding(input_dim = vocab_size, output_dim = embedding_dim)(inputs2)
conv2 = Conv1D(filters=64, kernel_size=4, activation='relu')(embedding2)
pool2 = MaxPooling1D(pool_size=2)(conv2)
flat2 = Flatten()(pool2)
# channel 3
inputs3 = Input(shape=(length,))
embedding3 = Embedding(input_dim = vocab_size, output_dim = embedding_dim)(inputs3)
conv3 = Conv1D(filters=256, kernel_size=5, activation='relu')(embedding3)
pool3 = MaxPooling1D(pool_size=2)(conv3)
flat3 = Flatten()(pool3)
# merge
merged = concatenate([flat1, flat2, flat3])
# interpretation
bn1 = BatchNormalization()(merged)
dense1 = Dense(512, activation='relu')(bn1)
bn1 = BatchNormalization()(dense1)
drop4 = Dropout(0.5)(bn1)
outputs = Dense(2, activation='softmax')(drop4)
model = Model(inputs=[inputs1, inputs2, inputs3], outputs=outputs)
# compile
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
#plot_model(model, show_shapes=True, to_file='/content/drive/My Drive/Machine Learning Projects/FAKE VS REAL NEWS CATEGORY CLASSIFIER PROJECT/SOURCE CODES AND DATASETS/PACKAGE 2 - SOURCE CODES AND FILES/Pretrained Models/model_concatted_convolution.png')
return model
def model_arc2(length, vocab_size):
embedding_dim = 500
# channel 1
inputs1 = Input(shape=(length,))
embedding1 = Embedding(input_dim = vocab_size, output_dim = embedding_dim)(inputs1)
conv1 = Conv1D(filters=32, kernel_size=4, activation='relu')(embedding1)
drop1 = Dropout(0.5)(conv1)
pool1 = MaxPooling1D(pool_size=2)(drop1)
flat1 = Flatten()(pool1)
# channel 2
inputs2 = Input(shape=(length,))
embedding2 = Embedding(input_dim = vocab_size, output_dim = embedding_dim)(inputs2)
conv2 = Conv1D(filters=32, kernel_size=6, activation='relu')(embedding2)
drop2 = Dropout(0.5)(conv2)
pool2 = MaxPooling1D(pool_size=2)(drop2)
flat2 = Flatten()(pool2)
# channel 3
inputs3 = Input(shape=(length,))
embedding3 = Embedding(input_dim = vocab_size, output_dim = embedding_dim)(inputs3)
conv3 = Conv1D(filters=32, kernel_size=8, activation='relu')(embedding3)
drop3 = Dropout(0.5)(conv3)
pool3 = MaxPooling1D(pool_size=2)(drop3)
flat3 = Flatten()(pool3)
# merge
merged = concatenate([flat1, flat2, flat3])
# interpretation
dense1 = Dense(10, activation='relu')(merged)
outputs = Dense(2, activation='softmax')(dense1)
model = Model(inputs=[inputs1, inputs2, inputs3], outputs=outputs)
# compile
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
# summarize
#model.summary()
#plot_model(model, show_shapes=True, to_file='model.png')
return model
# Initial Filters are 32 , 32 , 32 : "Acc " - 98.7 & 89 %
%%time
model_concat = model_arc1(maxlen, vocab_size)
print()
red_lr= ReduceLROnPlateau(monitor='val_loss', patience = 5 , verbose = 2 , factor = 0.5 , min_delta = 0.01 )
check=ModelCheckpoint(filepath=r'/content/drive/My Drive/Machine Learning Projects/FAKE VS REAL NEWS CATEGORY CLASSIFIER PROJECT/SOURCE CODES AND DATASETS/PACKAGE 2 - SOURCE CODES AND FILES/Pretrained Models/FAKENEWS_CNN_CONCAT_MODELx2.h5', verbose=1 , monitor='val_acc',save_best_only=True, mode='max')
history = model_concat.fit([X_train,X_train,X_train], y_train , epochs = 10 , verbose = 2 , callbacks = [red_lr, check], validation_data=([X_test,X_test,X_test], y_test), batch_size= 12)
model_concat.summary()
loss, accuracy = model_concat.evaluate([X_train,X_train,X_train], y_train, verbose=False)
print("Training Accuracy: {:.4f}".format(accuracy))
loss, accuracy = model_concat.evaluate([X_test,X_test,X_test], y_test, verbose=False)
print("Testing Accuracy: {:.4f}".format(accuracy))
from sklearn import metrics
print("CONFUSION MATRIX")
pred = model_concat.predict([X_test,X_test,X_test])
cm = metrics.confusion_matrix(np.argmax(y_test, axis= 1),np.argmax(pred, axis = 1), labels=[0,1]) # 'FAKE','REAL'
print(cm)
#CONFUSION MATRIX
#[777 50]
#[152 759]
acc = history.history['acc']
val_acc = history.history['val_acc']
loss = history.history['loss']
val_loss = history.history['val_loss']
x = range(1, len(acc) + 1)
plt.figure(figsize=(12, 5))
plt.subplot(1, 2, 1)
plt.plot(x, acc, 'b', label='Training acc')
plt.plot(x, val_acc, 'r', label='Validation acc')
plt.title('Training and validation accuracy')
plt.legend()
plt.subplot(1, 2, 2)
plt.plot(x, loss, 'b', label='Training loss')
plt.plot(x, val_loss, 'r', label='Validation loss')
plt.title('Training and validation loss')
plt.legend()
###Output
_____no_output_____
|
AAAI/Interpretability/attention_sparsity_comparison/codes_initial_comparison/last_layer_sparsemax_D_1.ipynb
|
###Markdown
Generate dataset
###Code
# Imports assumed by this notebook's code (the original import cell is not shown in this extract)
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.utils.data import Dataset, DataLoader
from sparsemax import Sparsemax  # assumed source of the Sparsemax(dim=...) module used below

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

np.random.seed(12)
y = np.random.randint(0,10,5000)
idx= []
for i in range(10):
print(i,sum(y==i))
idx.append(y==i)
x = np.zeros((5000,2))
np.random.seed(12)
x[idx[0],:] = np.random.multivariate_normal(mean = [4,6.5],cov=[[0.01,0],[0,0.01]],size=sum(idx[0]))
x[idx[1],:] = np.random.multivariate_normal(mean = [5.5,6],cov=[[0.01,0],[0,0.01]],size=sum(idx[1]))
x[idx[2],:] = np.random.multivariate_normal(mean = [4.5,4.5],cov=[[0.01,0],[0,0.01]],size=sum(idx[2]))
x[idx[3],:] = np.random.multivariate_normal(mean = [3,3.5],cov=[[0.01,0],[0,0.01]],size=sum(idx[3]))
x[idx[4],:] = np.random.multivariate_normal(mean = [2.5,5.5],cov=[[0.01,0],[0,0.01]],size=sum(idx[4]))
x[idx[5],:] = np.random.multivariate_normal(mean = [3.5,8],cov=[[0.01,0],[0,0.01]],size=sum(idx[5]))
x[idx[6],:] = np.random.multivariate_normal(mean = [5.5,8],cov=[[0.01,0],[0,0.01]],size=sum(idx[6]))
x[idx[7],:] = np.random.multivariate_normal(mean = [7,6.5],cov=[[0.01,0],[0,0.01]],size=sum(idx[7]))
x[idx[8],:] = np.random.multivariate_normal(mean = [6.5,4.5],cov=[[0.01,0],[0,0.01]],size=sum(idx[8]))
x[idx[9],:] = np.random.multivariate_normal(mean = [5,3],cov=[[0.01,0],[0,0.01]],size=sum(idx[9]))
color = ['#1F77B4','orange', 'g','brown']
name = [1,2,3,0]
for i in range(10):
if i==3:
plt.scatter(x[idx[i],0],x[idx[i],1],c=color[3],label="D_"+str(name[i]))
elif i>=4:
plt.scatter(x[idx[i],0],x[idx[i],1],c=color[3])
else:
plt.scatter(x[idx[i],0],x[idx[i],1],c=color[i],label="D_"+str(name[i]))
plt.legend()
desired_num = 6000
mosaic_list_of_images =[]
mosaic_label = []
fore_idx=[]
for j in range(desired_num):
np.random.seed(j)
fg_class = np.random.randint(0,3)
fg_idx = np.random.randint(0,9)
a = []
for i in range(9):
if i == fg_idx:
b = np.random.choice(np.where(idx[fg_class]==True)[0],size=1)
a.append(x[b])
# print("foreground "+str(fg_class)+" present at " + str(fg_idx))
else:
bg_class = np.random.randint(3,10)
b = np.random.choice(np.where(idx[bg_class]==True)[0],size=1)
a.append(x[b])
# print("background "+str(bg_class)+" present at " + str(i))
a = np.concatenate(a,axis=0)
mosaic_list_of_images.append(a)
mosaic_label.append(fg_class)
fore_idx.append(fg_idx)
###Output
_____no_output_____
###Markdown
load mosaic data
###Code
class MosaicDataset(Dataset):
"""MosaicDataset dataset."""
def __init__(self, mosaic_list, mosaic_label,fore_idx):
"""
Args:
csv_file (string): Path to the csv file with annotations.
root_dir (string): Directory with all the images.
transform (callable, optional): Optional transform to be applied
on a sample.
"""
self.mosaic = mosaic_list
self.label = mosaic_label
self.fore_idx = fore_idx
def __len__(self):
return len(self.label)
def __getitem__(self, idx):
return self.mosaic[idx] , self.label[idx] , self.fore_idx[idx]
batch = 250
msd1 = MosaicDataset(mosaic_list_of_images[0:3000], mosaic_label[0:3000] , fore_idx[0:3000])
train_loader = DataLoader( msd1 ,batch_size= batch ,shuffle=True)
batch = 250
msd2 = MosaicDataset(mosaic_list_of_images[3000:6000], mosaic_label[3000:6000] , fore_idx[3000:6000])
test_loader = DataLoader( msd2 ,batch_size= batch ,shuffle=True)
###Output
_____no_output_____
###Markdown
models
###Code
class my_spherical_softmax(nn.Module):
"""
implementation is taken from https://github.com/anirbanl/sparsegen
Code for the NeurIPS 2018 paper "On Controllable Sparse Alternatives to Softmax"
"""
def __init__(self):
super(my_spherical_softmax,self).__init__()
def forward(self,x):
bs = x.data.size()[0]
dim = x.data.size()[1]
z = torch.pow(x, 2)
z_sum = torch.sum(z, dim=1).view(bs, 1).repeat(1, dim)
return z/z_sum
class Focus_deep(nn.Module):
'''
deep focus network averaged at zeroth layer
input : elemental data
'''
def __init__(self,inputs,output,K,d):
super(Focus_deep,self).__init__()
self.inputs = inputs
self.output = output
self.K = K
self.d = d
self.linear1 = nn.Linear(self.inputs,50, bias=False) #,self.output)
self.linear2 = nn.Linear(50,50 , bias=False)
self.linear3 = nn.Linear(50,self.output, bias=False)
torch.nn.init.xavier_normal_(self.linear1.weight)
torch.nn.init.xavier_normal_(self.linear2.weight)
torch.nn.init.xavier_normal_(self.linear3.weight)
self.sparsemax = Sparsemax(dim=-1)
#self.spherical_softmax = my_spherical_softmax()
def forward(self,z):
batch = z.shape[0]
x = torch.zeros([batch,self.K],dtype=torch.float64)
y = torch.zeros([batch,50], dtype=torch.float64) # number of features of output
features = torch.zeros([batch,self.K,50],dtype=torch.float64)
x,y = x.to(device),y.to(device)
features = features.to(device)
for i in range(self.K):
alp,ftrs = self.helper(z[:,i] ) # self.d*i:self.d*i+self.d
x[:,i] = alp[:,0]
features[:,i] = ftrs
#log_x = F.log_softmax(x,dim=1)
x = self.sparsemax(x) # alphas
#x = F.softmax(x,dim=1)
#x = self.spherical_softmax(x)
for i in range(self.K):
x1 = x[:,i]
y = y+torch.mul(x1[:,None],features[:,i]) # self.d*i:self.d*i+self.d
return y , x
def helper(self,x):
#x1 = x
x = self.linear1(x)
x = F.relu(x)
x = self.linear2(x)
x1 = F.tanh(x)
x = F.relu(x)
x = self.linear3(x)
#print(x1.shape)
return x,x1
# class Focus_deep(nn.Module):
# '''
# deep focus network averaged at zeroth layer
# input : elemental data
# '''
# def __init__(self,inputs,output,K,d):
# super(Focus_deep,self).__init__()
# self.inputs = inputs
# self.output = output
# self.K = K
# self.d = d
# self.linear1 = nn.Linear(self.inputs,50, bias=False) #,self.output)
# self.linear2 = nn.Linear(50,50 , bias=False)
# self.linear3 = nn.Linear(50,self.output, bias=False)
# torch.nn.init.xavier_normal_(self.linear1.weight)
# torch.nn.init.xavier_normal_(self.linear2.weight)
# torch.nn.init.xavier_normal_(self.linear3.weight)
# #self.sparsemax = Sparsemax(dim=-1)
# self.spherical_softmax = my_spherical_softmax()
# def forward(self,z):
# batch = z.shape[0]
# x = torch.zeros([batch,self.K],dtype=torch.float64)
# y = torch.zeros([batch,self.d], dtype=torch.float64)
# #x,y = x.to("cuda"),y.to("cuda")
# #print(z[:,0].shape,z[:,self.d*0:self.d*0+self.d].shape)
# for i in range(self.K):
# x[:,i] = self.helper(z[:,i] )[:,0] # self.d*i:self.d*i+self.d
# #x = self.sparsemax(x) # alphas
# #x = F.softmax(x,dim=1)
# x = self.spherical_softmax(x)
# x1 = x[:,0]
# for i in range(self.K):
# x1 = x[:,i]
# y = y+torch.mul(x1[:,None],z[:,i]) # self.d*i:self.d*i+self.d
# return y , x # attended, alpha
# def helper(self,x):
# #print(x.shape)
# x = F.relu(self.linear1(x))
# x = F.relu(self.linear2(x))
# x = self.linear3(x)
# return x
fc = Focus_deep(2,1,9,2).double()
fc = fc.to(device)
fc(torch.randn((2,9,2)).double().to(device))
class Classification_deep(nn.Module):
'''
input : elemental data
deep classification module data averaged at zeroth layer
'''
def __init__(self,inputs,output):
super(Classification_deep,self).__init__()
self.inputs = inputs
self.output = output
self.linear1 = nn.Linear(self.inputs,50)
#self.linear2 = nn.Linear(6,12)
self.linear2 = nn.Linear(50,self.output)
torch.nn.init.xavier_normal_(self.linear1.weight)
torch.nn.init.zeros_(self.linear1.bias)
torch.nn.init.xavier_normal_(self.linear2.weight)
torch.nn.init.zeros_(self.linear2.bias)
def forward(self,x):
x = F.relu(self.linear1(x))
#x = F.relu(self.linear2(x))
x = self.linear2(x)
return x
criterion = nn.CrossEntropyLoss()
# def my_cross_entropy(x, y,alpha,log_alpha,k):
# # log_prob = -1.0 * F.log_softmax(x, 1)
# # loss = log_prob.gather(1, y.unsqueeze(1))
# # loss = loss.mean()
# loss = criterion(x,y)
# #alpha = torch.clamp(alpha,min=1e-10)
# b = -1.0* alpha * log_alpha
# b = torch.mean(torch.sum(b,dim=1))
# closs = loss
# entropy = b
# loss = (1-k)*loss + ((k)*b)
# return loss,closs,entropy
def calculate_attn_loss(dataloader,what,where,criter):
what.eval()
where.eval()
r_loss = 0
alphas = []
lbls = []
pred = []
fidices = []
with torch.no_grad():
for i, data in enumerate(dataloader, 0):
inputs, labels,fidx = data
lbls.append(labels)
fidices.append(fidx)
inputs = inputs.double()
inputs, labels = inputs.to(device),labels.to(device)
avg,alpha = where(inputs)
outputs = what(avg)
_, predicted = torch.max(outputs.data, 1)
pred.append(predicted.cpu().numpy())
alphas.append(alpha.cpu().numpy())
loss = criter(outputs,labels)
r_loss += loss.item()
alphas = np.concatenate(alphas,axis=0)
pred = np.concatenate(pred,axis=0)
lbls = np.concatenate(lbls,axis=0)
fidices = np.concatenate(fidices,axis=0)
#print(alphas.shape,pred.shape,lbls.shape,fidices.shape)
analysis = analyse_data(alphas,lbls,pred,fidices)
return r_loss/i,analysis
def analyse_data(alphas,lbls,predicted,f_idx):
'''
analysis data is created here
'''
batch = len(predicted)
amth,alth,ftpt,ffpt,ftpf,ffpf = 0,0,0,0,0,0
for j in range (batch):
focus = np.argmax(alphas[j])
if(alphas[j][focus] >= 0.5):
amth +=1
else:
alth +=1
if(focus == f_idx[j] and predicted[j] == lbls[j]):
ftpt += 1
elif(focus != f_idx[j] and predicted[j] == lbls[j]):
ffpt +=1
elif(focus == f_idx[j] and predicted[j] != lbls[j]):
ftpf +=1
elif(focus != f_idx[j] and predicted[j] != lbls[j]):
ffpf +=1
#print(sum(predicted==lbls),ftpt+ffpt)
# value>0.01
sparsity_val = np.sum(np.sum(alphas>0.01,axis=1))
# simplex distance
argmax_index = np.argmax(alphas,axis=1)
simplex_pt = np.zeros(alphas.shape)
simplex_pt[np.arange(argmax_index.size),argmax_index] = 1
shortest_distance_simplex = np.sum(np.sqrt(np.sum((alphas-simplex_pt)**2,axis=1)))
# entropy
#entropy = np.nansum((-alphas*np.log2(alphas)).sum(axis=1))
entropy = np.sum(np.nansum(-alphas*np.log2(alphas),axis=1))
return [ftpt,ffpt,ftpf,ffpf,sparsity_val,shortest_distance_simplex,entropy]#,amth,alth]
###Output
_____no_output_____
###Markdown
training
###Code
number_runs = 5
full_analysis =[]
FTPT_analysis = pd.DataFrame(columns = ["FTPT","FFPT", "FTPF","FFPF","sparsity_value","shortest distance smplx","Entropy"])
for n in range(number_runs):
print("--"*40)
# instantiate focus and classification Model
torch.manual_seed(n)
where = Focus_deep(2,1,9,2).double()
#where = where.double().to("cuda")
what = Classification_deep(50,3).double()
where = where.to(device)
what = what.to(device)
# instantiate optimizer
optimizer_where = optim.Adam(where.parameters(),lr =0.001)
optimizer_what = optim.Adam(what.parameters(), lr=0.001)
#criterion = nn.CrossEntropyLoss()
acti = []
analysis_data = []
loss_curi = []
epochs = 2500
# calculate zeroth epoch loss and FTPT values
running_loss ,anlys_data= calculate_attn_loss(train_loader,what,where,criterion)
loss_curi.append(running_loss)
analysis_data.append(anlys_data)
print('epoch: [%d ] loss: %.3f' %(0,running_loss))
# training starts
for epoch in range(epochs): # loop over the dataset multiple times
ep_lossi = []
running_loss = 0.0
what.train()
where.train()
for i, data in enumerate(train_loader, 0):
# get the inputs
inputs, labels,_ = data
inputs = inputs.double()
inputs, labels = inputs.to(device),labels.to(device)
# zero the parameter gradients
optimizer_where.zero_grad()
optimizer_what.zero_grad()
# forward + backward + optimize
avg, alpha = where(inputs)
outputs = what(avg)
loss = criterion(outputs,labels)
# print statistics
loss.backward()
optimizer_where.step()
optimizer_what.step()
running_loss += loss.item()
#break
running_loss,anls_data = calculate_attn_loss(train_loader,what,where,criterion)
analysis_data.append(anls_data)
if(epoch % 200==0):
print('epoch: [%d] loss: %.3f ' %(epoch + 1,running_loss))
loss_curi.append(running_loss) #loss per epoch
if running_loss<=0.01:
print('breaking in epoch: ', epoch)
break
print('Finished Training run ' +str(n))
#break
analysis_data = np.array(analysis_data)
FTPT_analysis.loc[n] = analysis_data[-1,:7]/3000
full_analysis.append((epoch, analysis_data))
correct = 0
total = 0
with torch.no_grad():
for data in test_loader:
images, labels,_ = data
images = images.double()
images, labels = images.to(device), labels.to(device)
avg, alpha = where(images)
outputs = what(avg)
_, predicted = torch.max(outputs.data, 1)
total += labels.size(0)
correct += (predicted == labels).sum().item()
print('Accuracy of the network on the 3000 test images: %f %%' % ( 100 * correct / total))
print(np.mean(np.array(FTPT_analysis),axis=0))
FTPT_analysis
FTPT_analysis[FTPT_analysis['FTPT']+FTPT_analysis['FFPT'] > 0.9 ]
print(np.mean(np.array(FTPT_analysis[FTPT_analysis['FTPT']+FTPT_analysis['FFPT'] > 0.9 ]),axis=0))
86.83+12.84
cnt=1
for epoch, analysis_data in full_analysis:
analysis_data = np.array(analysis_data)
# print("="*20+"run ",cnt,"="*20)
plt.figure(figsize=(6,5))
plt.plot(np.arange(0,epoch+2,1),analysis_data[:,0]/30,label="FTPT")
plt.plot(np.arange(0,epoch+2,1),analysis_data[:,1]/30,label="FFPT")
plt.plot(np.arange(0,epoch+2,1),analysis_data[:,2]/30,label="FTPF")
plt.plot(np.arange(0,epoch+2,1),analysis_data[:,3]/30,label="FFPF")
plt.title("Training trends for run "+str(cnt))
plt.grid()
# plt.legend(loc='center left', bbox_to_anchor=(1, 0.5))
plt.legend()
plt.xlabel("epochs", fontsize=14, fontweight = 'bold')
plt.ylabel("percentage train data", fontsize=14, fontweight = 'bold')
#plt.savefig(path + "run"+str(cnt)+".png",bbox_inches="tight")
#plt.savefig(path + "run"+str(cnt)+".pdf",bbox_inches="tight")
cnt+=1
#FTPT_analysis.to_csv(path+"synthetic_zeroth.csv",index=False)
###Output
_____no_output_____
|
notebooks/thermodynamics/productionOfHydrogen.ipynb
|
###Markdown
###Code
#@title Production of hydrogen
#@markdown This page will give an introduction to production of hydrogen.
#@markdown <br><br>This document is part of the module ["Introduction to Gas Processing using NeqSim in Colab"](https://colab.research.google.com/github/EvenSol/NeqSim-Colab/blob/master/notebooks/examples_of_NeqSim_in_Colab.ipynb#scrollTo=_eRtkQnHpL70).
%%capture
!pip install neqsim
import neqsim
from neqsim.thermo.thermoTools import *
import matplotlib
import numpy as np
import matplotlib.pyplot as plt
import math
plt.style.use('classic')
%matplotlib inline
###Output
_____no_output_____
|
W02-demo.ipynb
|
###Markdown
Poisson vs. Gaussian
###Code
from scipy.stats import norm, poisson
mu = 1000
rv_poisson = poisson(mu)
print(rv_poisson.stats(moments='mvsk'))
rv_gauss = norm(loc=mu, scale=np.sqrt(mu))
print(rv_gauss.stats(moments='mvsk'))
###Output
(array(1000.), array(1000.), array(0.), array(0.))
###Markdown
Plot them to compare
###Code
x = range(800, 1200)
plt.plot(x, rv_poisson.pmf(x), 'o')
x_to_plot = np.linspace(800, 1200, 10000)
plt.plot(x_to_plot, rv_gauss.pdf(x_to_plot))
###Output
_____no_output_____
###Markdown
---

2-Parameter Bayesian Estimate

The sampler

Let's sample a 2D Gaussian:

$$ PDF_G(x, y) = \frac{\exp\left(-\frac{1}{2} (x-\mu)^T \Sigma^{-1} (x-\mu)\right)}{\sqrt{(2 \pi)^2 \det(\Sigma)}} $$
###Code
from scipy import linalg
mu = np.array([10, 3])
rho = 0.8
sigma_x = 2
sigma_y = 1
Sigma = [[ sigma_x**2, rho * sigma_x * sigma_y],
[rho * sigma_x * sigma_y, sigma_y**2]]
def gauss_2D(x, mu, rho, sigma_x, sigma_y):
output = np.exp(-0.5 * (x-mu) @ linalg.inv(Sigma) @ (x-mu)) / np.sqrt((2 * np.pi)**2 * linalg.det(Sigma))
return output
def log_prob(x, mu, rho, sigma_x, sigma_y):
return np.log(gauss_2D(x, mu, rho, sigma_x, sigma_y))
import emcee
###Output
_____no_output_____
###Markdown
Setup the sampler
###Code
ndim = 2
nwalkers = 16
sampler = emcee.EnsembleSampler(nwalkers, ndim, log_prob, args=[mu, rho, sigma_x, sigma_y])
###Output
_____no_output_____
###Markdown
Starting positions
###Code
p0 = np.array([10, 3]) + np.random.normal(loc=0, scale=1, size=(nwalkers, ndim))
print(p0)
###Output
[[ 9.72959987 1.57802904]
[ 8.13136319 6.2489588 ]
[ 9.9075092 2.66610803]
[ 9.86988399 1.60814595]
[10.61877002 2.75105229]
[ 9.78837159 2.69080586]
[10.48487043 2.90957899]
[ 9.91032533 2.63895018]
[11.00082875 3.38301504]
[10.37482329 5.07756753]
[11.16405113 3.5067262 ]
[ 8.1247786 2.55736253]
[10.19247085 3.56526765]
[10.49191132 3.4890852 ]
[10.93687594 3.51127772]
[ 9.1814688 4.4475358 ]]
###Markdown
Run MCMC
###Code
sampler.run_mcmc(p0, 3000, rstate0=45);
plt.subplot(211)
plt.plot(sampler.flatchain[:,0])
plt.axhline(10, color='r')
plt.subplot(212)
plt.plot(sampler.flatchain[:,1])
plt.axhline(3, color='r')
import corner
fig = corner.corner(sampler.flatchain, labels=['x', 'y'])
###Output
_____no_output_____
###Markdown
---

Parameter Estimates

Defining the sample to play with
###Code
mu = 10
sigma = 3
N_sample = 15
np.random.seed(5456345)
sample = norm.rvs(loc=mu, scale=sigma, size=N_sample)
plt.hist(sample);
np.mean(sample)
np.std(sample)
###Output
_____no_output_____
###Markdown
Doing the Bayesian analysis

Need to start by defining the function to evaluate the posterior.
###Code
def log_likelihood(mu, sigma, x):
return - 0.5 * np.sum((mu - x)**2 / sigma**2 + np.log(sigma**2))
# def log_prior(mu, sigma): # This one is a flat prior
# if -100.0 < mu < 100 and -50<sigma<50:
# return 0.0
# return -np.inf
def log_prior(mu, sigma): # This one is a gaussian prior
return - (mu-10)**2/2/5**2 - (sigma-3)**2/2/1**2
def lnprob(params, x):
mu, sigma = params
lp = log_prior(mu, sigma)
if not np.isfinite(lp):
return -np.inf
return lp + log_likelihood(mu, sigma, x)
###Output
_____no_output_____
###Markdown
Setup the Sampler
###Code
import emcee
ndim = 2
nwalkers = 16
sampler = emcee.EnsembleSampler(nwalkers, ndim, lnprob, args=[sample])
###Output
_____no_output_____
###Markdown
Setup the starting points for the set of walkers
###Code
mu_p0 = norm.rvs(loc=0, scale=10, size=(nwalkers))
sigma_p0 = poisson.rvs(30, size=nwalkers)
p0 = np.stack((mu_p0, sigma_p0), axis=1)
plt.plot(mu_p0, sigma_p0, 'x')
plt.axvline(mu)
plt.axhline(sigma)
plt.xlabel(r'$\mu$')
plt.ylabel(r'$\sigma$')
###Output
_____no_output_____
###Markdown
Run the sampler
###Code
sampler.run_mcmc(p0, 5000, rstate0=43);
###Output
_____no_output_____
###Markdown
Explore the chains
###Code
plt.subplot(121)
plt.hist(sampler.flatchain[:,0])
plt.xlabel(r'$\mu$')
plt.subplot(122)
plt.hist(sampler.flatchain[:,1])
plt.xlabel(r'$\sigma$');
plt.plot(sampler.flatchain[:,0])
plt.axhline(10, color='r')
plt.ylabel(r'$\mu$')
plt.plot(np.fabs(sampler.flatchain[:,1]))
plt.axhline(3, color='r')
plt.ylabel(r'$\sigma$')
fig = corner.corner(sampler.flatchain, labels=[r'$\mu$', r'$\sigma$'])
###Output
WARNING:root:Too few points to create valid contours
###Markdown
It's a fairly common problem that the corner plot looks this bad when there are too many outliers. The parameter `range` helps with the visualization; check the documentation [here](https://corner.readthedocs.io/en/latest/).
###Code
fig = corner.corner(sampler.flatchain, labels=[r'$\mu$', r'$\sigma$'], range=(0.98, 0.98), truths=[10, 3])
###Output
_____no_output_____
|
023_keras_optimizations.ipynb
|
###Markdown
Keras and Tensorflow Optimizations

There are several things that we can do to make our networks a bit better. Unfortunately for much of this there aren't definitive answers for "what is the best choice", so we do have to do some trial and error, but we can use some guidelines to get us started in the right direction.

Load MNIST Data

We can use the MNIST digit dataset for testing, since it is reasonably large.
###Code
# Imports assumed by this notebook's code (the original import cell is not shown in this extract)
from tensorflow import keras
from tensorflow.keras.layers import InputLayer, Flatten, Dense
from keras.utils import np_utils

# Load MNIST dataset
mnist = keras.datasets.mnist
(train_images, train_labels), (test_images, test_labels) = mnist.load_data()
# Normalize the input image so that each pixel value is between 0 and 1.
train_images = train_images / 255.0
test_images = test_images / 255.0
print(train_images.shape)
train_labels = np_utils.to_categorical(train_labels)
test_labels = np_utils.to_categorical(test_labels)
model = keras.Sequential()
model.add(InputLayer(input_shape=(28, 28)))
model.add(Flatten())
model.add(Dense(512, activation='relu'))
model.add(Dense(10, activation="softmax"))
# Train the digit classification model
model.compile(optimizer='adam', loss="categorical_crossentropy", metrics='accuracy')
model.fit(
train_images,
train_labels,
epochs=10,
validation_split=0.2,
)
model.evaluate(test_images, test_labels)
###Output
Downloading data from https://storage.googleapis.com/tensorflow/tf-keras-datasets/mnist.npz
11493376/11490434 [==============================] - 0s 0us/step
11501568/11490434 [==============================] - 0s 0us/step
(60000, 28, 28)
Epoch 1/10
1500/1500 [==============================] - 15s 8ms/step - loss: 0.2218 - accuracy: 0.9341 - val_loss: 0.1195 - val_accuracy: 0.9653
Epoch 2/10
1500/1500 [==============================] - 7s 4ms/step - loss: 0.0889 - accuracy: 0.9732 - val_loss: 0.0889 - val_accuracy: 0.9732
Epoch 3/10
1500/1500 [==============================] - 5s 4ms/step - loss: 0.0573 - accuracy: 0.9824 - val_loss: 0.0933 - val_accuracy: 0.9718
Epoch 4/10
1500/1500 [==============================] - 6s 4ms/step - loss: 0.0395 - accuracy: 0.9878 - val_loss: 0.0839 - val_accuracy: 0.9750
Epoch 5/10
1500/1500 [==============================] - 6s 4ms/step - loss: 0.0280 - accuracy: 0.9915 - val_loss: 0.0832 - val_accuracy: 0.9780
Epoch 6/10
1500/1500 [==============================] - 6s 4ms/step - loss: 0.0202 - accuracy: 0.9940 - val_loss: 0.0807 - val_accuracy: 0.9794
Epoch 7/10
1500/1500 [==============================] - 6s 4ms/step - loss: 0.0182 - accuracy: 0.9941 - val_loss: 0.0904 - val_accuracy: 0.9766
Epoch 8/10
1500/1500 [==============================] - 6s 4ms/step - loss: 0.0141 - accuracy: 0.9953 - val_loss: 0.1079 - val_accuracy: 0.9756
Epoch 9/10
1500/1500 [==============================] - 6s 4ms/step - loss: 0.0123 - accuracy: 0.9962 - val_loss: 0.0947 - val_accuracy: 0.9783
Epoch 10/10
1500/1500 [==============================] - 6s 4ms/step - loss: 0.0118 - accuracy: 0.9960 - val_loss: 0.0889 - val_accuracy: 0.9797
313/313 [==============================] - 1s 4ms/step - loss: 0.0745 - accuracy: 0.9824
[0.07446283102035522, 0.9824000000953674]
###Markdown
Prequel - Saving and Loading ModelsAs we've seen, models can take a long time to train in many cases. Like with the sklearn models, we can save and load ours as they are trained and reused. This is a pretty integral part of making neural network models usable, so it is pretty easy. In addition to this we often see models saved in the h5 format, which just saves slightly less stuff along with the model. If we are using models trained elsewhere this format is very common.
###Code
# Save my model
model.save('model_path')
model = keras.models.load_model('model_path')
# Calling `save('my_model.h5')` creates a h5 file `my_model.h5`.
model.save("my_h5_model.h5")
# It can be used to reconstruct the model identically.
reconstructed_model = keras.models.load_model("my_h5_model.h5")
###Output
_____no_output_____
###Markdown
Network SizeProbably the first question that we will think of when building networks through Tensorflow is "how big should it be"? This is a very big question, and one of those ones without a real answer. We can put some guidelines in place to help us though. What Does the Size Mean?The size of a neural network is also known as the capacity. We can relate it roughly to the size of our first model, the tree. The larger a network is the higher its capacity to learn. What Size to UseWe can start with a few guidelines to have a reasonably sized neural network. These steps do not ensure an optimal solution, but they'll get us started. There really is not a prescribed method for calculating the optimal network size (believe me, I've looked), but there are several rules of thumb we can build together: Start with an input layer that is either The width of the data, if the feature set is relatively small. A reasonably large number if the feature set is large. We don't have a true dividing line, but 512 is a reasonable value to try for an upper end, at least at first. Add 1 or 2 hidden layers of the same size and observe the results. We want to keep the model smaller if making it larger doesn't improve things, so first we should see how good a job a small model does. If the data is very large, skipping past the 1 layer step may save some time since we can predict that we can do better with a larger model in advance. Increase layers of the same size until we get some overfitting and the training loss flattens. We want to reach the point where the model is getting to be excellent at predicting the training data. This is something we can see in the plot by noticing that the validation loss flatlines or starts to get worse. The training loss flattening is an indication that the model is not getting any better at learning the training data; we can use early stopping with a loose patience setting on training loss and lots of epochs to find this (a short sketch of this callback follows this section). Add regularization steps to cut down that overfitting. We can try regularization and dropouts to cut down on that overfitting (an L2 example is sketched below). We probably want to try a few options, parameters, and combinations here, there's not really a way to know in advance which regularization will work best on our data. "Funnel" the layer size, potentially adding more layers. The traditional configuration of layers is to gradually decrease the size from the input layer towards the output layer. There is open debate on whether this is better than having layers that are all the same size. We can play with this a little to see if results improve or not. Use pruning. Much like a tree we can prune back a model to fight overfitting. Height vs WidthAnother common question: should we make networks wider (more neurons) or deeper (more layers)? Once again, there's no universal answer, but the general evidence leans towards more layers. There are several reasons for this, none of them definitive, but taken as a whole they add up to a strong case: Ability to learn different representations of the data - this will be more clear next time when we start to look at some image specific neural networks, but one of the cool features of neural networks is that at each layer the network "sees" a different representation of the data, as it goes through each round of transformations. This has the effect of allowing it to identify different features at each layer, and use those features to make more and more accurate predictions. We'll examine this more soon. 
Avoiding overfitting - extremely wide neural networks tend towards overfitting the training data and not generalizing as well to new data. Ability to add interim steps - with a multi layer network we can add multiple steps such as regularization or dropouts, again to fight overfitting. Automatic feature selection - deep neural networks will automatically perform a type of feature selection as the least important features are minimized in their importance. This is an emerging area of research - some people have argued that well designed neural networks can remove the need for feature selection, and neural networks are being created to be feature selection tools. We can see this illustrated most clearly with images again, we feed a network an entire image, and get a prediction. Note that this isn't a total rejection of feature selection for neural networks, improving the feature set will impact neural network models just as it will for ordinary models; with neural networks we just have the potential for the network to "cover for mistakes" in the features. This is more dramatic as data size and network size increase. Results - deep learning has become a common term recently for a reason, due to the success of deep neural networks with many layers. Most of the cool stuff that we see coming from AI such as image recognition, translation, and self navigating robots are the result of deep learning networks. In practice these networks have tended to outperform shallower ones, especially in more complex tasks. Why not make a model that is both very wide and very deep? This will tend to overfit as it can "memorize" the training data. With large datasets we do see very large models in some cases, since the more data we have, the more fitting we can handle.
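Before comparing sizes, here is a minimal sketch of the early-stopping callback and L2 weight regularization mentioned in the guidelines above; the monitored quantity, patience, and penalty value are illustrative assumptions rather than tuned settings.
###Code
# Sketch: stop training when the training loss stops improving, and penalize large weights
early_stop = keras.callbacks.EarlyStopping(monitor="loss", patience=20, restore_best_weights=True)
regularized_layer = Dense(512, activation="relu", kernel_regularizer=keras.regularizers.l2(0.001))
# Usage would be model.add(regularized_layer) and model.fit(..., callbacks=[early_stop])
###Output
_____no_output_____
###Markdown
Now compare a few different network shapes directly.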
###Code
# Wide Model (two 784-unit hidden layers)
model = keras.Sequential()
model.add(InputLayer(input_shape=(28, 28)))
model.add(Flatten())
model.add(Dense(784, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(784, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(10, activation="softmax"))
# Train the digit classification model
model.compile(optimizer='adam', loss="categorical_crossentropy", metrics='accuracy')
model.fit(
train_images,
train_labels,
epochs=10,
validation_split=0.2,
)
model.evaluate(test_images, test_labels)
# Deeper Model (four 512-unit hidden layers)
model = keras.Sequential()
model.add(InputLayer(input_shape=(28, 28)))
model.add(Flatten())
model.add(Dense(512, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(512, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(512, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(512, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(10, activation="softmax"))
# Train the digit classification model
model.compile(optimizer='adam', loss="categorical_crossentropy", metrics='accuracy')
model.fit(
train_images,
train_labels,
epochs=10,
validation_split=0.2,
)
model.evaluate(test_images, test_labels)
# Tapered Model
model = keras.Sequential()
model.add(InputLayer(input_shape=(28, 28)))
model.add(Flatten())
model.add(Dense(500, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(350, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(200, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(100, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(50, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(10, activation="softmax"))
# Train the digit classification model
model.compile(optimizer='adam', loss="categorical_crossentropy", metrics='accuracy')
model.fit(
train_images,
train_labels,
epochs=10,
validation_split=0.2,
)
model.evaluate(test_images, test_labels)
###Output
Epoch 1/10
1500/1500 [==============================] - 9s 6ms/step - loss: 0.3934 - accuracy: 0.8860 - val_loss: 0.1647 - val_accuracy: 0.9519
Epoch 2/10
1500/1500 [==============================] - 8s 5ms/step - loss: 0.1698 - accuracy: 0.9566 - val_loss: 0.1172 - val_accuracy: 0.9693
Epoch 3/10
1500/1500 [==============================] - 8s 6ms/step - loss: 0.1400 - accuracy: 0.9647 - val_loss: 0.1142 - val_accuracy: 0.9702
Epoch 4/10
1500/1500 [==============================] - 8s 5ms/step - loss: 0.1150 - accuracy: 0.9689 - val_loss: 0.1036 - val_accuracy: 0.9737
Epoch 5/10
1500/1500 [==============================] - 8s 5ms/step - loss: 0.0969 - accuracy: 0.9744 - val_loss: 0.1084 - val_accuracy: 0.9736
Epoch 6/10
1500/1500 [==============================] - 8s 5ms/step - loss: 0.0892 - accuracy: 0.9762 - val_loss: 0.1045 - val_accuracy: 0.9738
Epoch 7/10
1500/1500 [==============================] - 8s 5ms/step - loss: 0.0779 - accuracy: 0.9795 - val_loss: 0.0994 - val_accuracy: 0.9768
Epoch 8/10
1500/1500 [==============================] - 9s 6ms/step - loss: 0.0706 - accuracy: 0.9818 - val_loss: 0.1299 - val_accuracy: 0.9717
Epoch 9/10
1500/1500 [==============================] - 9s 6ms/step - loss: 0.0645 - accuracy: 0.9836 - val_loss: 0.1425 - val_accuracy: 0.9748
Epoch 10/10
1500/1500 [==============================] - 8s 5ms/step - loss: 0.0621 - accuracy: 0.9836 - val_loss: 0.1110 - val_accuracy: 0.9764
###Markdown
Epochs and Batch Sizes EpochsEach epoch is a run through all of the training data. Epochs are simple, we can set a large number and use early stopping to cut things off when we've reached the best result. Batch SizesBatch size determines how many records are processed before the gradients are updated - i.e. the number of records between one forward and backwards pass. The optimal batch size is a matter of open debate. At the high end, batch sizes are limited by what can fit in memory. When dealing with very large data this may matter, as a batch that is a small fraction of the data may be a massive absolute size. At the lower end, using smaller batches gives the same effect as it does when we looked at regular gradient descent - the gradients become less stable as we are relying on a smaller number of records. In reading more about batch sizes I want to update my recommendation to be even smaller than the 50 to 150 I suggested before, down to less than 100, even as small as into the single digits. There is research suggesting that smaller batch sizes tend to produce models that generalize better than ones with larger batches. Larger batch sizes do tend to be processed more quickly, sometimes substantially so, as the hardware is better able to be "saturated" with data to process. Don't stress too much on batch size; this is really something that needs to be grid searched to find a great answer (a hedged sketch of such a search is shown below, before the two direct comparisons).
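A hedged sketch of what such a grid search over batch sizes could look like (the candidate sizes and the short 3-epoch budget are arbitrary choices for illustration):
###Code
# Sketch: try several batch sizes and record the final validation accuracy of each
batch_results = {}
for bs in [8, 64, 512, 5000]:
    m = keras.Sequential([Flatten(input_shape=(28, 28)), Dense(128, activation="relu"), Dense(10, activation="softmax")])
    m.compile(optimizer="adam", loss="categorical_crossentropy", metrics=["accuracy"])
    hist = m.fit(train_images, train_labels, epochs=3, batch_size=bs, validation_split=0.2, verbose=0)
    batch_results[bs] = hist.history["val_accuracy"][-1]
batch_results
###Output
_____no_output_____
###Markdown
Below, one very large and one very small batch size are compared directly.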
###Code
# Big Batch
model = keras.Sequential()
model.add(InputLayer(input_shape=(28, 28)))
model.add(Flatten())
model.add(Dense(500, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(350, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(10, activation="softmax"))
# Train the digit classification model
model.compile(optimizer='adam', loss="categorical_crossentropy", metrics='accuracy')
model.fit(
train_images,
train_labels,
epochs=10,
batch_size=5000,
validation_split=0.2,
)
model.evaluate(test_images, test_labels)
# Small Batch
model = keras.Sequential()
model.add(InputLayer(input_shape=(28, 28)))
model.add(Flatten())
model.add(Dense(500, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(350, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(10, activation="softmax"))
# Train the digit classification model
model.compile(optimizer='adam', loss="categorical_crossentropy", metrics='accuracy')
# Note: Batch size of 2 is good to test
# But it can take a VERY long time
model.fit(
train_images,
train_labels,
epochs=10,
batch_size=8,
validation_split=0.2,
)
model.evaluate(test_images, test_labels)
###Output
Epoch 1/10
24000/24000 [==============================] - 112s 5ms/step - loss: 0.3057 - accuracy: 0.9136 - val_loss: 0.1563 - val_accuracy: 0.9572
Epoch 2/10
24000/24000 [==============================] - 111s 5ms/step - loss: 0.2223 - accuracy: 0.9451 - val_loss: 0.1787 - val_accuracy: 0.9571
Epoch 3/10
24000/24000 [==============================] - 111s 5ms/step - loss: 0.2160 - accuracy: 0.9524 - val_loss: 0.1620 - val_accuracy: 0.9632
Epoch 4/10
24000/24000 [==============================] - 101s 4ms/step - loss: 0.2002 - accuracy: 0.9563 - val_loss: 0.1707 - val_accuracy: 0.9657
Epoch 5/10
24000/24000 [==============================] - 101s 4ms/step - loss: 0.2018 - accuracy: 0.9581 - val_loss: 0.1623 - val_accuracy: 0.9679
Epoch 6/10
24000/24000 [==============================] - 111s 5ms/step - loss: 0.1947 - accuracy: 0.9603 - val_loss: 0.2034 - val_accuracy: 0.9685
Epoch 7/10
24000/24000 [==============================] - 101s 4ms/step - loss: 0.1987 - accuracy: 0.9609 - val_loss: 0.1991 - val_accuracy: 0.9702
Epoch 8/10
24000/24000 [==============================] - 101s 4ms/step - loss: 0.1911 - accuracy: 0.9615 - val_loss: 0.1996 - val_accuracy: 0.9711
Epoch 9/10
24000/24000 [==============================] - 110s 5ms/step - loss: 0.1874 - accuracy: 0.9633 - val_loss: 0.2039 - val_accuracy: 0.9697
Epoch 10/10
24000/24000 [==============================] - 101s 4ms/step - loss: 0.1963 - accuracy: 0.9640 - val_loss: 0.2380 - val_accuracy: 0.9728
###Markdown
OptimizerOf all options the optimizer is the one we will care about the least. Each different optimizer is a different algorithm for doing the gradient descent. The optimizers have different results with respect to speed, memory usage, computational expense, and likelihood of getting stuck in a local minimum. Adam is a good compromise between all factors and is very commonly used. We'll just use this for our work. One other common choice is RMSprop; if you're feeling spicy, give that a try and see if there are any improvements.
###Code
optimizer_1 = tf.keras.optimizers.Adam()
optimizer_2 = tf.keras.optimizers.RMSprop()
###Output
_____no_output_____
###Markdown
Activation Activation functions are the key to adding non-linearity to the network, allowing it to learn complex and non-linear relationships in the data. We've used ReLU as the default and that is a solid choice in most cases. ReLU has one issue, the dying ReLU problem. This can happen when the inputs to the activation function fall in the negative region. In short, there can be neurons that "die" and never get updated again. To combat the dying ReLU problem there are a couple of other activation functions that avoid that issue - Leaky ReLU and ELU. Each one changes the negative values to something other than 0 - Leaky ReLU uses a slight linear gradient, ELU uses an exponential function for a similar, but curved, slight gradient.
###Code
# Take a leak
model = keras.Sequential()
model.add(InputLayer(input_shape=(28, 28)))
model.add(Flatten())
model.add(Dense(500, activation='leaky_relu'))
model.add(Dropout(0.2))
model.add(Dense(350, activation='leaky_relu'))
model.add(Dropout(0.2))
model.add(Dense(10, activation="softmax"))
# Train the digit classification model
model.compile(optimizer=optimizer_2, loss="categorical_crossentropy", metrics='accuracy')
model.fit(
train_images,
train_labels,
epochs=10,
batch_size=64,
validation_split=0.2,
)
model.evaluate(test_images, test_labels)
###Output
Epoch 1/10
750/750 [==============================] - 4s 5ms/step - loss: 0.2731 - accuracy: 0.9181 - val_loss: 0.1513 - val_accuracy: 0.9543
Epoch 2/10
750/750 [==============================] - 3s 5ms/step - loss: 0.1348 - accuracy: 0.9581 - val_loss: 0.1184 - val_accuracy: 0.9633
Epoch 3/10
750/750 [==============================] - 4s 5ms/step - loss: 0.1060 - accuracy: 0.9666 - val_loss: 0.1175 - val_accuracy: 0.9663
Epoch 4/10
750/750 [==============================] - 4s 5ms/step - loss: 0.0905 - accuracy: 0.9720 - val_loss: 0.0970 - val_accuracy: 0.9725
Epoch 5/10
750/750 [==============================] - 3s 5ms/step - loss: 0.0760 - accuracy: 0.9760 - val_loss: 0.0910 - val_accuracy: 0.9744
Epoch 6/10
750/750 [==============================] - 3s 5ms/step - loss: 0.0682 - accuracy: 0.9784 - val_loss: 0.1130 - val_accuracy: 0.9689
Epoch 7/10
750/750 [==============================] - 4s 5ms/step - loss: 0.0655 - accuracy: 0.9791 - val_loss: 0.1047 - val_accuracy: 0.9716
Epoch 8/10
750/750 [==============================] - 3s 5ms/step - loss: 0.0576 - accuracy: 0.9819 - val_loss: 0.1050 - val_accuracy: 0.9748
Epoch 9/10
750/750 [==============================] - 3s 5ms/step - loss: 0.0606 - accuracy: 0.9812 - val_loss: 0.1068 - val_accuracy: 0.9725
Epoch 10/10
750/750 [==============================] - 4s 5ms/step - loss: 0.0560 - accuracy: 0.9826 - val_loss: 0.1116 - val_accuracy: 0.9725
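###Markdown
ELU can be swapped in the same way; a minimal sketch of an equivalent hidden layer (the width of 500 simply mirrors the model above):
###Code
# Sketch: ELU replaces the hard zero for negative inputs with a smooth exponential curve
elu_hidden = Dense(500, activation="elu")
###Output
_____no_output_____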
###Markdown
InitializationThe initialization provides the starting point for all the weights and bias values that we start out with. We initially started with random values in the scratch network - this is generally fine, but we can sometimes do better. Imbalanced WeightingOne application where initialization can help significantly is when dealing with imbalanced data. In this example of credit card fraud (real data that has been put through PCA), very, very few transactions are fraudulent. So we have a very imbalanced target value.
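As a minimal sketch of choosing the weight initializer explicitly in Keras (He initialization here is just an illustrative choice, not a recommendation from this notebook):
###Code
# Sketch: the kernel initializer is set when the layer is defined
he_layer = Dense(512, activation="relu", kernel_initializer="he_normal")
###Output
_____no_output_____
###Markdown
Imbalanced data is a case where a specific initial bias value helps, as the credit card example below shows.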
###Code
import pandas as pd
raw_df = pd.read_csv('https://storage.googleapis.com/download.tensorflow.org/data/creditcard.csv')
raw_df.head()
###Output
_____no_output_____
###Markdown
Count the Target OutcomesCredit card fraud is relatively rare, at least in view of the total number of transactions. We can count up the target values to see exactly what the expected skew is.
###Code
# Bincount will count the number in each category
neg, pos = np.bincount(raw_df['Class'])
total = neg + pos
print('Examples:\n Total: {}\n Positive: {} ({:.2f}% of total)\n'.format(
total, pos, 100 * pos / total))
###Output
Examples:
Total: 284807
Positive: 492 (0.17% of total)
###Markdown
We Have an ImbalanceA big one.
###Code
# pop just removes a column. The equivalent of how we normally drop.
# the TF docs commonly use this, so I've left it as is.
cleaned_df = raw_df.copy()
# You don't want the `Time` column.
cleaned_df.pop('Time')
# Use a utility from sklearn to split and shuffle your dataset.
from sklearn.model_selection import train_test_split
train_df, test_df = train_test_split(cleaned_df, test_size=0.2)
# Form np arrays of labels and features.
train_labels = np.array(train_df.pop('Class'))
test_labels = np.array(test_df.pop('Class'))
train_features = np.array(train_df)
test_features = np.array(test_df)
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
train_features = scaler.fit_transform(train_features)
test_features = scaler.transform(test_features)
#train_features = np.clip(train_features, -5, 5)
#test_features = np.clip(test_features, -5, 5)
print('Training features shape:', train_features.shape)
print('Training labels shape:', train_labels.shape)
print('Test features shape:', test_features.shape)
print('Test labels shape:', test_labels.shape)
###Output
Training features shape: (227845, 29)
Training labels shape: (227845,)
Test features shape: (56962, 29)
Test labels shape: (56962,)
###Markdown
Create a Biased ModelAs a side note, we can also specify a lot of different metrics to return, if we want to. The bias of the data is inserted on the output layer, via its bias initializer, when the model is defined. What does this do? It preconfigures the output layer to "expect" results to be this skewed. Recall that, along with the weights, the bias values are one of the things learned in training. By default the initial values are randomized, so the model needs to learn the skew towards the imbalance - if the balance between classes is moderate, that's not a big deal; if the balance is drastically skewed in one direction, that's less practical. With the preset bias we can speed convergence and likely reduce loss. Other Imbalanced WorkOther things that we looked at to improve balance, such as under/over sampling, still work with neural networks as they would with anything else. This is just one nn-specific thing that we can implement with minimal extra work. MetricsWe can add a bunch of metrics to what we get returned by creating a list of the metrics that we want. keras.metrics has a list; they are all the metrics we might expect.
###Code
METRICS = [
#keras.metrics.TruePositives(name='tp'),
#keras.metrics.FalsePositives(name='fp'),
#keras.metrics.TrueNegatives(name='tn'),
#keras.metrics.FalseNegatives(name='fn'),
keras.metrics.BinaryAccuracy(name='accuracy'),
keras.metrics.Precision(name='precision'),
keras.metrics.Recall(name='recall'),
keras.metrics.AUC(name='auc'),
keras.metrics.AUC(name='prc', curve='PR'), # precision-recall curve
]
#def make_model(metrics=METRICS, output_bias=None):
initial_bias = np.log([pos/neg])
output_bias = tf.keras.initializers.Constant(initial_bias)
model = keras.Sequential()
model.add(keras.layers.Dense(16, activation='relu',input_shape=(train_features.shape[-1],)))
model.add(keras.layers.Dropout(0.5))
model.add(keras.layers.Dense(1, activation='sigmoid', bias_initializer=output_bias))
model.summary()
#model.compile(optimizer=keras.optimizers.Adam(learning_rate=1e-3),loss=keras.losses.BinaryCrossentropy(),metrics=metrics)
model.compile(optimizer="adam",loss="binary_crossentropy",metrics=METRICS)
#Fit
model.fit(
train_features,
train_labels,
epochs=10,
batch_size=64,
validation_split=0.2,
)
model.evaluate(test_features, test_labels)
###Output
Epoch 1/10
2849/2849 [==============================] - 34s 10ms/step - loss: 0.0060 - tp: 150.0000 - fp: 31.0000 - tn: 181931.0000 - fn: 164.0000 - accuracy: 0.9989 - precision: 0.8287 - recall: 0.4777 - auc: 0.9005 - prc: 0.5935 - val_loss: 0.0029 - val_tp: 51.0000 - val_fp: 6.0000 - val_tn: 45496.0000 - val_fn: 16.0000 - val_accuracy: 0.9995 - val_precision: 0.8947 - val_recall: 0.7612 - val_auc: 0.9325 - val_prc: 0.7801
Epoch 2/10
2849/2849 [==============================] - 26s 9ms/step - loss: 0.0046 - tp: 177.0000 - fp: 37.0000 - tn: 181925.0000 - fn: 137.0000 - accuracy: 0.9990 - precision: 0.8271 - recall: 0.5637 - auc: 0.9257 - prc: 0.6744 - val_loss: 0.0026 - val_tp: 45.0000 - val_fp: 5.0000 - val_tn: 45497.0000 - val_fn: 22.0000 - val_accuracy: 0.9994 - val_precision: 0.9000 - val_recall: 0.6716 - val_auc: 0.9326 - val_prc: 0.8127
Epoch 3/10
2849/2849 [==============================] - 27s 9ms/step - loss: 0.0043 - tp: 195.0000 - fp: 35.0000 - tn: 181927.0000 - fn: 119.0000 - accuracy: 0.9992 - precision: 0.8478 - recall: 0.6210 - auc: 0.9210 - prc: 0.6941 - val_loss: 0.0029 - val_tp: 39.0000 - val_fp: 3.0000 - val_tn: 45499.0000 - val_fn: 28.0000 - val_accuracy: 0.9993 - val_precision: 0.9286 - val_recall: 0.5821 - val_auc: 0.9326 - val_prc: 0.8129
Epoch 4/10
2849/2849 [==============================] - 27s 10ms/step - loss: 0.0042 - tp: 186.0000 - fp: 24.0000 - tn: 181938.0000 - fn: 128.0000 - accuracy: 0.9992 - precision: 0.8857 - recall: 0.5924 - auc: 0.9273 - prc: 0.7161 - val_loss: 0.0026 - val_tp: 53.0000 - val_fp: 8.0000 - val_tn: 45494.0000 - val_fn: 14.0000 - val_accuracy: 0.9995 - val_precision: 0.8689 - val_recall: 0.7910 - val_auc: 0.9325 - val_prc: 0.8001
Epoch 5/10
2849/2849 [==============================] - 27s 9ms/step - loss: 0.0041 - tp: 196.0000 - fp: 38.0000 - tn: 181924.0000 - fn: 118.0000 - accuracy: 0.9991 - precision: 0.8376 - recall: 0.6242 - auc: 0.9339 - prc: 0.6924 - val_loss: 0.0025 - val_tp: 44.0000 - val_fp: 3.0000 - val_tn: 45499.0000 - val_fn: 23.0000 - val_accuracy: 0.9994 - val_precision: 0.9362 - val_recall: 0.6567 - val_auc: 0.9326 - val_prc: 0.8254
Epoch 6/10
2849/2849 [==============================] - 37s 13ms/step - loss: 0.0041 - tp: 192.0000 - fp: 34.0000 - tn: 181928.0000 - fn: 122.0000 - accuracy: 0.9991 - precision: 0.8496 - recall: 0.6115 - auc: 0.9306 - prc: 0.7141 - val_loss: 0.0026 - val_tp: 45.0000 - val_fp: 3.0000 - val_tn: 45499.0000 - val_fn: 22.0000 - val_accuracy: 0.9995 - val_precision: 0.9375 - val_recall: 0.6716 - val_auc: 0.9326 - val_prc: 0.8245
Epoch 7/10
2849/2849 [==============================] - 27s 9ms/step - loss: 0.0039 - tp: 190.0000 - fp: 33.0000 - tn: 181929.0000 - fn: 124.0000 - accuracy: 0.9991 - precision: 0.8520 - recall: 0.6051 - auc: 0.9355 - prc: 0.7171 - val_loss: 0.0025 - val_tp: 46.0000 - val_fp: 3.0000 - val_tn: 45499.0000 - val_fn: 21.0000 - val_accuracy: 0.9995 - val_precision: 0.9388 - val_recall: 0.6866 - val_auc: 0.9326 - val_prc: 0.8229
Epoch 8/10
2849/2849 [==============================] - 35s 12ms/step - loss: 0.0036 - tp: 208.0000 - fp: 31.0000 - tn: 181931.0000 - fn: 106.0000 - accuracy: 0.9992 - precision: 0.8703 - recall: 0.6624 - auc: 0.9451 - prc: 0.7607 - val_loss: 0.0024 - val_tp: 53.0000 - val_fp: 6.0000 - val_tn: 45496.0000 - val_fn: 14.0000 - val_accuracy: 0.9996 - val_precision: 0.8983 - val_recall: 0.7910 - val_auc: 0.9326 - val_prc: 0.8267
Epoch 9/10
2849/2849 [==============================] - 34s 12ms/step - loss: 0.0036 - tp: 208.0000 - fp: 29.0000 - tn: 181933.0000 - fn: 106.0000 - accuracy: 0.9993 - precision: 0.8776 - recall: 0.6624 - auc: 0.9339 - prc: 0.7555 - val_loss: 0.0023 - val_tp: 50.0000 - val_fp: 3.0000 - val_tn: 45499.0000 - val_fn: 17.0000 - val_accuracy: 0.9996 - val_precision: 0.9434 - val_recall: 0.7463 - val_auc: 0.9326 - val_prc: 0.8326
Epoch 10/10
2849/2849 [==============================] - 27s 9ms/step - loss: 0.0039 - tp: 191.0000 - fp: 36.0000 - tn: 181926.0000 - fn: 123.0000 - accuracy: 0.9991 - precision: 0.8414 - recall: 0.6083 - auc: 0.9418 - prc: 0.7210 - val_loss: 0.0023 - val_tp: 52.0000 - val_fp: 4.0000 - val_tn: 45498.0000 - val_fn: 15.0000 - val_accuracy: 0.9996 - val_precision: 0.9286 - val_recall: 0.7761 - val_auc: 0.9326 - val_prc: 0.8329
###Markdown
PruningWe can also use pruning to improve our networks, which is built into Tensorflow and is similar in concept to the tree pruning we did earlier. Pruning ResultsPruning removes the least useful weights, increasing sparsity. These sparse models require less processing (since many calculations will be M * 0) and may be compressed down to take less space in memory. Smaller ModelsOne other consideration is that we can use pruning to create smaller models that are better able to be executed on weaker hardware. In the context of a full computer, creating a prediction with a neural network is pretty fast. If we want to move the model to small embedded devices though, the memory and processing needs can still be excessive. Some scenarios where this comes up are things like security cameras that can recognize images, robots that can navigate themselves, or even small computers like a Raspberry Pi. This challenge is magnified if you are dealing with something like video, which can generate 30+ images per second. Small models that are almost as good, but can be run with less computing power, allow the power of neural networks to be expanded to more devices - train on a powerful computer, use on a small and weak computer. We can use the TensorFlow Lite (tflite) set of tools to create special models that are optimized for lower computing power devices, though we won't explore that in depth here (a brief conversion sketch follows the pruning example below). **Note:** pruning is largely a step that is for deployment of models, as we can make the processing more efficient and the memory footprint smaller. For us, it isn't the most critical of steps. In general, a smaller model that produces the same or similar accuracy is better, as you can do more with less. The reduction of overfitting and potential accuracy benefits are somewhat secondary to making the model more usable in practice.
###Code
!pip install tensorflow_model_optimization
import tensorflow_model_optimization as tfmot
prune_low_magnitude = tfmot.sparsity.keras.prune_low_magnitude
model_for_pruning = prune_low_magnitude(model)
model_for_pruning.compile(optimizer="adam",loss="binary_crossentropy",metrics=METRICS)
model_for_pruning.summary()
callbacks = [
tfmot.sparsity.keras.UpdatePruningStep()
]
model_for_pruning.fit(train_features, train_labels,
batch_size=64, epochs=10, validation_split=.2,
callbacks=callbacks)
model_for_pruning.evaluate(test_features, test_labels)
###Output
Epoch 1/10
2849/2849 [==============================] - 37s 12ms/step - loss: 0.0070 - tp: 165.0000 - fp: 20.0000 - tn: 227444.0000 - fn: 216.0000 - accuracy: 0.9990 - precision: 0.8919 - recall: 0.4331 - auc: 0.7699 - prc: 0.5033 - val_loss: 0.0042 - val_tp: 2.0000 - val_fp: 0.0000e+00 - val_tn: 45502.0000 - val_fn: 65.0000 - val_accuracy: 0.9986 - val_precision: 1.0000 - val_recall: 0.0299 - val_auc: 0.9178 - val_prc: 0.8041
Epoch 2/10
2849/2849 [==============================] - 31s 11ms/step - loss: 0.0065 - tp: 126.0000 - fp: 17.0000 - tn: 181945.0000 - fn: 188.0000 - accuracy: 0.9989 - precision: 0.8811 - recall: 0.4013 - auc: 0.7370 - prc: 0.4301 - val_loss: 0.0042 - val_tp: 2.0000 - val_fp: 0.0000e+00 - val_tn: 45502.0000 - val_fn: 65.0000 - val_accuracy: 0.9986 - val_precision: 1.0000 - val_recall: 0.0299 - val_auc: 0.9326 - val_prc: 0.8188
Epoch 3/10
2849/2849 [==============================] - 31s 11ms/step - loss: 0.0064 - tp: 123.0000 - fp: 14.0000 - tn: 181948.0000 - fn: 191.0000 - accuracy: 0.9989 - precision: 0.8978 - recall: 0.3917 - auc: 0.8383 - prc: 0.4415 - val_loss: 0.0038 - val_tp: 2.0000 - val_fp: 0.0000e+00 - val_tn: 45502.0000 - val_fn: 65.0000 - val_accuracy: 0.9986 - val_precision: 1.0000 - val_recall: 0.0299 - val_auc: 0.9325 - val_prc: 0.8255
Epoch 4/10
2849/2849 [==============================] - 32s 11ms/step - loss: 0.0060 - tp: 139.0000 - fp: 15.0000 - tn: 181947.0000 - fn: 175.0000 - accuracy: 0.9990 - precision: 0.9026 - recall: 0.4427 - auc: 0.9281 - prc: 0.4781 - val_loss: 0.0034 - val_tp: 11.0000 - val_fp: 0.0000e+00 - val_tn: 45502.0000 - val_fn: 56.0000 - val_accuracy: 0.9988 - val_precision: 1.0000 - val_recall: 0.1642 - val_auc: 0.9474 - val_prc: 0.8312
Epoch 5/10
2849/2849 [==============================] - 31s 11ms/step - loss: 0.0059 - tp: 136.0000 - fp: 20.0000 - tn: 181942.0000 - fn: 178.0000 - accuracy: 0.9989 - precision: 0.8718 - recall: 0.4331 - auc: 0.9348 - prc: 0.4667 - val_loss: 0.0040 - val_tp: 2.0000 - val_fp: 0.0000e+00 - val_tn: 45502.0000 - val_fn: 65.0000 - val_accuracy: 0.9986 - val_precision: 1.0000 - val_recall: 0.0299 - val_auc: 0.9473 - val_prc: 0.8275
Epoch 6/10
2849/2849 [==============================] - 31s 11ms/step - loss: 0.0063 - tp: 118.0000 - fp: 17.0000 - tn: 181945.0000 - fn: 196.0000 - accuracy: 0.9988 - precision: 0.8741 - recall: 0.3758 - auc: 0.9441 - prc: 0.4302 - val_loss: 0.0038 - val_tp: 2.0000 - val_fp: 0.0000e+00 - val_tn: 45502.0000 - val_fn: 65.0000 - val_accuracy: 0.9986 - val_precision: 1.0000 - val_recall: 0.0299 - val_auc: 0.9473 - val_prc: 0.8323
Epoch 7/10
2849/2849 [==============================] - 33s 12ms/step - loss: 0.0059 - tp: 127.0000 - fp: 9.0000 - tn: 181953.0000 - fn: 187.0000 - accuracy: 0.9989 - precision: 0.9338 - recall: 0.4045 - auc: 0.9480 - prc: 0.4596 - val_loss: 0.0030 - val_tp: 24.0000 - val_fp: 0.0000e+00 - val_tn: 45502.0000 - val_fn: 43.0000 - val_accuracy: 0.9991 - val_precision: 1.0000 - val_recall: 0.3582 - val_auc: 0.9474 - val_prc: 0.8347
Epoch 8/10
2849/2849 [==============================] - ETA: 0s - loss: 0.0060 - tp: 123.0000 - fp: 18.0000 - tn: 181944.0000 - fn: 191.0000 - accuracy: 0.9989 - precision: 0.8723 - recall: 0.3917 - auc: 0.9417 - prc: 0.4512
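###Markdown
As a hedged sketch of the deployment step mentioned above (not run here), the pruning wrappers can be stripped and the model converted with TensorFlow Lite; the output file name is just an example.
###Code
# Sketch: remove the pruning wrappers, then convert the smaller model for low-power devices
final_model = tfmot.sparsity.keras.strip_pruning(model_for_pruning)
converter = tf.lite.TFLiteConverter.from_keras_model(final_model)
tflite_model = converter.convert()
with open("pruned_model.tflite", "wb") as f:
    f.write(tflite_model)
###Output
_____no_output_____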
###Markdown
ExercisePredict the price of diamonds! 
###Code
import seaborn as sns
ex_df = sns.load_dataset("diamonds")
ex_df = pd.get_dummies(ex_df)
ex_df.head()
y = ex_df["price"]
X = ex_df.drop(columns={"price"})
X_tr_ex, X_te_ex, y_tr_ex, y_te_ex = train_test_split(X, y)
start_width = X.shape[1]
start_width
###Output
_____no_output_____
###Markdown
Generate a BaselineI'll use a different loss - mean absolute percentage error.
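Note that `plot_loss` is called below but never defined in this notebook; a minimal sketch, assuming a standard Keras `History` object, is:
###Code
import matplotlib.pyplot as plt
def plot_loss(history):
    # Plot training and validation loss curves from a Keras History object
    plt.plot(history.history["loss"], label="loss")
    plt.plot(history.history["val_loss"], label="val_loss")
    plt.xlabel("Epoch")
    plt.ylabel("Loss")
    plt.legend()
    plt.show()
###Output
_____no_output_____
###Markdown
Now build and fit the baseline model.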
###Code
#baseline
normalizer = tf.keras.layers.Normalization(axis=-1)
normalizer.adapt(np.array(X_tr_ex))
mod_ex = keras.Sequential()
mod_ex.add(normalizer)
mod_ex.add(InputLayer(input_shape=(start_width,)))
mod_ex.add(Dense(start_width, activation='relu'))
mod_ex.add(Dense(1))
mod_ex.compile(optimizer='adam', loss="mean_absolute_percentage_error")
callback = tf.keras.callbacks.EarlyStopping(monitor='loss', patience=20, restore_best_weights=True)
hist_ex = mod_ex.fit(
X_tr_ex,
y_tr_ex,
epochs=100,
batch_size=64,
validation_split=0.2,
callbacks=[callback],
verbose=0
)
print(mod_ex.evaluate(X_te_ex, y_te_ex))
plot_loss(hist_ex)
###Output
422/422 [==============================] - 1s 1ms/step - loss: 9.6126
9.612637519836426
|
examples/notebooks/lasso_regression.ipynb
|
###Markdown
Intel® Extension for Scikit-learn Lasso Regression for YearPredictionMSD dataset
###Code
from timeit import default_timer as timer
from sklearn import metrics
from sklearn.model_selection import train_test_split
import pandas as pd
import numpy as np
import requests
import warnings
import os
from IPython.display import HTML
warnings.filterwarnings('ignore')
###Output
_____no_output_____
###Markdown
Download the data
###Code
dataset_dir = 'data'
dataset_name = 'year_prediction_msd'
url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/00203/YearPredictionMSD.txt.zip'
os.makedirs(dataset_dir, exist_ok=True)
local_url = os.path.join(dataset_dir, os.path.basename(url))
if not os.path.isfile(local_url):
response = requests.get(url, stream=True)
with open(local_url, 'wb+') as file:
for data in response.iter_content(8192):
file.write(data)
year = pd.read_csv(local_url, header=None)
x = year.iloc[:, 1:].to_numpy(dtype=np.float32)
y = year.iloc[:, 0].to_numpy(dtype=np.float32)
###Output
_____no_output_____
###Markdown
Split the data into train and test sets
###Code
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.1, random_state=0)
x_train.shape, x_test.shape, y_train.shape, y_test.shape
###Output
_____no_output_____
###Markdown
Normalize the data
###Code
from sklearn.preprocessing import MinMaxScaler, StandardScaler
scaler_x = MinMaxScaler()
scaler_y = StandardScaler()
scaler_x.fit(x_train)
x_train = scaler_x.transform(x_train)
x_test = scaler_x.transform(x_test)
scaler_y.fit(y_train.reshape(-1, 1))
y_train = scaler_y.transform(y_train.reshape(-1, 1)).ravel()
y_test = scaler_y.transform(y_test.reshape(-1, 1)).ravel()
###Output
_____no_output_____
###Markdown
Patch original Scikit-learn with Intel® Extension for Scikit-learnIntel® Extension for Scikit-learn (previously known as daal4py) contains drop-in replacement functionality for the stock Scikit-learn package. You can take advantage of the performance optimizations of Intel® Extension for Scikit-learn by adding just two lines of code before the usual Scikit-learn imports:
###Code
from sklearnex import patch_sklearn
patch_sklearn()
###Output
Intel(R) Extension for Scikit-learn* enabled (https://github.com/intel/scikit-learn-intelex)
###Markdown
Intel® Extension for Scikit-learn patching affects performance of specific Scikit-learn functionality. Refer to the [list of supported algorithms and parameters](https://intel.github.io/scikit-learn-intelex/algorithms.html) for details. In cases where unsupported parameters are used, the package falls back to the original Scikit-learn implementation. If the patching does not cover your scenarios, [submit an issue on GitHub](https://github.com/intel/scikit-learn-intelex/issues). Training of the Lasso algorithm with Intel® Extension for Scikit-learn for YearPredictionMSD dataset
###Code
from sklearn.linear_model import Lasso
params = {
"alpha": 0.01,
"fit_intercept": False,
"random_state": 0,
"copy_X": False,
}
start = timer()
model = Lasso(**params).fit(x_train, y_train)
train_patched = timer() - start
f"Intel® extension for Scikit-learn time: {train_patched:.2f} s"
###Output
_____no_output_____
###Markdown
Predict and get a result of the Lasso algorithm with Intel® Extension for Scikit-learn
###Code
y_predict = model.predict(x_test)
mse_metric_opt = metrics.mean_squared_error(y_test, y_predict)
f'Patched Scikit-learn MSE: {mse_metric_opt}'
###Output
_____no_output_____
###Markdown
Train the same algorithm with original Scikit-learnIn order to cancel the optimizations, we use *unpatch_sklearn* and reimport the Lasso class.
###Code
from sklearnex import unpatch_sklearn
unpatch_sklearn()
###Output
_____no_output_____
###Markdown
Training of the Lasso algorithm with original Scikit-learn library for YearPredictionMSD dataset
###Code
from sklearn.linear_model import Lasso
start = timer()
model = Lasso(**params).fit(x_train, y_train)
train_unpatched = timer() - start
f"Original Scikit-learn time: {train_unpatched:.2f} s"
###Output
_____no_output_____
###Markdown
Predict and get a result of the Lasso algorithm with original Scikit-learn
###Code
y_predict = model.predict(x_test)
mse_metric_original = metrics.mean_squared_error(y_test, y_predict)
f'Original Scikit-learn MSE: {mse_metric_original}'
HTML(f"<h3>Compare MSE metric of patched Scikit-learn and original</h3>"
f"MSE metric of patched Scikit-learn: {mse_metric_opt} <br>"
f"MSE metric of unpatched Scikit-learn: {mse_metric_original} <br>"
f"Metrics ratio: {mse_metric_opt/mse_metric_original} <br>"
f"<h3>With Scikit-learn-intelex patching you can:</h3>"
f"<ul>"
f"<li>Use your Scikit-learn code for training and prediction with minimal changes (a couple of lines of code);</li>"
f"<li>Fast execution training and prediction of Scikit-learn models;</li>"
f"<li>Get the similar quality</li>"
f"<li>Get speedup in <strong>{(train_unpatched/train_patched):.1f}</strong> times.</li>"
f"</ul>")
###Output
_____no_output_____
|
current_position.ipynb
|
###Markdown
*This code extracts data from the portfolio file (either in .pdf or .xlsx format) and the latest trading data from the Dhaka Stock Exchange website, then portrays the current position of the portfolio. The scenario is updated every 30 seconds for the sake of continuous monitoring.* **Disclaimer** 1. You need to upload the portfolio file from the "Files" section. The accepted format is either .pdf or .xlsx. 2. The broker-provided .pdf file might not suit the code; in that case you may provide the portfolio data in .xlsx format. Please ensure that the .xlsx file has TRADING.CODE, Total Quantity, Average Cost and Total Cost in the first four columns. 3. The gain/loss is calculated using the Latest Trading Price during trading hours and the Closing Price after trading hours.
###Code
# take care of the warnings
import warnings
warnings.filterwarnings("ignore")
warnings.simplefilter('ignore')
# check if bdshare pypdf and tabula are installed if not install
try:
from bdshare import get_current_trade_data
except:
!pip install bdshare
from bdshare import get_current_trade_data
try:
import tabula
except:
!pip install tabula-py
import tabula
# import required modules
import pandas as pd
import numpy as np
# set the number format
pd.options.display.float_format = "{:.2f}".format
# set pandas display width
pd.set_option("display.max_rows", 50)
pd.set_option("display.max_columns", 14)
pd.set_option("display.width", 1000)
# function for extracting the data
def dse_current(filename):
# get the portfolio data
if ".pdf" in filename:
df_org = tabula.read_pdf(filename, multiple_tables=True, pages="all")[0]
port = df_org.iloc[1:-2, :]
req_cols = ["Company", "Total", "Average", "Total Cost"]
port = port[req_cols]
port["Total Cost"] = port["Total Cost"].str.replace(",", "")
cols = ["Total", "Average", "Total Cost"]
port[cols] = port[cols].apply(pd.to_numeric, errors="coerce")
port.columns = ["TRADING.CODE", "Total Quantity", "Average Cost", "Total Cost"]
elif ".xlsx" in filename:
port = pd.read_excel(filename)
port.columns = ["TRADING.CODE", "Total Quantity", "Average Cost", "Total Cost"]
# Get the instruments under portfolio
instruments = port["TRADING.CODE"]
instruments = instruments.tolist()
# create the blank dataframe for merging all instruments' data
df = []
# extracts all instruments data
    for instrument in instruments:
        stock_data = get_current_trade_data(instrument)
df.append(stock_data)
df = pd.concat(df)
df = df.sort_values(by=["symbol"])
df = df[
["symbol", "ltp", "high", "low", "close", "ycp", "trade", "value", "volume"]
]
df = df.reset_index()
df = df.drop(columns=["index"])
df.columns = [
"TRADING.CODE",
"LTP",
"HIGH",
"LOW",
"CLOSEP",
"YCP",
"TRADE",
"VALUE",
"VOLUME",
]
cols = df.columns.drop(["TRADING.CODE"])
df[cols] = df[cols].apply(pd.to_numeric, errors="coerce")
# merge the data with uploaded portfolio
df = pd.merge(port, df, on="TRADING.CODE")
# get current gain/loss
if df["CLOSEP"].sum() == 0:
df["Current_Val"] = np.where(
df["LTP"] > 0,
df["Total Quantity"] * df["LTP"],
df["Total Quantity"] * df["YCP"],
)
else:
df["Current_Val"] = df["Total Quantity"] * df["CLOSEP"]
df["Gain/Loss"] = df["Current_Val"] - df["Total Cost"]
# generate the summary table
total_quan = df["Total Quantity"].sum()
total_cost = df["Total Cost"].sum()
mkt_val = df["Current_Val"].sum()
gain = df["Gain/Loss"].sum()
summary = [total_quan, total_cost, mkt_val, gain]
summary_tab = pd.DataFrame(
np.array(summary).reshape(1, 4),
columns=["Total Shares", "Total Cost", "Total Market Value", "Total Gain/Loss"],
)
return (df, summary_tab)
# Set the time format
from datetime import datetime
import time
from IPython.display import clear_output
from pytz import timezone
bd = timezone("Asia/Dhaka")
# function for repeating the previous function after certain interval
def periodic_position(interval, filename):
while True:
        # call once and unpack, so both tables come from the same data pull
        current_pos, summary_tab = dse_current(filename)
clear_output(wait=True)
print("As of:")
print(datetime.now(bd).strftime("%H:%M:%S.%f - %b %d %Y"))
print("---")
print(current_pos)
print("---")
# print("Current Total Gain/Loss is: {:2f}".format(gain))
print(summary_tab)
print("---")
time.sleep(interval)
# now keep producing the current status of the portfolio
periodic_position(30, "P 25-05-2021.pdf")
###Output
_____no_output_____
|