# Interactive single compartment HH example
To run this interactive Jupyter Notebook, please click on the rocket icon 🚀 in the top panel. For more information, please see {ref}`how to use this documentation <userdocs:usage:jupyterbooks>`. Please uncomment the line below if you are using Google Colab, which does not include these packages by default.
```
#%pip install pyneuroml neuromllite NEURON
import math
from neuroml import NeuroMLDocument
from neuroml import Cell
from neuroml import IonChannelHH
from neuroml import GateHHRates
from neuroml import BiophysicalProperties
from neuroml import MembraneProperties
from neuroml import ChannelDensity
from neuroml import HHRate
from neuroml import SpikeThresh
from neuroml import SpecificCapacitance
from neuroml import InitMembPotential
from neuroml import IntracellularProperties
from neuroml import IncludeType
from neuroml import Resistivity
from neuroml import Morphology, Segment, Point3DWithDiam
from neuroml import Network, Population
from neuroml import PulseGenerator, ExplicitInput
import numpy as np
from pyneuroml import pynml
from pyneuroml.lems import LEMSSimulation
```
## Declare the model
### Create ion channels
```
def create_na_channel():
"""Create the Na channel.
This will create the Na channel and save it to a file.
It will also validate this file.
    :returns: name of the created file
"""
na_channel = IonChannelHH(id="na_channel", notes="Sodium channel for HH cell", conductance="10pS", species="na")
gate_m = GateHHRates(id="na_m", instances="3", notes="m gate for na channel")
m_forward_rate = HHRate(type="HHExpLinearRate", rate="1per_ms", midpoint="-40mV", scale="10mV")
m_reverse_rate = HHRate(type="HHExpRate", rate="4per_ms", midpoint="-65mV", scale="-18mV")
gate_m.forward_rate = m_forward_rate
gate_m.reverse_rate = m_reverse_rate
na_channel.gate_hh_rates.append(gate_m)
gate_h = GateHHRates(id="na_h", instances="1", notes="h gate for na channel")
h_forward_rate = HHRate(type="HHExpRate", rate="0.07per_ms", midpoint="-65mV", scale="-20mV")
h_reverse_rate = HHRate(type="HHSigmoidRate", rate="1per_ms", midpoint="-35mV", scale="10mV")
gate_h.forward_rate = h_forward_rate
gate_h.reverse_rate = h_reverse_rate
na_channel.gate_hh_rates.append(gate_h)
na_channel_doc = NeuroMLDocument(id="na_channel", notes="Na channel for HH neuron")
na_channel_fn = "HH_example_na_channel.nml"
na_channel_doc.ion_channel_hhs.append(na_channel)
pynml.write_neuroml2_file(nml2_doc=na_channel_doc, nml2_file_name=na_channel_fn, validate=True)
return na_channel_fn
def create_k_channel():
"""Create the K channel
This will create the K channel and save it to a file.
It will also validate this file.
:returns: name of the K channel file
"""
k_channel = IonChannelHH(id="k_channel", notes="Potassium channel for HH cell", conductance="10pS", species="k")
gate_n = GateHHRates(id="k_n", instances="4", notes="n gate for k channel")
n_forward_rate = HHRate(type="HHExpLinearRate", rate="0.1per_ms", midpoint="-55mV", scale="10mV")
n_reverse_rate = HHRate(type="HHExpRate", rate="0.125per_ms", midpoint="-65mV", scale="-80mV")
gate_n.forward_rate = n_forward_rate
gate_n.reverse_rate = n_reverse_rate
k_channel.gate_hh_rates.append(gate_n)
k_channel_doc = NeuroMLDocument(id="k_channel", notes="k channel for HH neuron")
k_channel_fn = "HH_example_k_channel.nml"
k_channel_doc.ion_channel_hhs.append(k_channel)
pynml.write_neuroml2_file(nml2_doc=k_channel_doc, nml2_file_name=k_channel_fn, validate=True)
return k_channel_fn
def create_leak_channel():
"""Create a leak channel
This will create the leak channel and save it to a file.
It will also validate this file.
:returns: name of leak channel nml file
"""
leak_channel = IonChannelHH(id="leak_channel", conductance="10pS", notes="Leak conductance")
leak_channel_doc = NeuroMLDocument(id="leak_channel", notes="leak channel for HH neuron")
leak_channel_fn = "HH_example_leak_channel.nml"
leak_channel_doc.ion_channel_hhs.append(leak_channel)
pynml.write_neuroml2_file(nml2_doc=leak_channel_doc, nml2_file_name=leak_channel_fn, validate=True)
return leak_channel_fn
```
### Create cell
```
def create_cell():
"""Create the cell.
:returns: name of the cell nml file
"""
# Create the nml file and add the ion channels
hh_cell_doc = NeuroMLDocument(id="cell", notes="HH cell")
hh_cell_fn = "HH_example_cell.nml"
hh_cell_doc.includes.append(IncludeType(href=create_na_channel()))
hh_cell_doc.includes.append(IncludeType(href=create_k_channel()))
hh_cell_doc.includes.append(IncludeType(href=create_leak_channel()))
# Define a cell
hh_cell = Cell(id="hh_cell", notes="A single compartment HH cell")
# Define its biophysical properties
bio_prop = BiophysicalProperties(id="hh_b_prop")
# notes="Biophysical properties for HH cell")
# Membrane properties are a type of biophysical properties
mem_prop = MembraneProperties()
# Add membrane properties to the biophysical properties
bio_prop.membrane_properties = mem_prop
# Append to cell
hh_cell.biophysical_properties = bio_prop
# Channel density for Na channel
na_channel_density = ChannelDensity(id="na_channels", cond_density="120.0 mS_per_cm2", erev="50.0 mV", ion="na", ion_channel="na_channel")
mem_prop.channel_densities.append(na_channel_density)
# Channel density for k channel
k_channel_density = ChannelDensity(id="k_channels", cond_density="360 S_per_m2", erev="-77mV", ion="k", ion_channel="k_channel")
mem_prop.channel_densities.append(k_channel_density)
# Leak channel
leak_channel_density = ChannelDensity(id="leak_channels", cond_density="3.0 S_per_m2", erev="-54.3mV", ion="non_specific", ion_channel="leak_channel")
mem_prop.channel_densities.append(leak_channel_density)
# Other membrane properties
mem_prop.spike_threshes.append(SpikeThresh(value="-20mV"))
mem_prop.specific_capacitances.append(SpecificCapacitance(value="1.0 uF_per_cm2"))
mem_prop.init_memb_potentials.append(InitMembPotential(value="-65mV"))
intra_prop = IntracellularProperties()
intra_prop.resistivities.append(Resistivity(value="0.03 kohm_cm"))
# Add to biological properties
bio_prop.intracellular_properties = intra_prop
# Morphology
morph = Morphology(id="hh_cell_morph")
# notes="Simple morphology for the HH cell")
seg = Segment(id="0", name="soma", notes="Soma segment")
# We want a diameter such that area is 1000 micro meter^2
    # surface area of a sphere is 4 pi r^2 = pi diam^2
diam = math.sqrt(1000 / math.pi)
proximal = distal = Point3DWithDiam(x="0", y="0", z="0", diameter=str(diam))
seg.proximal = proximal
seg.distal = distal
morph.segments.append(seg)
hh_cell.morphology = morph
hh_cell_doc.cells.append(hh_cell)
pynml.write_neuroml2_file(nml2_doc=hh_cell_doc, nml2_file_name=hh_cell_fn, validate=True)
return hh_cell_fn
```
### Create a network
```
def create_network():
"""Create the network
:returns: name of network nml file
"""
net_doc = NeuroMLDocument(id="network",
notes="HH cell network")
net_doc_fn = "HH_example_net.nml"
net_doc.includes.append(IncludeType(href=create_cell()))
# Create a population: convenient to create many cells of the same type
pop = Population(id="pop0", notes="A population for our cell", component="hh_cell", size=1)
# Input
pulsegen = PulseGenerator(id="pg", notes="Simple pulse generator", delay="100ms", duration="100ms", amplitude="0.08nA")
exp_input = ExplicitInput(target="pop0[0]", input="pg")
    net = Network(id="single_hh_cell_network", notes="A network with a single population")
net_doc.pulse_generators.append(pulsegen)
net.explicit_inputs.append(exp_input)
net.populations.append(pop)
net_doc.networks.append(net)
pynml.write_neuroml2_file(nml2_doc=net_doc, nml2_file_name=net_doc_fn, validate=True)
return net_doc_fn
```
## Plot the data we record
```
def plot_data(sim_id):
"""Plot the sim data.
Load the data from the file and plot the graph for the membrane potential
using the pynml generate_plot utility function.
    :param sim_id: ID of the simulation
"""
data_array = np.loadtxt(sim_id + ".dat")
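    # data columns: 0 = time, 1 = membrane potential, 2 = total channel current,
    # 3 and 4 = Na and K current densities (matching the output columns defined in main() below)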
pynml.generate_plot([data_array[:, 0]], [data_array[:, 1]], "Membrane potential", show_plot_already=False, save_figure_to=sim_id + "-v.png", xaxis="time (s)", yaxis="membrane potential (V)")
pynml.generate_plot([data_array[:, 0]], [data_array[:, 2]], "channel current", show_plot_already=False, save_figure_to=sim_id + "-i.png", xaxis="time (s)", yaxis="channel current (A)")
pynml.generate_plot([data_array[:, 0], data_array[:, 0]], [data_array[:, 3], data_array[:, 4]], "current density", labels=["Na", "K"], show_plot_already=False, save_figure_to=sim_id + "-iden.png", xaxis="time (s)", yaxis="current density (A_per_m2)")
```
## Create and run the simulation
Create the simulation, run it, record data, and plot the recorded information.
```
def main():
"""Main function
Include the NeuroML model into a LEMS simulation file, run it, plot some
data.
"""
# Simulation bits
sim_id = "HH_single_compartment_example_sim"
simulation = LEMSSimulation(sim_id=sim_id, duration=300, dt=0.01, simulation_seed=123)
# Include the NeuroML model file
simulation.include_neuroml2_file(create_network())
# Assign target for the simulation
simulation.assign_simulation_target("single_hh_cell_network")
# Recording information from the simulation
simulation.create_output_file(id="output0", file_name=sim_id + ".dat")
simulation.add_column_to_output_file("output0", column_id="pop0[0]/v", quantity="pop0[0]/v")
simulation.add_column_to_output_file("output0", column_id="pop0[0]/iChannels", quantity="pop0[0]/iChannels")
simulation.add_column_to_output_file("output0", column_id="pop0[0]/na/iDensity", quantity="pop0[0]/hh_b_prop/membraneProperties/na_channels/iDensity/")
simulation.add_column_to_output_file("output0", column_id="pop0[0]/k/iDensity", quantity="pop0[0]/hh_b_prop/membraneProperties/k_channels/iDensity/")
# Save LEMS simulation to file
sim_file = simulation.save_to_file()
# Run the simulation using the default jNeuroML simulator
pynml.run_lems_with_jneuroml(sim_file, max_memory="2G", nogui=True, plot=False)
# Plot the data
plot_data(sim_id)
if __name__ == "__main__":
main()
```
## Amazon SageMaker Feature Store: Encrypt Data in your Online or Offline Feature Store using KMS key
This notebook demonstrates how to enable encryption for your data in your online or offline Feature Store using a KMS key. We start by showing how to programmatically create a KMS key and how to apply it to the feature store creation process for data encryption. The last portion of this notebook demonstrates how to verify that your KMS key is being used to encrypt your data in your feature store.
### Overview
1. Create a KMS key.
- How to create a KMS key programmatically using the KMS client from boto3?
2. Attach role to your KMS key.
- Attach the required entries to your policy for data encryption in your feature store.
3. Create an online or offline feature store and apply the KMS key to the feature store creation process.
- How to enable encryption for your online store?
- How to enable encryption for your offline store?
4. How to verify that your data is encrypted in your online or offline store?
### Prerequisites
This notebook uses both `boto3` and the SageMaker Python SDK, and the `Python 3 (Data Science)` kernel. It works with Studio, Jupyter, and JupyterLab.
### Library Dependencies:
* sagemaker>=2.0.0
* numpy
* pandas
```
import sagemaker
import sys
import boto3
import pandas as pd
import numpy as np
import json
original_version = sagemaker.__version__
%pip install 'sagemaker>=2.0.0'
```
### Set up
```
sagemaker_session = sagemaker.Session()
s3_bucket_name = sagemaker_session.default_bucket()
prefix = "sagemaker-featurestore-kms-demo"
role = sagemaker.get_execution_role()
region = sagemaker_session.boto_region_name
```
Create a KMS client using boto3. Note that you can access your boto session through your sagemaker session, e.g., `sagemaker_session`.
```
kms = sagemaker_session.boto_session.client("kms")
```
### KMS Policy Template
Below is the policy template you will use for creating a KMS key. You will specify your role to grant it access to various KMS operations that will be used in the back-end for encrypting your data in your Online or Offline Feature Store.
**Note**: You will need to substitute your account number for `123456789012` in the policy below, in the lines containing `arn:aws:cloudtrail:*:123456789012:trail/*`.
It is important to understand that the policy below grants admin privileges for Customer Managed Keys (CMK) around viewing and revoking grants, decrypt and encrypt permissions on CloudTrail, and full access permissions through Feature Store. Also, note that the Feature Store service creates additional grants that are used for encryption purposes for your online store.
```
policy = {
"Version": "2012-10-17",
"Id": "key-policy-feature-store",
"Statement": [
{
"Sid": "Allow access through Amazon SageMaker Feature Store for all principals in the account that are authorized to use Amazon SageMaker Feature Store",
"Effect": "Allow",
"Principal": {"AWS": role},
"Action": [
"kms:Encrypt",
"kms:Decrypt",
"kms:DescribeKey",
"kms:CreateGrant",
"kms:RetireGrant",
"kms:ReEncryptFrom",
"kms:ReEncryptTo",
"kms:GenerateDataKey",
"kms:ListAliases",
"kms:ListGrants",
],
"Resource": ["*"],
"Condition": {"StringLike": {"kms:ViaService": "sagemaker.*.amazonaws.com"}},
},
{
"Sid": "Allow administrators to view the CMK and revoke grants",
"Effect": "Allow",
"Principal": {"AWS": [role]},
"Action": ["kms:Describe*", "kms:Get*", "kms:List*", "kms:RevokeGrant"],
"Resource": ["*"],
},
{
"Sid": "Enable CloudTrail Encrypt Permissions",
"Effect": "Allow",
"Principal": {"Service": "cloudtrail.amazonaws.com", "AWS": [role]},
"Action": "kms:GenerateDataKey*",
"Resource": "*",
"Condition": {
"StringLike": {
"kms:EncryptionContext:aws:cloudtrail:arn": [
"arn:aws:cloudtrail:*:123456789012:trail/*",
"arn:aws:cloudtrail:*:123456789012:trail/*",
]
}
},
},
{
"Sid": "Enable CloudTrail log decrypt permissions",
"Effect": "Allow",
"Principal": {"AWS": [role]},
"Action": "kms:Decrypt",
"Resource": ["*"],
"Condition": {"Null": {"kms:EncryptionContext:aws:cloudtrail:arn": "false"}},
},
],
}
```
Create your new KMS key using the policy above and your KMS client.
```
try:
new_kms_key = kms.create_key(
Policy=json.dumps(policy),
Description="string",
KeyUsage="ENCRYPT_DECRYPT",
CustomerMasterKeySpec="SYMMETRIC_DEFAULT",
Origin="AWS_KMS",
)
AliasName = "my-new-kms-key" ## provide a unique alias name
kms.create_alias(
AliasName="alias/" + AliasName, TargetKeyId=new_kms_key["KeyMetadata"]["KeyId"]
)
print(new_kms_key)
except Exception as e:
print("Error {}".format(e))
```
Now that we have created our KMS key and added the necessary operations to our role, we load our data.
```
customer_data = pd.read_csv("data/feature_store_introduction_customer.csv")
orders_data = pd.read_csv("data/feature_store_introduction_orders.csv")
customer_data.head()
orders_data.head()
customer_data.dtypes
orders_data.dtypes
```
### Creating Feature Groups
We first start by creating feature group names for customer_data and orders_data. Following this, we create two Feature Groups, one for customer_data and another for orders_data.
```
from time import gmtime, strftime, sleep
customers_feature_group_name = "customers-feature-group-" + strftime("%d-%H-%M-%S", gmtime())
orders_feature_group_name = "orders-feature-group-" + strftime("%d-%H-%M-%S", gmtime())
```
Instantiate a FeatureGroup object for customer_data and orders_data.
```
from sagemaker.feature_store.feature_group import FeatureGroup
customers_feature_group = FeatureGroup(
name=customers_feature_group_name, sagemaker_session=sagemaker_session
)
orders_feature_group = FeatureGroup(
name=orders_feature_group_name, sagemaker_session=sagemaker_session
)
import time
current_time_sec = int(round(time.time()))
record_identifier_feature_name = "customer_id"
```
Append the EventTime feature to your data frames. This feature is required, and it timestamps each data point.
```
customer_data["EventTime"] = pd.Series([current_time_sec] * len(customer_data), dtype="float64")
orders_data["EventTime"] = pd.Series([current_time_sec] * len(orders_data), dtype="float64")
customer_data.head()
orders_data.head()
```
Load feature definitions to your feature group.
```
customers_feature_group.load_feature_definitions(data_frame=customer_data)
orders_feature_group.load_feature_definitions(data_frame=orders_data)
```
### How to create an Online or Offline Feature Store that uses your KMS key for encryption?
Below we create two feature groups, `customers_feature_group` and `orders_feature_group` respectively, and explain how to use your KMS key to securely encrypt your data in your online or offline feature store.
### How to create an Online Feature store with your KMS key?
To encrypt data in your online feature store, set `enable_online_store` to `True` and specify your KMS key as the parameter `online_store_kms_key_id`. You will need to substitute your account number into `arn:aws:kms:us-east-1:123456789012:key/`, replacing `123456789012` with your account number.
```
customers_feature_group.create(
s3_uri=f"s3://{s3_bucket_name}/{prefix}",
record_identifier_name=record_identifier_feature_name,
event_time_feature_name="EventTime",
role_arn=role,
enable_online_store=True,
online_store_kms_key_id = 'arn:aws:kms:us-east-1:123456789012:key/'+ new_kms_key['KeyMetadata']['KeyId']
)
orders_feature_group.create(
s3_uri=f"s3://{s3_bucket_name}/{prefix}",
record_identifier_name=record_identifier_feature_name,
event_time_feature_name="EventTime",
role_arn=role,
enable_online_store=True,
online_store_kms_key_id = 'arn:aws:kms:us-east-1:123456789012:key/'+new_kms_key['KeyMetadata']['KeyId']
)
```
### How to create an Offline Feature store with your KMS key?
Similarly, set `enable_online_store` to `False` and then specify your KMS key as the parameter `offline_store_kms_key_id`. You will need to substitute your account number into `arn:aws:kms:us-east-1:123456789012:key/`, replacing `123456789012` with your account number.
```
customers_feature_group.create(
s3_uri=f"s3://{s3_bucket_name}/{prefix}",
record_identifier_name=record_identifier_feature_name,
event_time_feature_name="EventTime",
role_arn=role,
enable_online_store=False,
offline_store_kms_key_id = 'arn:aws:kms:us-east-1:123456789012:key/'+ new_kms_key['KeyMetadata']['KeyId']
)
orders_feature_group.create(
s3_uri=f"s3://{s3_bucket_name}/{prefix}",
record_identifier_name=record_identifier_feature_name,
event_time_feature_name="EventTime",
role_arn=role,
enable_online_store=False,
offline_store_kms_key_id = 'arn:aws:kms:us-east-1:123456789012:key/'+new_kms_key['KeyMetadata']['KeyId']
)
```
For this example we create an online feature store that encrypts your data using your KMS key.
**Note**: You will need to substitute your account number into `arn:aws:kms:us-east-1:123456789012:key/`, replacing `123456789012` with your account number.
```
customers_feature_group.create(
s3_uri=f"s3://{s3_bucket_name}/{prefix}",
record_identifier_name=record_identifier_feature_name,
event_time_feature_name="EventTime",
role_arn=role,
    enable_online_store=True,
    online_store_kms_key_id="arn:aws:kms:us-east-1:123456789012:key/"
    + new_kms_key["KeyMetadata"]["KeyId"],
)
orders_feature_group.create(
s3_uri=f"s3://{s3_bucket_name}/{prefix}",
record_identifier_name=record_identifier_feature_name,
event_time_feature_name="EventTime",
role_arn=role,
    enable_online_store=True,
    online_store_kms_key_id="arn:aws:kms:us-east-1:123456789012:key/"
    + new_kms_key["KeyMetadata"]["KeyId"],
)
```
### How to verify that your KMS key is being used to encrypt your data in your Online or Offline Feature Store?
### Online Store Verification
To demonstrate that your data is being encrypted in your online store, use your `kms` client from `boto3` to list the grants under your KMS key. The grant should show 'SageMakerFeatureStore-' followed by the name of the feature group you created, and should list these operations under Operations: `['Decrypt','Encrypt','GenerateDataKey','ReEncryptFrom','ReEncryptTo','CreateGrant','RetireGrant','DescribeKey']`
An alternative way to check that your data is encrypted in your online store is to open [CloudTrail](https://console.aws.amazon.com/cloudtrail/) and navigate to your trail. Under General details you should see that SSE-KMS encryption is enabled, with your AWS KMS key shown below it. Below is a screenshot showing this:

### Offline Store Verification
To verify that your data is being encrypted in your offline store, navigate to your S3 bucket through the [Console](https://console.aws.amazon.com/s3/home?region=us-east-1), then drill down through your prefix, offline store, and feature group name into the /data/ folder. Once here, select a parquet file, which is a file containing your feature group data. For this example, the directory path in S3 was:
`Amazon S3/MYBUCKET/PREFIX/123456789012/sagemaker/region/offline-store/customers-feature-group-23-22-44-47/data/year=2021/month=03/day=23/hour=22/20210323T224448Z_IdfObJjhpqLQ5rmG.parquet.`
After selecting the parquet file, navigate to the Server-side encryption settings. It should state that Default encryption is enabled and reference (SSE-KMS) under server-side encryption. If this shows, then your data is being encrypted in the offline store. Below is a screenshot of how this should look in the console:

For this example, since we created a secure online store using our KMS key, below we use `list_grants` to check that our feature group and the required grants are present under Operations.
```
kms.list_grants(
KeyId="arn:aws:kms:us-east-1:123456789012:key/" + new_kms_key["KeyMetadata"]["KeyId"]
)
```
### Clean Up Resources
Remove the Feature Groups we created.
```
customers_feature_group.delete()
orders_feature_group.delete()
# restore the original sagemaker version
%pip install 'sagemaker=={}'.format(original_version)
```
### Next Steps
For more information on how to use KMS to encrypt your data in your Feature Store, see [Feature Store Security](https://docs.aws.amazon.com/sagemaker/latest/dg/feature-store-security.html). For general information on KMS keys and CMK, see [Customer Managed Keys](https://docs.aws.amazon.com/kms/latest/developerguide/concepts.html#master_keys).
# Hyperparameter tuning
In the previous section, we did not discuss the parameters of random forest
and gradient-boosting. However, there are a couple of things to keep in mind
when setting these.
This notebook gives crucial information regarding how to set the
hyperparameters of both random forest and gradient boosting decision tree
models.
<div class="admonition caution alert alert-warning">
<p class="first admonition-title" style="font-weight: bold;">Caution!</p>
<p class="last">For the sake of clarity, no cross-validation will be used to estimate the
testing error. We are only showing the effect of the parameters
on the validation set of what should be the inner cross-validation.</p>
</div>
## Random forest
The main parameter to tune for random forest is the `n_estimators` parameter.
In general, the more trees in the forest, the better the generalization
performance will be. However, it will slow down the fitting and prediction
time. The goal is to balance computing time and generalization performance when
setting the number of estimators for a learner that will be put in production.
The `max_depth` parameter could also be tuned. Sometimes, there is no need
to have fully grown trees. However, be aware that with random forest, trees
are generally deep since we are seeking to overfit each learner on its
bootstrap sample; this overfitting is mitigated by combining the learners.
Assembling underfitted trees (i.e. shallow trees) might also lead to an
underfitted forest.
```
from sklearn.datasets import fetch_california_housing
from sklearn.model_selection import train_test_split
data, target = fetch_california_housing(return_X_y=True, as_frame=True)
target *= 100 # rescale the target in k$
data_train, data_test, target_train, target_test = train_test_split(
data, target, random_state=0)
import pandas as pd
from sklearn.model_selection import GridSearchCV
from sklearn.ensemble import RandomForestRegressor
param_grid = {
"n_estimators": [10, 20, 30],
"max_depth": [3, 5, None],
}
grid_search = GridSearchCV(
RandomForestRegressor(n_jobs=2), param_grid=param_grid,
scoring="neg_mean_absolute_error", n_jobs=2,
)
grid_search.fit(data_train, target_train)
columns = [f"param_{name}" for name in param_grid.keys()]
columns += ["mean_test_score", "rank_test_score"]
cv_results = pd.DataFrame(grid_search.cv_results_)
cv_results["mean_test_score"] = -cv_results["mean_test_score"]
cv_results[columns].sort_values(by="rank_test_score")
```
We can observe that in our grid-search, the largest `max_depth` together
with the largest `n_estimators` led to the best generalization performance.
## Gradient-boosting decision trees
For gradient-boosting, parameters are coupled, so we cannot set the
parameters one after the other anymore. The important parameters are
`n_estimators`, `max_depth`, and `learning_rate`.
Let's first discuss the `max_depth` parameter.
We saw in the section on gradient-boosting that the algorithm fits the error
of the previous tree in the ensemble. Thus, fitting fully grown trees will
be detrimental.
Indeed, the first tree of the ensemble would perfectly fit (overfit) the data
and thus no subsequent tree would be required, since there would be no
residuals.
Therefore, the tree used in gradient-boosting should have a low depth,
typically between 3 and 8 levels. Having very weak learners at each step
helps reduce overfitting.
With this consideration in mind, the deeper the trees, the faster the
residuals will be corrected and fewer learners are required. Therefore,
`n_estimators` should be increased if `max_depth` is lower.
Finally, we have overlooked the impact of the `learning_rate` parameter
until now. When fitting the residuals, we would like the tree
to try to correct all possible errors or only a fraction of them.
The learning-rate allows you to control this behaviour.
A small learning-rate means each tree only corrects a small fraction of the
residuals, while a large learning-rate (e.g., 1) fits the residuals completely.
So, with a very low learning-rate, we will need more estimators to correct the
overall error. However, a learning-rate that is too large tends to produce an
overfitted ensemble, similar to having a tree depth that is too large.
```
from sklearn.ensemble import GradientBoostingRegressor
param_grid = {
"n_estimators": [10, 30, 50],
"max_depth": [3, 5, None],
"learning_rate": [0.1, 1],
}
grid_search = GridSearchCV(
GradientBoostingRegressor(), param_grid=param_grid,
scoring="neg_mean_absolute_error", n_jobs=2
)
grid_search.fit(data_train, target_train)
columns = [f"param_{name}" for name in param_grid.keys()]
columns += ["mean_test_score", "rank_test_score"]
cv_results = pd.DataFrame(grid_search.cv_results_)
cv_results["mean_test_score"] = -cv_results["mean_test_score"]
cv_results[columns].sort_values(by="rank_test_score")
```
<div class="admonition caution alert alert-warning">
<p class="first admonition-title" style="font-weight: bold;">Caution!</p>
<p class="last">Here, we tune the <tt class="docutils literal">n_estimators</tt> but be aware that using early-stopping as
in the previous exercise will be better.</p>
</div>
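For reference, here is a minimal sketch of what early stopping could look like with `GradientBoostingRegressor`; the parameter values are illustrative assumptions, not tuned choices.
```
from sklearn.ensemble import GradientBoostingRegressor

# Allow a large number of trees, but stop adding them once the score on an internal
# validation split has not improved for 10 consecutive iterations.
gbdt = GradientBoostingRegressor(
    n_estimators=1000,        # upper bound on the number of trees
    learning_rate=0.1,
    max_depth=5,
    validation_fraction=0.1,  # fraction of the training data held out for monitoring
    n_iter_no_change=10,      # early-stopping patience
)
gbdt.fit(data_train, target_train)
print(f"Trees actually used: {gbdt.n_estimators_}")
```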
```
from keras import applications
from keras.preprocessing.image import ImageDataGenerator
from keras import optimizers
from keras.models import Sequential
from keras.layers import Dropout, Flatten, Dense
from keras.models import model_from_json
import os, sklearn, pandas, numpy as np, random
from sklearn import svm
import skimage, skimage.io, skimage.filters
import matplotlib.pyplot as plt
from keras.callbacks import TensorBoard
from sklearn.utils import shuffle
import imp
from sklearn.preprocessing import LabelBinarizer
# from pcanet import PCANet
from pcanet import PCANet
import numpy as np
%matplotlib inline
# set cwd back to default
os.chdir('../')
os.getcwd()
# custom scripts
import config # params, constants
import data, models # functions that mutate our data
# from utils import utils, plot # custom functions, in local environment
import data # src/data.py
dataset = data.init_dataset()
os.listdir('../datasets/models')
```
### load a model
```
# load json and create model
def load_model(filename, weights):
with open(filename, 'r') as json: # cnn_transfer_augm
loaded_model_json = json.read()
loaded_model = model_from_json(loaded_model_json)
# load weights into new model
loaded_model.load_weights(weights)
print("Loaded model from disk")
optimizer = optimizers.Adam(lr=0.001)
loaded_model.compile(loss = "categorical_crossentropy", optimizer = optimizer, metrics=['accuracy',
'mean_squared_error','categorical_crossentropy','top_k_categorical_accuracy'])
print('compiled model')
return loaded_model
model_augment = config.dataset_dir + 'models/cnntransfer_augm.json'
model_augment_weights = config.dataset_dir + 'models/cnntransferweights_augmen.h5'
model_default = config.dataset_dir + 'models/cnntransfer.json'
model_default_weights = config.dataset_dir + 'models/cnntransferweights.h5'
# augment = load_model(model_augment, model_augment_weights)
default = load_model(model_default, model_default_weights)
augment = load_model(model_augment, model_augment_weights)
# pick the n classes with the most instances
amt = 5
classes = data.top_classes(dataset.labels, amt)
classes
maxx = 100
max_train = 100
x_test, n = data.extract_topx_classes(dataset, classes, 'test', maxx, max_train)
n
x_test, y_test, n = data.extract_all_test(dataset, x_test)
# y_train, y_test, y_validation = data.labels_to_vectors(dataset, y_train, y_test, y_validation)
y_test = data.one_hot(y_test)
input_shape = y_test.shape[1:] # = shape of an individual image (matrix)
output_length = (y_test[0]).shape[0] # = length of an individual label
output_length
```
## running tests
```
# import sklearn.metrics.confusion_matrix
def evaluate(model):
cvscores = []
scores = model.evaluate(x_test, y_test, verbose=0)
print("%s: %.2f%%" % (model.metrics_names[1], scores[1]*100))
cvscores.append(scores[1] * 100)
print("%.2f%% (+/- %.2f%%)" % (np.mean(cvscores), np.std(cvscores)))
# evaluate(model_final_augmentation)
import tensorflow as tf
from sklearn.metrics import confusion_matrix
def test1(model, x_test, y_test):
y_pred_class = model.predict(x_test)
# con = tf.confusion_matrix(labels=y_test, predictions=y_pred_class )
# print(con)
y_test_non_category = [ np.argmax(t) for t in y_test ]
y_predict_non_category = [ np.argmax(t) for t in y_pred_class ]
conf_mat = confusion_matrix(y_test_non_category, y_predict_non_category)
print(conf_mat)
return conf_mat
c1 = test1(default, x_test, y_test)
c2 = test1(augment, x_test, y_test)
# http://scikit-learn.org/stable/auto_examples/model_selection/plot_confusion_matrix.html
# comparable but different from: mlxtend.plotting.plot_confusion_matrix
import itertools
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, datasets
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix
# import some data to play with
iris = datasets.load_iris()
X = iris.data
y = iris.target
class_names = iris.target_names
# Split the data into a training set and a test set
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
# Run classifier, using a model that is too regularized (C too low) to see
# the impact on the results
classifier = svm.SVC(kernel='linear', C=0.01)
y_pred = classifier.fit(X_train, y_train).predict(X_test)
def plot_confusion_matrix(cm, classes,
normalize=False,
title='Confusion matrix',
cmap=plt.cm.Blues):
"""
This function prints and plots the confusion matrix.
Normalization can be applied by setting `normalize=True`.
"""
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
print("Normalized confusion matrix")
else:
print('Confusion matrix, without normalization')
print(cm)
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar()
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes, rotation=45)
plt.yticks(tick_marks, classes)
fmt = '.2f' if normalize else 'd'
thresh = cm.max() / 2.
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
plt.text(j, i, format(cm[i, j], fmt),
horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black")
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
labels = np.array(['Glass','Paper','Cardboard','Plastic','Metal'])
labels = np.array(['Paper', 'Glass', 'Plastic', 'Metal', 'Cardboard'])
from matplotlib import rcParams
rcParams['font.family'] = 'sans-serif'
rcParams['font.sans-serif'] = [
'Times New Roman', 'Tahoma', 'DejaVu Sans', 'Lucida Grande', 'Verdana'
]
rcParams['font.size'] = 12
labels
c_ = c1
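# the computed matrices are replaced below with hard-coded results, presumably from an earlier run, so the plots are reproducible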
c1 = np.array([[29, 1, 1, 24, 5],
[ 0, 41, 2, 1, 16],
[ 0, 11, 34, 1, 14],
[ 0, 0, 0, 55, 5],
[ 0, 5, 0, 4, 51]])
# from mlxtend.plotting import plot_confusion_matrix
plt.figure()
plot_confusion_matrix(c1, labels, title='Confusion matrix - default')
c2 = np.array([[ 3, 2, 5, 85, 5],
[ 0, 76, 19, 2, 3,],
[ 0, 13, 72, 9, 6,],
[ 0, 0, 2, 95, 3,],
[ 0, 36, 22, 5, 37]])
# from mlxtend.plotting import plot_confusion_matrix
plt.figure()
plot_confusion_matrix(c2, labels, title='Confusion matrix - augmented')
```
## T-tests
t-test for the true positives (TP) per class, between the two networks
```
tp_c1 = c1.diagonal()
tp_c2 = c2.diagonal()
print(tp_c1)
print(tp_c2)
from utils import utils
utils.ttest(0.05, tp_c1, tp_c2)
utils.ttest(0.05, tp_c1.flatten(), tp_c2.flatten())
def select_not_diagonal(arr=[]):
a = arr.copy()
np.fill_diagonal(a, -1)
return [x for x in list(a.flatten()) if x > -1]
# everything not on the diagonal is either fp or fn
# with fn or fp depending on the perspective (which class == p)
c1_ = select_not_diagonal(c1)
c2_ = select_not_diagonal(c2)
print(c1_)
print(c2_)
utils.ttest(0.05, c1_, c2_)
def recall_precision(cm=[[]]):
print('label, recall, precision')
total = sum(cm.flatten())
for i, label in enumerate(labels):
# e.g. label = paper
true_paper = cm[i]
        tp = cm[i][i] # diagonal element
fp = sum(cm[i]) - tp # upper col minus tp
# vertical col
col = [row[i] for row in cm ]
fn = sum(col) - tp
tn = total - tp - fp - fn
print(label, ':', round(tp * 1./ (tp + fn),3), round(tp * 1./ (tp + fp),3))
# print(round(tp * 1./ (tp + fp),3))
print('c1 - no aug')
recall_precision(c1)
print('c2 - aug')
recall_precision(c2)
```
# Module 2: Playing with pytorch: linear regression
```
import matplotlib.pyplot as plt
%matplotlib inline
import torch
import numpy as np
torch.__version__
```
## Warm-up: Linear regression with numpy
Our model is:
$$
y_t = 2x^1_t-3x^2_t+1, \quad t\in\{1,\dots,30\}
$$
Our task is, given the 'observations' $(x_t,y_t)_{t\in\{1,\dots,30\}}$, to recover the weights $w^1=2, w^2=-3$ and the bias $b = 1$.
In order to do so, we will solve the following optimization problem:
$$
\underset{w^1,w^2,b}{\operatorname{argmin}} \sum_{t=1}^{30} \left(w^1x^1_t+w^2x^2_t+b-y_t\right)^2
$$
```
import numpy as np
from numpy.random import random
# generate random input data
x = random((30,2))
# generate labels corresponding to input data x
y = np.dot(x, [2., -3.]) + 1.
w_source = np.array([2., -3.])
b_source = np.array([1.])
print(x.shape)
print(y.shape)
print(np.array([2., -3.]).shape)
print(x[-5:])
print(x[:5])
import matplotlib.pyplot as plt
import numpy as np
from mpl_toolkits.mplot3d import Axes3D
def plot_figs(fig_num, elev, azim, x, y, weights, bias):
fig = plt.figure(fig_num, figsize=(4, 3))
plt.clf()
ax = Axes3D(fig, elev=elev, azim=azim)
ax.scatter(x[:, 0], x[:, 1], y)
ax.plot_surface(np.array([[0, 0], [1, 1]]),
np.array([[0, 1], [0, 1]]),
(np.dot(np.array([[0, 0, 1, 1],
[0, 1, 0, 1]]).T, weights) + bias).reshape((2, 2)),
alpha=.5)
ax.set_xlabel('x_1')
ax.set_ylabel('x_2')
ax.set_zlabel('y')
def plot_views(x, y, w, b):
#Generate the different figures from different views
elev = 43.5
azim = -110
plot_figs(1, elev, azim, x, y, w, b[0])
plt.show()
plot_views(x, y, w_source, b_source)
```
In vector form, we define:
$$
\hat{y}_t = {\bf w}^T{\bf x}_t+b
$$
and we want to minimize the loss given by:
$$
loss = \sum_t\underbrace{\left(\hat{y}_t-y_t \right)^2}_{loss_t}.
$$
To minimize the loss we first compute the gradient of each $loss_t$:
\begin{eqnarray*}
\frac{\partial{loss_t}}{\partial w^1} &=& 2x^1_t\left({\bf w}^T{\bf x}_t+b-y_t \right)\\
\frac{\partial{loss_t}}{\partial w^2} &=& 2x^2_t\left({\bf w}^T{\bf x}_t+b-y_t \right)\\
\frac{\partial{loss_t}}{\partial b} &=& 2\left({\bf w}^T{\bf x}_t+b-y_t \right)
\end{eqnarray*}
Note that the actual gradient of the loss is given by:
$$
\frac{\partial{loss}}{\partial w^1} =\sum_t \frac{\partial{loss_t}}{\partial w^1},\quad
\frac{\partial{loss}}{\partial w^2} =\sum_t \frac{\partial{loss_t}}{\partial w^2},\quad
\frac{\partial{loss}}{\partial b} =\sum_t \frac{\partial{loss_t}}{\partial b}
$$
For one epoch, **(Batch) Gradient Descent** updates the weights and bias as follows:
\begin{eqnarray*}
w^1_{new}&=&w^1_{old}-\alpha\frac{\partial{loss}}{\partial w^1} \\
w^2_{new}&=&w^2_{old}-\alpha\frac{\partial{loss}}{\partial w^2} \\
b_{new}&=&b_{old}-\alpha\frac{\partial{loss}}{\partial b},
\end{eqnarray*}
and then we run several epochs.
```
# randomly initialize learnable weights and bias
w_init = random(2)
b_init = random(1)
w = w_init
b = b_init
print("initial values of the parameters:", w, b )
# our model forward pass
def forward(x):
return x.dot(w)+b
# Loss function
def loss(x, y):
y_pred = forward(x)
return (y_pred - y)**2
print("initial loss:", np.sum([loss(x_val,y_val) for x_val, y_val in zip(x, y)]) )
# compute gradient
def gradient(x, y): # d_loss/d_w, d_loss/d_c
return 2*(x.dot(w)+b - y)*x, 2 * (x.dot(w)+b - y)
learning_rate = 1e-2
# Training loop
for epoch in range(10):
grad_w = np.array([0,0])
grad_b = np.array(0)
l = 0
for x_val, y_val in zip(x, y):
grad_w = np.add(grad_w,gradient(x_val, y_val)[0])
grad_b = np.add(grad_b,gradient(x_val, y_val)[1])
l += loss(x_val, y_val)
w = w - learning_rate * grad_w
b = b - learning_rate * grad_b
print("progress:", "epoch:", epoch, "loss",l[0])
# After training
print("estimation of the parameters:", w, b)
plot_views(x, y, w, b)
```
## Linear regression with tensors
```
dtype = torch.FloatTensor
print(dtype)
# dtype = torch.cuda.FloatTensor # Uncomment this to run on GPU
x_t = torch.from_numpy(x).type(dtype)
y_t = torch.from_numpy(y).type(dtype).unsqueeze(1)
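# unsqueeze(1) turns y from shape (30,) into a column vector of shape (30, 1), matching the model output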
print(y.shape)
print(torch.from_numpy(y).type(dtype).shape)
print(y_t.shape)
```
This is an implementation of **(Batch) Gradient Descent** with tensors.
Note that in the main loop, the functions loss_t and gradient_t are always called with the same inputs: they can easily be incorporated into the loop (we'll do that below).
```
w_init_t = torch.from_numpy(w_init).type(dtype)
b_init_t = torch.from_numpy(b_init).type(dtype)
w_t = w_init_t.clone()
w_t.unsqueeze_(1)
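# unsqueeze_ reshapes w_t in place from (2,) to a (2, 1) column so it can be used in matrix products below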
b_t = b_init_t.clone()
b_t.unsqueeze_(1)
print("initial values of the parameters:\n", w_t, b_t )
# our model forward pass
def forward_t(x):
return x.mm(w_t)+b_t
# Loss function
def loss_t(x, y):
y_pred = forward_t(x)
return (y_pred - y).pow(2).sum()
# compute gradient
def gradient_t(x, y): # d_loss/d_w, d_loss/d_c
return 2*torch.mm(torch.t(x),x.mm(w_t)+b_t - y), 2 * (x.mm(w_t)+b_t - y).sum()
learning_rate = 1e-2
for epoch in range(10):
l_t = loss_t(x_t,y_t)
grad_w, grad_b = gradient_t(x_t,y_t)
w_t = w_t-learning_rate*grad_w
b_t = b_t-learning_rate*grad_b
print("progress:", "epoch:", epoch, "loss",l_t)
# After training
print("estimation of the parameters:", w_t, b_t )
```
## Linear regression with Autograd
```
# Setting requires_grad=True indicates that we want to compute gradients with
# respect to these Tensors during the backward pass.
w_v = w_init_t.clone().unsqueeze(1)
w_v.requires_grad_(True)
b_v = b_init_t.clone().unsqueeze(1)
b_v.requires_grad_(True)
print("initial values of the parameters:", w_v.data, b_v.data )
```
An implementation of **(Batch) Gradient Descent** without computing explicitly the gradient and using autograd instead.
```
for epoch in range(10):
y_pred = x_t.mm(w_v)+b_v
loss = (y_pred - y_t).pow(2).sum()
# Use autograd to compute the backward pass. This call will compute the
# gradient of loss with respect to all Variables with requires_grad=True.
# After this call w.grad and b.grad will be tensors holding the gradient
# of the loss with respect to w and b respectively.
loss.backward()
# Update weights using gradient descent. For this step we just want to mutate
# the values of w_v and b_v in-place; we don't want to build up a computational
# graph for the update steps, so we use the torch.no_grad() context manager
# to prevent PyTorch from building a computational graph for the updates
with torch.no_grad():
w_v -= learning_rate * w_v.grad
b_v -= learning_rate * b_v.grad
# Manually zero the gradients after updating weights
    # otherwise gradients will be accumulated after each .backward()
w_v.grad.zero_()
b_v.grad.zero_()
print("progress:", "epoch:", epoch, "loss",loss.data.item())
# After training
print("estimation of the parameters:\n", w_v.data, b_v.data.t() )
```
## Linear regression with neural network
An implementation of **(Batch) Gradient Descent** using the nn package. Here we have a super simple model with only one layer and no activation function!
```
# Use the nn package to define our model as a sequence of layers. nn.Sequential
# is a Module which contains other Modules, and applies them in sequence to
# produce its output. Each Linear Module computes output from input using a
# linear function, and holds internal Variables for its weight and bias.
model = torch.nn.Sequential(
torch.nn.Linear(2, 1),
)
for m in model.children():
m.weight.data = w_init_t.clone().unsqueeze(0)
m.bias.data = b_init_t.clone()
# The nn package also contains definitions of popular loss functions; in this
# case we will use Mean Squared Error (MSE) as our loss function.
loss_fn = torch.nn.MSELoss(reduction='sum')
# switch to train mode
model.train()
for epoch in range(10):
# Forward pass: compute predicted y by passing x to the model. Module objects
# override the __call__ operator so you can call them like functions. When
# doing so you pass a Variable of input data to the Module and it produces
# a Variable of output data.
y_pred = model(x_t)
# Note this operation is equivalent to: pred = model.forward(x_v)
# Compute and print loss. We pass Variables containing the predicted and true
# values of y, and the loss function returns a Variable containing the
# loss.
loss = loss_fn(y_pred, y_t)
# Zero the gradients before running the backward pass.
model.zero_grad()
# Backward pass: compute gradient of the loss with respect to all the learnable
# parameters of the model. Internally, the parameters of each Module are stored
# in Variables with requires_grad=True, so this call will compute gradients for
# all learnable parameters in the model.
loss.backward()
# Update the weights using gradient descent. Each parameter is a Tensor, so
# we can access its data and gradients like we did before.
with torch.no_grad():
for param in model.parameters():
param.data -= learning_rate * param.grad
print("progress:", "epoch:", epoch, "loss",loss.data.item())
# After training
print("estimation of the parameters:")
for param in model.parameters():
print(param)
```
As a last step, we use the optim package directly to update the weights and bias.
```
model = torch.nn.Sequential(
torch.nn.Linear(2, 1),
)
for m in model.children():
m.weight.data = w_init_t.clone().unsqueeze(0)
m.bias.data = b_init_t.clone()
loss_fn = torch.nn.MSELoss(reduction='sum')
model.train()
optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)
for epoch in range(10):
y_pred = model(x_t)
loss = loss_fn(y_pred, y_t)
print("progress:", "epoch:", epoch, "loss",loss.item())
# print("progress:", "epoch:", epoch, "loss",loss)
# Zero gradients, perform a backward pass, and update the weights.
optimizer.zero_grad()
loss.backward()
optimizer.step()
# After training
print("estimation of the parameters:")
for param in model.parameters():
print(param)
```
## Remark
This problem can be solved in 3 lines of code!
```
xb_t = torch.cat((x_t,torch.ones(30).unsqueeze(1)),1)
# print(xb_t)
sol, _ =torch.lstsq(y_t,xb_t)
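# torch.lstsq solves the least-squares problem; the first 3 rows of sol are the estimates of [w1, w2, b]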
print(sol[:3])
```
## Exercise: Play with the code
Change the number of samples from 30 to 300. What happens? How to correct it?
```
x = random((300,2))
y = np.dot(x, [2., -3.]) + 1.
x_t = torch.from_numpy(x).type(dtype)
y_t = torch.from_numpy(y).type(dtype).unsqueeze(1)
model = torch.nn.Sequential(
torch.nn.Linear(2, 1),
)
for m in model.children():
m.weight.data = w_init_t.clone().unsqueeze(0)
m.bias.data = b_init_t.clone()
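# With 10x more samples, a summed loss makes the gradients ~10x larger, so the previous learning
# rate becomes unstable; reduction='mean' keeps the gradient scale independent of the sample
# count, and more epochs compensate for the smaller per-step updates.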
loss_fn = torch.nn.MSELoss(reduction = 'mean')
model.train()
optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)
for epoch in range(10000):
y_pred = model(x_t)
loss = loss_fn(y_pred, y_t)
if epoch%500==499:
print("progress:", "epoch:", epoch+1, "loss",loss.item())
# Zero gradients, perform a backward pass, and update the weights.
optimizer.zero_grad()
loss.backward()
optimizer.step()
# After training
print("estimation of the parameters:")
for param in model.parameters():
print(param)
```
GHCN V2 Temperatures ANOM (C) CR 1200KM 1880-present
GLOBAL Temperature Anomalies in .01 C base period: 1951-1980
http://climatecode.org/
```
import os
import git
if not os.path.exists('ccc-gistemp'):
git.Git().clone('https://github.com/ClimateCodeFoundation/ccc-gistemp.git')
if not os.path.exists('madqc'):
git.Git().clone('https://github.com/ClimateCodeFoundation/madqc.git')
```
It seems that
http://data.giss.nasa.gov/gistemp/sources_v3/GISTEMPv3_sources.tar.gz
and
http://data.giss.nasa.gov/pub/gistemp/SBBX.ERSST.gz
are down, so let's use a local copy instead.
```
!mkdir -p ccc-gistemp/input
!cp data/GISTEMPv3_sources.tar.gz data/SBBX.ERSST.gz ccc-gistemp/input
%cd ccc-gistemp/
```
We don't really need `pypy` for the fetch phase, but the code is Python 2 and the notebook is Python 3, so this is just a lazy way to call py2k code from a py3k notebook ;-p
PS: we are also using the International Surface Temperature Initiative data (ISTI).
```
!pypy tool/fetch.py isti
```
QC the ISTI data.
```
!../madqc/mad.py --progress input/isti.merged.dat
```
We need to copy the ISTI data into the `input` directory.
```
!cp isti.merged.qc.dat input/isti.merged.qc.dat
!cp input/isti.merged.inv input/isti.merged.qc.inv
```
Here is where `pypy` is really needed, this step takes ~35 minutes on vanilla `python` but only ~100 seconds on `pypy`.
```
!pypy tool/run.py -p 'data_sources=isti.merged.qc.dat;element=TAVG' -s 0-1,3-5
```
Python `gistemp` saves the results in the same format as the Fortran program but it ships with `gistemp2csv.py` to make it easier to read the data with `pandas`.
```
!pypy tool/gistemp2csv.py result/*.txt
import pandas as pd
df = pd.read_csv(
'result/landGLB.Ts.GHCN.CL.PA.csv',
skiprows=3,
index_col=0,
na_values=('*****', '****'),
)
```
Let's use `sklearn` to compute the full trend...
```
from sklearn import linear_model
from sklearn.metrics import mean_squared_error, r2_score
reg0 = linear_model.LinearRegression()
series0 = df['J-D'].dropna()
y = series0.values
X = series0.index.values[:, None]
reg0.fit(X, y)
y_pred0 = reg0.predict(X)
R2_0 = mean_squared_error(y, y_pred0)
var0 = r2_score(y, y_pred0)
```
and the past 30 years trend.
```
reg1 = linear_model.LinearRegression()
series1 = df['J-D'].dropna().iloc[-30:]
y = series1.values
X = series1.index.values[:, None]
reg1.fit(X, y)
y_pred1 = reg1.predict(X)
R2_1 = mean_squared_error(y[-30:], y_pred1)
var1 = r2_score(y[-30:], y_pred1)
%matplotlib inline
ax = df.plot.line(y='J-D', figsize=(9, 9), legend=None)
ax.plot(series0.index, y_pred0, 'r--')
ax.plot(series1.index, y_pred1, 'r')
ax.set_xlim([1879, 2018])
leg = f"""Trend in ℃/century (R²)
Full: {reg0.coef_[0]*100:0.2f} ({var0:0.2f})
30-year: {reg1.coef_[0]*100:0.2f} ({var1:0.2f})
"""
ax.text(0.10, 0.75, leg, transform=ax.transAxes);
```
# Inheriting from Unit
### Abstract attributes and methods

**A Unit subclass has class attributes that dictate how an instance is initialized:**
* `_BM` : dict[str, float] Bare module factors for each purchase cost item.
* `_units` : [dict] Units of measure for the `design_results` items.
* `_N_ins`=1 : [int] Expected number of input streams.
* `_N_outs`=2 : [int] Expected number of output streams.
* `_ins_size_is_fixed`=True : [bool] Whether the number of streams in ins is fixed.
* `_outs_size_is_fixed`=True : [bool] Whether the number of streams in outs is fixed.
* `_N_heat_utilities`=0 : [int] Number of heat utility objects in the `heat_utilities` tuple.
* `_stream_link_options`=None : [StreamLinkOptions] Options for linking streams.
* `auxiliary_unit_names`=() : tuple[str] Name of attributes that are auxiliary units.
* `_graphics` : [biosteam Graphics] A Graphics object for diagram representation. Defaults to a box diagram.
* `line` : [str] Label for the unit operation in a diagram. Defaults to the class name.
**Abstract methods are used to setup stream conditions, run heat and mass balances, find design requirements, and cost the unit:**
* `_setup()` : Called before System convergence to initialize constant data and setup stream conditions.
* `_run()` : Called during System convergence to specify `outs` streams.
* `_design()` : Called after System convergence to find design requirements.
* `_cost()` : Called after `_design` to find cost requirements.
**These abstract methods will rely on the following instance attributes:**
* `ins` : Ins[Stream] Input streams.
* `outs` : Outs[Stream] Output streams.
* `power_utility` : [PowerUtility] Can find electricity rate requirement.
* `heat_utilities` : tuple[HeatUtility] Can find cooling and heating requirements.
* `design_results` : [dict] All design requirements.
* `purchase_costs` : [dict] Itemized purchase costs.
* `thermo` : [Thermo] The thermodynamic property package used by the unit.
### Subclass example
The following example depicts inheritance from Unit by creating a new Boiler class:
```
import biosteam as bst
from math import ceil
class Boiler(bst.Unit):
"""
Create a Boiler object that partially boils the feed.
Parameters
----------
ins : stream
Inlet fluid.
outs : stream sequence
* [0] vapor product
* [1] liquid product
V : float
Molar vapor fraction.
P : float
Operating pressure [Pa].
"""
# Note that the documentation does not include `ID` or `thermo` in the parameters.
# This is OK, and most subclasses in BioSTEAM are documented this way too.
# Documentation for all unit operations should include the inlet and outlet streams
# listed by index. If there is only one stream in the inlets (or outlets), there is no
# need to list out by index. The types for the `ins` and `outs` should be either
# `stream sequence` for multiple streams, or `stream` for a single stream.
# Any additional arguments to the unit should also be listed (e.g. V, and P).
_N_ins = 1
_N_outs = 2
_N_heat_utilities = 1
_BM = {'Evaporators': 2.45}
_units = {'Area': 'm^2'}
def __init__(self, ID='', ins=None, outs=(), thermo=None, *, V, P):
bst.Unit.__init__(self, ID, ins, outs, thermo)
# Initialize MultiStream object to perform vapor-liquid equilibrium later
# NOTE: ID is None to not register it in the flowsheet
self._multistream = bst.MultiStream(None, thermo=self.thermo)
self.V = V #: Molar vapor fraction.
self.P = P #: Operating pressure [Pa].
def _setup(self):
gas, liq = self.outs
# Initialize top stream as a gas
gas.phase = 'g'
# Initialize bottom stream as a liquid
liq.phase = 'l'
def _run(self):
feed = self.ins[0]
gas, liq = self.outs
# Perform vapor-liquid equilibrium
ms = self._multistream
ms.imol['l'] = feed.mol
ms.vle(V=self.V, P=self.P)
# Update output streams
gas.mol[:] = ms.imol['g']
liq.mol[:] = ms.imol['l']
gas.T = liq.T = ms.T
gas.P = liq.P = ms.P
# Reset flow to prevent accumulation in multiple simulations
ms.empty()
def _design(self):
# Calculate heat utility requirement (please read docs for HeatUtility objects)
T_operation = self._multistream.T
duty = self.H_out - self.H_in
if duty < 0:
raise RuntimeError(f'{repr(self)} is cooling.')
hu = self.heat_utilities[0]
hu(duty, T_operation)
# Temperature of utility at entrance
T_utility = hu.inlet_utility_stream.T
        # Temperature gradient
dT = T_utility - T_operation
# Heat transfer coefficient kJ/(hr*m2*K)
U = 8176.699
# Area requirement (m^2)
A = duty/(U*dT)
# Maximum area per unit
A_max = 743.224
# Number of units
N = ceil(A/A_max)
# Design requirements are stored here
self.design_results['Area'] = A/N
self.design_results['N'] = N
def _cost(self):
A = self.design_results['Area']
N = self.design_results['N']
# Long-tube vertical boiler cost correlation from
# "Product process and design". Warren et. al. (2016) Table 22.32, pg 592
purchase_cost = N*bst.CE*3.086*A**0.55
# Itemized purchase costs are stored here
self.purchase_costs['Boilers'] = purchase_cost
```
### Simulation test
```
import biosteam as bst
bst.settings.set_thermo(['Water'])
water = bst.Stream('water', Water=300)
B1 = Boiler('B1', ins=water, outs=('gas', 'liq'),
V=0.5, P=101325)
B1.diagram()
B1.show()
B1.simulate()
B1.show()
B1.results()
```
### Graphviz attributes
All [graphviz](https://graphviz.readthedocs.io/en/stable/manual.html) attributes for generating a diagram are stored in `_graphics` as a Graphics object. One Graphics object is generated for each Unit subclass:
```
graphics = Boiler._graphics
edge_in = graphics.edge_in
edge_out = graphics.edge_out
node = graphics.node
# Attributes correspond to each inlet stream respectively
# For example: Attributes for B1.ins[0] would correspond to edge_in[0]
edge_in
# Attributes correspond to each outlet stream respectively
# For example: Attributes for B1.outs[0] would correspond to edge_out[0]
edge_out
node # The node represents the actual unit
```
These attributes can be changed to the user's liking:
```
edge_out[0]['tailport'] = 'n'
edge_out[1]['tailport'] = 's'
node['width'] = '1'
node['height'] = '1.2'
B1.diagram()
```
It is also possible to dynamically adjust node and edge attributes by setting the `tailor_node_to_unit` attribute:
```
def tailor_node_to_unit(node, unit):
feed = unit.ins[0]
if not feed.F_mol:
node['name'] += '\n-empty-'
graphics.tailor_node_to_unit = tailor_node_to_unit
B1.diagram()
B1.ins[0].empty()
B1.diagram()
```
NOTE: The example implementation of the `tailor_node_to_unit` function is not suggested; best to keep diagrams simple.
```
# !pip install ray[tune]
import pandas as pd
import numpy as np
from matplotlib import pyplot as plt
from sklearn.metrics import mean_squared_error
from hyperopt import hp
from ray import tune
from hyperopt import fmin, tpe, hp,Trials, space_eval
import scipy.stats
df = pd.read_csv("../../Data/Raw/flightLogData.csv")
plt.figure(figsize=(20, 10))
plt.plot(df.Time, df['Altitude'], linewidth=2, color="r", label="Altitude")
plt.plot(df.Time, df['Vertical_velocity'], linewidth=2, color="y", label="Vertical_velocity")
plt.plot(df.Time, df['Vertical_acceleration'], linewidth=2, color="b", label="Vertical_acceleration")
plt.legend()
plt.show()
temp_df = df[['Altitude', "Vertical_velocity", "Vertical_acceleration"]]
noise = np.random.normal(2, 5, temp_df.shape)
noisy_df = temp_df + noise
noisy_df['Time'] = df['Time']
plt.figure(figsize=(20, 10))
plt.plot(noisy_df.Time, noisy_df['Altitude'], linewidth=2, color="r", label="Altitude")
plt.plot(noisy_df.Time, noisy_df['Vertical_velocity'], linewidth=2, color="y", label="Vertical_velocity")
plt.plot(noisy_df.Time, noisy_df['Vertical_acceleration'], linewidth=2, color="b", label="Vertical_acceleration")
plt.legend()
plt.show()
```
## Altitude
```
q = 0.001
A = np.array([[1.0, 0.1, 0.005], [0, 1.0, 0.1], [0, 0, 1]])
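# state = [altitude, vertical velocity, vertical acceleration];
# A is a constant-acceleration transition matrix (here with dt = 0.1 s), and H maps the state
# to the measured [altitude, vertical acceleration].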
H = np.array([[1.0, 0.0, 0.0],[ 0.0, 0.0, 1.0]])
P = np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]])
# R = np.array([[0.5, 0.0], [0.0, 0.0012]])
# Q = np.array([[q, 0.0, 0.0], [0.0, q, 0.0], [0.0, 0.0, q]])
I = np.identity(3)
x_hat = np.array([[0.0], [0.0], [0.0]])
Y = np.array([[0.0], [0.0]])
def kalman_update(param):
r1, r2, q1 = param['r1'], param['r2'], param['q1']
R = np.array([[r1, 0.0], [0.0, r2]])
Q = np.array([[q1, 0.0, 0.0], [0.0, q1, 0.0], [0.0, 0.0, q1]])
A = np.array([[1.0, 0.05, 0.00125], [0, 1.0, 0.05], [0, 0, 1]])
H = np.array([[1.0, 0.0, 0.0],[ 0.0, 0.0, 1.0]])
P = np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]])
I = np.identity(3)
x_hat = np.array([[0.0], [0.0], [0.0]])
Y = np.array([[0.0], [0.0]])
new_altitude = []
new_acceleration = []
new_velocity = []
for altitude, az in zip(noisy_df['Altitude'], noisy_df['Vertical_acceleration']):
Z = np.array([[altitude], [az]])  # measurement vector: altitude and vertical acceleration
# Prediction step
x_hat_minus = np.dot(A, x_hat)
P_minus = np.dot(np.dot(A, P), np.transpose(A)) + Q
# Update step: Kalman gain, innovation, state and covariance update
K = np.dot(np.dot(P_minus, np.transpose(H)), np.linalg.inv((np.dot(np.dot(H, P_minus), np.transpose(H)) + R)))
Y = Z - np.dot(H, x_hat_minus)
x_hat = x_hat_minus + np.dot(K, Y)
P = np.dot((I - np.dot(K, H)), P_minus)
new_altitude.append(float(x_hat[0]))
new_velocity.append(float(x_hat[1]))
new_acceleration.append(float(x_hat[2]))
return new_altitude
def objective_function(param):
r1, r2, q1 = param['r1'], param['r2'], param['q1']
R = np.array([[r1, 0.0], [0.0, r2]])
Q = np.array([[q1, 0.0, 0.0], [0.0, q1, 0.0], [0.0, 0.0, q1]])
A = np.array([[1.0, 0.05, 0.00125], [0, 1.0, 0.05], [0, 0, 1]])
H = np.array([[1.0, 0.0, 0.0],[ 0.0, 0.0, 1.0]])
P = np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]])
I = np.identity(3)
x_hat = np.array([[0.0], [0.0], [0.0]])
Y = np.array([[0.0], [0.0]])
new_altitude = []
new_acceleration = []
new_velocity = []
for altitude, az in zip(noisy_df['Altitude'], noisy_df['Vertical_acceleration']):
Z = np.array([[altitude], [az]])
x_hat_minus = np.dot(A, x_hat)
P_minus = np.dot(np.dot(A, P), np.transpose(A)) + Q
K = np.dot(np.dot(P_minus, np.transpose(H)), np.linalg.inv((np.dot(np.dot(H, P_minus), np.transpose(H)) + R)))
Y = Z - np.dot(H, x_hat_minus)
x_hat = x_hat_minus + np.dot(K, Y)
P = np.dot((I - np.dot(K, H)), P_minus)
Y = Z - np.dot(H, x_hat_minus)
new_altitude.append(float(x_hat[0]))
new_velocity.append(float(x_hat[1]))
new_acceleration.append(float(x_hat[2]))
return mean_squared_error(df['Altitude'], new_altitude)
# space = {
# "r1": hp.choice("r1", np.arange(0.01, 90, 0.005)),
# "r2": hp.choice("r2", np.arange(0.01, 90, 0.005)),
# "q1": hp.choice("q1", np.arange(0.0001, 0.0009, 0.0001))
# }
len(np.arange(0.00001, 0.09, 0.00001))
space = {
"r1": hp.choice("r1", np.arange(0.001, 90, 0.001)),
"r2": hp.choice("r2", np.arange(0.001, 90, 0.001)),
"q1": hp.choice("q1", np.arange(0.00001, 0.09, 0.00001))
}
# Initialize trials object
trials = Trials()
best = fmin(fn=objective_function, space = space, algo=tpe.suggest, max_evals=100, trials=trials )
print(best)
# best contains the chosen index for each hp.choice dimension
print(space_eval(space, best))
# space_eval maps those indices back to the actual parameter values, e.g. {'q1': ..., 'r1': ..., 'r2': ...}
d1 = space_eval(space, best)
objective_function(d1)
%%timeit
objective_function({'q1': 0.06626, 'r1': 0.25, 'r2': 0.75})
objective_function({'q1': 0.06626, 'r1': 0.25, 'r2': 0.75})
y = kalman_update(d1)
current = kalman_update({'q1': 0.06626, 'r1': 0.25, 'r2': 0.75})
plt.figure(figsize=(20, 10))
plt.plot(noisy_df.Time, df['Altitude'], linewidth=2, color="r", label="Actual")
plt.plot(noisy_df.Time, current, linewidth=2, color="g", label="ESP32")
plt.plot(noisy_df.Time, noisy_df['Altitude'], linewidth=2, color="y", label="Noisy")
plt.plot(noisy_df.Time, y, linewidth=2, color="b", label="Predicted")
plt.legend()
plt.show()
def kalman_update_return_velocity(param):
r1, r2, q1 = param['r1'], param['r2'], param['q1']
R = np.array([[r1, 0.0], [0.0, r2]])
Q = np.array([[q1, 0.0, 0.0], [0.0, q1, 0.0], [0.0, 0.0, q1]])
A = np.array([[1.0, 0.05, 0.00125], [0, 1.0, 0.05], [0, 0, 1]])
H = np.array([[1.0, 0.0, 0.0],[ 0.0, 0.0, 1.0]])
P = np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]])
I = np.identity(3)
x_hat = np.array([[0.0], [0.0], [0.0]])
Y = np.array([[0.0], [0.0]])
new_altitude = []
new_acceleration = []
new_velocity = []
for altitude, az in zip(noisy_df['Altitude'], noisy_df['Vertical_acceleration']):
Z = np.array([[altitude], [az]])
x_hat_minus = np.dot(A, x_hat)
P_minus = np.dot(np.dot(A, P), np.transpose(A)) + Q
K = np.dot(np.dot(P_minus, np.transpose(H)), np.linalg.inv((np.dot(np.dot(H, P_minus), np.transpose(H)) + R)))
Y = Z - np.dot(H, x_hat_minus)
x_hat = x_hat_minus + np.dot(K, Y)
P = np.dot((I - np.dot(K, H)), P_minus)
Y = Z - np.dot(H, x_hat_minus)
new_altitude.append(float(x_hat[0]))
new_velocity.append(float(x_hat[1]))
new_acceleration.append(float(x_hat[2]))
return new_velocity
def objective_function(param):
r1, r2, q1 = param['r1'], param['r2'], param['q1']
R = np.array([[r1, 0.0], [0.0, r2]])
Q = np.array([[q1, 0.0, 0.0], [0.0, q1, 0.0], [0.0, 0.0, q1]])
A = np.array([[1.0, 0.05, 0.00125], [0, 1.0, 0.05], [0, 0, 1]])
H = np.array([[1.0, 0.0, 0.0],[ 0.0, 0.0, 1.0]])
P = np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]])
I = np.identity(3)
x_hat = np.array([[0.0], [0.0], [0.0]])
Y = np.array([[0.0], [0.0]])
new_altitude = []
new_acceleration = []
new_velocity = []
for altitude, az in zip(noisy_df['Altitude'], noisy_df['Vertical_acceleration']):
Z = np.array([[altitude], [az]])
x_hat_minus = np.dot(A, x_hat)
P_minus = np.dot(np.dot(A, P), np.transpose(A)) + Q
K = np.dot(np.dot(P_minus, np.transpose(H)), np.linalg.inv((np.dot(np.dot(H, P_minus), np.transpose(H)) + R)))
Y = Z - np.dot(H, x_hat_minus)
x_hat = x_hat_minus + np.dot(K, Y)
P = np.dot((I - np.dot(K, H)), P_minus)
Y = Z - np.dot(H, x_hat_minus)
new_altitude.append(float(x_hat[0]))
new_velocity.append(float(x_hat[1]))
new_acceleration.append(float(x_hat[2]))
return mean_squared_error(df['Vertical_velocity'], new_velocity)
space = {
"r1": hp.choice("r1", np.arange(0.001, 90, 0.001)),
"r2": hp.choice("r2", np.arange(0.001, 90, 0.001)),
"q1": hp.choice("q1", np.arange(0.00001, 0.09, 0.00001))
}
# Initialize trials object
trials = Trials()
best = fmin(fn=objective_function, space = space, algo=tpe.suggest, max_evals=100, trials=trials )
print(best)
print(space_eval(space, best))
d2 = space_eval(space, best)
objective_function(d2)
y = kalman_update_return_velocity(d2)
current = kalman_update_return_velocity({'q1': 0.0013, 'r1': 0.25, 'r2': 0.65})
previous = kalman_update_return_velocity({'q1': 0.08519, 'r1': 4.719, 'r2': 56.443})
plt.figure(figsize=(20, 10))
plt.plot(noisy_df.Time, df['Vertical_velocity'], linewidth=2, color="r", label="Actual")
plt.plot(noisy_df.Time, current, linewidth=2, color="g", label="ESP32")
plt.plot(noisy_df.Time, previous, linewidth=2, color="c", label="With previous data")
plt.plot(noisy_df.Time, noisy_df['Vertical_velocity'], linewidth=2, color="y", label="Noisy")
plt.plot(noisy_df.Time, y, linewidth=2, color="b", label="Predicted")
plt.legend()
plt.show()
```
<a href="https://colab.research.google.com/github/naufalhisyam/TurbidityPrediction-thesis/blob/main/train_model_DenseNet121_CV.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
```
import os
import datetime
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import tensorflow as tf
!pip install tensorflow-addons
import tensorflow_addons as tfa
from sklearn.model_selection import KFold, train_test_split
!git clone https://github.com/naufalhisyam/TurbidityPrediction-thesis.git
os.chdir('/content/TurbidityPrediction-thesis')
images = pd.read_csv(r'./Datasets/0degree_lowrange/0degInfo.csv') #load dataset info
train_df, test_df = train_test_split(images, train_size=0.9, shuffle=True, random_state=1)
Y = train_df[['Turbidity']]
VALIDATION_R2 = []
VALIDATION_LOSS = []
VALIDATION_MSE = []
VALIDATION_MAE = []
name = 'ResNet_0deg_withTL'
save_dir = f'saved_models/{name}'
if not os.path.exists(save_dir):
os.makedirs(save_dir)
def get_model():
#Create model
base_model = tf.keras.applications.ResNet50(include_top=False, weights='imagenet',
input_shape=(224, 224, 3), pooling='avg')
out = base_model.output
prediction = tf.keras.layers.Dense(1, activation="linear")(out)
model = tf.keras.Model(inputs = base_model.input, outputs = prediction)
#Compile the model
return model
def get_model_name(k):
return 'resnet_'+str(k)+'.h5'
tf.test.gpu_device_name()
train_generator = tf.keras.preprocessing.image.ImageDataGenerator(
horizontal_flip=True
)
test_generator = tf.keras.preprocessing.image.ImageDataGenerator(
horizontal_flip=True
)
kf = KFold(n_splits = 5)
fold_var = 1
for train_index, val_index in kf.split(np.zeros(Y.shape[0]),Y):
training_data = train_df.iloc[train_index]
validation_data = train_df.iloc[val_index]
train_images = train_generator.flow_from_dataframe(training_data,
x_col = "Filepath", y_col = "Turbidity",
target_size=(224, 224), color_mode='rgb',
class_mode = "raw", shuffle = True)
val_images = train_generator.flow_from_dataframe(validation_data,
x_col = "Filepath", y_col = "Turbidity",
target_size=(224, 224), color_mode='rgb',
class_mode = "raw", shuffle = True)
# CREATE NEW MODEL
model = get_model()
# COMPILE NEW MODEL
opt = tf.keras.optimizers.Adam(learning_rate=1e-4, decay=1e-6)
model.compile(loss=tf.keras.losses.Huber(), optimizer=opt, metrics=['mae','mse', tfa.metrics.RSquare(name="R2")])
# CREATE CALLBACKS
checkpoint_filepath = f'{save_dir}/{get_model_name(fold_var)}'
checkpoint = tf.keras.callbacks.ModelCheckpoint(filepath=checkpoint_filepath,
monitor='val_loss', verbose=1, save_best_only=True, mode='min')
callbacks_list = [checkpoint]
# There can be other callbacks, but just showing one because it involves the model name
# This saves the best model
# FIT THE MODEL
history = model.fit(train_images, epochs=100,
callbacks=callbacks_list,
validation_data=val_images)
# LOAD BEST MODEL to evaluate the performance of the model
model.load_weights(f"{save_dir}/resnet_"+str(fold_var)+".h5")
results = model.evaluate(val_images)
results = dict(zip(model.metrics_names,results))
VALIDATION_R2.append(results['R2'])
VALIDATION_MAE.append(results['mae'])
VALIDATION_MSE.append(results['mse'])
VALIDATION_LOSS.append(results['loss'])
tf.keras.backend.clear_session()
fold_var += 1
train_images = train_generator.flow_from_dataframe(
dataframe=train_df,
x_col='Filepath',
y_col='Turbidity',
target_size=(224, 224),
color_mode='rgb',
class_mode='raw',
shuffle=False,
)
test_images = test_generator.flow_from_dataframe(
dataframe=test_df,
x_col='Filepath',
y_col='Turbidity',
target_size=(224, 224),
color_mode='rgb',
class_mode='raw',
shuffle=False
)
min_fold = min(range(len(VALIDATION_LOSS)), key=VALIDATION_LOSS.__getitem__) + 1
model = get_model()
model.load_weights(f"{save_dir}/resnet_"+str(min_fold)+".h5")
opt = tf.keras.optimizers.Adam(learning_rate=1e-3, decay=1e-6)
model.compile(loss=tf.keras.losses.Huber(), optimizer=opt, metrics=['mae','mse', tfa.metrics.RSquare(name="R2")])
test_pred = np.squeeze(model.predict(test_images))
test_true = test_images.labels
test_residuals = test_true - test_pred
train_pred = np.squeeze(model.predict(train_images))
train_true = train_images.labels
train_residuals = train_true - train_pred
train_score = model.evaluate(train_images)
test_score = model.evaluate(test_images)
print('test ',test_score)
print('train ', train_score)
f, axs = plt.subplots(1, 2, figsize=(8,6), gridspec_kw={'width_ratios': [4, 1]})
f.suptitle(f'Residual Plot - {name}', fontsize=13, fontweight='bold', y=0.92)
axs[0].scatter(train_pred,train_residuals, label='Train Set', alpha=0.75, color='tab:blue')
axs[0].scatter(test_pred,test_residuals, label='Test Set', alpha=0.75, color='tab:orange')
axs[0].set_ylabel('Residual (NTU)')
axs[0].set_xlabel('Predicted Turbidity (NTU)')
axs[0].axhline(0, color='black')
axs[0].legend()
axs[0].grid()
axs[1].hist(train_residuals, bins=50, orientation="horizontal", density=True, alpha=0.9, color='tab:blue')
axs[1].hist(test_residuals, bins=50, orientation="horizontal", density=True, alpha=0.75, color='tab:orange')
axs[1].axhline(0, color='black')
axs[1].set_xlabel('Distribution')
axs[1].yaxis.tick_right()
axs[1].grid(axis='y')
plt.subplots_adjust(wspace=0.05)
plt.savefig(f'{save_dir}/residualPlot_{name}.png', dpi=150)
plt.show()
fig, ax = plt.subplots(1,2,figsize=(13,6))
fig.suptitle(f'Nilai Prediksi vs Observasi - {name}', fontsize=13, fontweight='bold', y=0.96)
ax[0].scatter(test_true,test_pred, label=f'$Test\ R^2=${round(test_score[3],3)}',color='tab:orange', alpha=0.75)
theta = np.polyfit(test_true, test_pred, 1)
y_line = theta[1] + theta[0] * test_true
ax[0].plot([test_true.min(), test_true.max()], [y_line.min(), y_line.max()],'k--', lw=2,label='best fit')
ax[0].plot([test_true.min(), test_true.max()], [test_true.min(), test_true.max()], 'k--', lw=2, label='identity',color='dimgray')
ax[0].set_xlabel('Measured Turbidity (NTU)')
ax[0].set_ylabel('Predicted Turbidity (NTU)')
ax[0].set_title(f'Test Set', fontsize=10, fontweight='bold')
ax[0].set_xlim([0, 130])
ax[0].set_ylim([0, 130])
ax[0].grid()
ax[0].legend()
ax[1].scatter(train_true,train_pred, label=f'$Train\ R^2=${round(train_score[3],3)}', color='tab:blue', alpha=0.75)
theta2 = np.polyfit(train_true, train_pred, 1)
y_line2 = theta2[1] + theta2[0] * train_true
ax[1].plot([train_true.min(), train_true.max()], [y_line2.min(), y_line2.max()],'k--', lw=2,label='best fit')
ax[1].plot([train_true.min(), train_true.max()], [train_true.min(),train_true.max()], 'k--', lw=2, label='identity',color='dimgray')
ax[1].set_xlabel('Measured Turbidity (NTU)')
ax[1].set_ylabel('Predicted Turbidity (NTU)')
ax[1].set_title(f'Train Set', fontsize=10, fontweight='bold')
ax[1].set_xlim([0, 130])
ax[1].set_ylim([0, 130])
ax[1].grid()
ax[1].legend()
plt.savefig(f'{save_dir}/predErrorPlot_{name}.png', dpi=150)
plt.show()
cv_df = pd.DataFrame.from_dict({'val_loss': VALIDATION_LOSS, 'val_mae': VALIDATION_MAE, 'val_mse': VALIDATION_MSE, 'val_R2': VALIDATION_R2}, orient='index').T
cv_csv_file = f'{save_dir}/cross_val.csv'
with open(cv_csv_file, mode='w') as f:
cv_df.to_csv(f)
from google.colab import drive
drive.mount('/content/gdrive')
save_path = f"/content/gdrive/MyDrive/MODEL BERHASIL/ResNet/{name}"
if not os.path.exists(save_path):
os.makedirs(save_path)
oripath = "saved_models/."
!cp -a "{oripath}" "{save_path}" # copies files to google drive
```
##### Copyright 2019 The TensorFlow Authors.
```
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
```
# Lab 04a: Dogs vs Cats Image Classification Without Image Augmentation
<table class="tfo-notebook-buttons" align="left">
<td>
<a target="_blank" href="https://colab.research.google.com/github/sres-dl-course/sres-dl-course.github.io/blob/master/notebooks/python/L04_C01_dogs_vs_cats_without_augmentation.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a>
</td>
<td>
<a target="_blank" href="https://github.com/sres-dl-course/sres-dl-course.github.io/blob/master/notebooks/python/L04_C01_dogs_vs_cats_without_augmentation.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a>
</td>
</table>
In this tutorial, we will discuss how to classify images into pictures of cats or pictures of dogs. We'll build an image classifier using the `tf.keras.Sequential` model and load data using `tf.keras.preprocessing.image.ImageDataGenerator`.
## Specific concepts that will be covered:
In the process, we will build practical experience and develop intuition around the following concepts
* Building _data input pipelines_ using the `tf.keras.preprocessing.image.ImageDataGenerator` class — How can we efficiently work with data on disk to interface with our model?
* _Overfitting_ - what is it, how to identify it?
<hr>
**Before you begin**
Before running the code in this notebook, reset the runtime by going to **Runtime -> Reset all runtimes** in the menu above. If you have been working through several notebooks, this will help you avoid reaching Colab's memory limits.
# Importing packages
Let's start by importing required packages:
* os — to read files and directory structure
* numpy — for some matrix math outside of TensorFlow
* matplotlib.pyplot — to plot the graph and display images in our training and validation data
```
import tensorflow as tf
from tensorflow.keras.preprocessing.image import ImageDataGenerator
import os
import matplotlib.pyplot as plt
import numpy as np
import logging
logger = tf.get_logger()
logger.setLevel(logging.ERROR)
```
# Data Loading
To build our image classifier, we begin by downloading the dataset. The dataset we are using is a filtered version of <a href="https://www.kaggle.com/c/dogs-vs-cats/data" target="_blank">Dogs vs. Cats</a> dataset from Kaggle (ultimately, this dataset is provided by Microsoft Research).
In previous Colabs, we've used <a href="https://www.tensorflow.org/datasets" target="_blank">TensorFlow Datasets</a>, which is a very easy and convenient way to use datasets. In this Colab however, we will make use of the class `tf.keras.preprocessing.image.ImageDataGenerator` which will read data from disk. We therefore need to directly download *Dogs vs. Cats* from a URL and unzip it to the Colab filesystem.
```
_URL = 'https://storage.googleapis.com/mledu-datasets/cats_and_dogs_filtered.zip'
zip_dir = tf.keras.utils.get_file('cats_and_dogs_filtered.zip', origin=_URL, extract=True)
```
The dataset we have downloaded has the following directory structure.
<pre style="font-size: 10.0pt; font-family: Arial; line-height: 2; letter-spacing: 1.0pt;" >
<b>cats_and_dogs_filtered</b>
|__ <b>train</b>
|______ <b>cats</b>: [cat.0.jpg, cat.1.jpg, cat.2.jpg ...]
|______ <b>dogs</b>: [dog.0.jpg, dog.1.jpg, dog.2.jpg ...]
|__ <b>validation</b>
|______ <b>cats</b>: [cat.2000.jpg, cat.2001.jpg, cat.2002.jpg ...]
|______ <b>dogs</b>: [dog.2000.jpg, dog.2001.jpg, dog.2002.jpg ...]
</pre>
We can list the directories with the following terminal command:
```
zip_dir_base = os.path.dirname(zip_dir)
!find $zip_dir_base -type d -print
```
We'll now assign variables with the proper file path for the training and validation sets.
```
base_dir = os.path.join(os.path.dirname(zip_dir), 'cats_and_dogs_filtered')
train_dir = os.path.join(base_dir, 'train')
validation_dir = os.path.join(base_dir, 'validation')
train_cats_dir = os.path.join(train_dir, 'cats') # directory with our training cat pictures
train_dogs_dir = os.path.join(train_dir, 'dogs') # directory with our training dog pictures
validation_cats_dir = os.path.join(validation_dir, 'cats') # directory with our validation cat pictures
validation_dogs_dir = os.path.join(validation_dir, 'dogs') # directory with our validation dog pictures
```
### Understanding our data
Let's look at how many cat and dog images we have in our training and validation directories.
```
num_cats_tr = len(os.listdir(train_cats_dir))
num_dogs_tr = len(os.listdir(train_dogs_dir))
num_cats_val = len(os.listdir(validation_cats_dir))
num_dogs_val = len(os.listdir(validation_dogs_dir))
total_train = num_cats_tr + num_dogs_tr
total_val = num_cats_val + num_dogs_val
print('total training cat images:', num_cats_tr)
print('total training dog images:', num_dogs_tr)
print('total validation cat images:', num_cats_val)
print('total validation dog images:', num_dogs_val)
print("--")
print("Total training images:", total_train)
print("Total validation images:", total_val)
```
# Setting Model Parameters
For convenience, we'll set up variables that will be used later while pre-processing our dataset and training our network.
```
BATCH_SIZE = 100  # Number of training examples to process before updating our model's variables
IMG_SHAPE = 150 # Our training data consists of images with width of 150 pixels and height of 150 pixels
```
# Data Preparation
Images must be formatted into appropriately pre-processed floating point tensors before being fed into the network. The steps involved in preparing these images are:
1. Read images from the disk
2. Decode the contents of these images and convert them into the proper grid format according to their RGB content
3. Convert them into floating point tensors
4. Rescale the tensors from values between 0 and 255 to values between 0 and 1
Fortunately, all these tasks can be done using the class **tf.keras.preprocessing.image.ImageDataGenerator**.
We can set this up in a couple of lines of code.
```
train_image_generator = ImageDataGenerator(rescale=1./255) # Generator for our training data
validation_image_generator = ImageDataGenerator(rescale=1./255) # Generator for our validation data
```
After defining our generators for training and validation images, the **flow_from_directory** method will load images from disk, apply rescaling, and resize them in a single line of code.
```
train_data_gen = train_image_generator.flow_from_directory(batch_size=BATCH_SIZE,
directory=train_dir,
shuffle=True,
target_size=(IMG_SHAPE,IMG_SHAPE), #(150,150)
class_mode='binary')
val_data_gen = validation_image_generator.flow_from_directory(batch_size=BATCH_SIZE,
directory=validation_dir,
shuffle=False,
target_size=(IMG_SHAPE,IMG_SHAPE), #(150,150)
class_mode='binary')
```
### Visualizing Training images
We can visualize our training images by getting a batch of images from the training generator, and then plotting a few of them using `matplotlib`.
```
sample_training_images, _ = next(train_data_gen)
```
The `next` function returns a batch from the dataset. One batch is a tuple of (*many images*, *many labels*). For right now, we're discarding the labels because we just want to look at the images.
```
# This function will plot images in the form of a grid with 1 row and 5 columns where images are placed in each column.
def plotImages(images_arr):
fig, axes = plt.subplots(1, 5, figsize=(20,20))
axes = axes.flatten()
for img, ax in zip(images_arr, axes):
ax.imshow(img)
plt.tight_layout()
plt.show()
plotImages(sample_training_images[:5]) # Plot images 0-4
```
# Model Creation
## Exercise 4.1 Define the model
The model consists of four convolution blocks with a max pool layer in each of them. Then we have a fully connected layer with 512 units, with a `relu` activation function. The model will output class probabilities for two classes — dogs and cats — using `softmax`.
The list of model layers:
* 2D Convolution - 32 filters, 3x3 kernel, ReLU activation
* 2D Max pooling - 2x2 kernel
* 2D Convolution - 64 filters, 3x3 kernel, ReLU activation
* 2D Max pooling - 2x2 kernel
* 2D Convolution - 128 filters, 3x3 kernel, ReLU activation
* 2D Max pooling - 2x2 kernel
* 2D Convolution - 128 filters, 3x3 kernel, ReLU activation
* 2D Max pooling - 2x2 kernel
* Flatten
* Dense - 512 nodes
* Dense - 2 nodes
Check the documentation for how to specify the layers [https://www.tensorflow.org/api_docs/python/tf/keras/layers](https://www.tensorflow.org/api_docs/python/tf/keras/layers)
```
model = tf.keras.models.Sequential([
# TODO - Create the CNN model as specified above
])
```
### Exercise 4.1 Solution
The solution for the exercise can be found [here](https://colab.research.google.com/github/rses-dl-course/rses-dl-course.github.io/blob/master/notebooks/python/solutions/E4.1.ipynb)
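For quick reference, here is a minimal sketch of one way the TODO cell above could be filled in, following the layer list given earlier. It assumes `IMG_SHAPE` from the setup cell and is not necessarily identical to the linked solution.
```
model = tf.keras.models.Sequential([
    # Four convolution blocks, each followed by max pooling
    tf.keras.layers.Conv2D(32, (3, 3), activation='relu', input_shape=(IMG_SHAPE, IMG_SHAPE, 3)),
    tf.keras.layers.MaxPooling2D((2, 2)),

    tf.keras.layers.Conv2D(64, (3, 3), activation='relu'),
    tf.keras.layers.MaxPooling2D((2, 2)),

    tf.keras.layers.Conv2D(128, (3, 3), activation='relu'),
    tf.keras.layers.MaxPooling2D((2, 2)),

    tf.keras.layers.Conv2D(128, (3, 3), activation='relu'),
    tf.keras.layers.MaxPooling2D((2, 2)),

    # Classifier head: flatten, one fully connected layer, then a 2-class softmax output
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dense(512, activation='relu'),
    tf.keras.layers.Dense(2, activation='softmax')
])
```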
### Exercise 4.2 Compile the model
As usual, we will use the `adam` optimizer. Since we output a softmax categorization, we'll use `sparse_categorical_crossentropy` as the loss function. We would also like to look at training and validation accuracy on each epoch as we train our network, so we are passing in the metrics argument.
```
# TODO - Compile the model
```
#### Exercise 4.2 Solution
The solution for the exercise can be found [here](https://colab.research.google.com/github/rses-dl-course/rses-dl-course.github.io/blob/master/notebooks/python/solutions/E4.2.ipynb)
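As a minimal sketch of the compile step described above (check the linked solution for the official version):
```
model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])
```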
### Model Summary
Let's look at all the layers of our network using **summary** method.
```
model.summary()
```
### Exercise 4.3 Train the model
It's time we train our network.
* Since we have a validation dataset, we can use this to evaluate our model as it trains by adding the `validation_data` parameter.
* `validation_steps` can also be added if you'd like to use less than the full validation set.
```
# TODO - Fit the model
```
#### Exercise 4.3 Solution
The solution for the exercise can be found [here](https://colab.research.google.com/github/rses-dl-course/rses-dl-course.github.io/blob/master/notebooks/python/solutions/E4.3.ipynb)
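A minimal sketch of the training step, assuming the generators and image counts defined earlier. It also defines the `EPOCHS` and `history` names used by the plotting cell below; the epoch count itself is an arbitrary choice, not part of the original exercise.
```
EPOCHS = 10  # arbitrary choice for this sketch; increase for a longer training run

history = model.fit(
    train_data_gen,
    steps_per_epoch=int(np.ceil(total_train / float(BATCH_SIZE))),
    epochs=EPOCHS,
    validation_data=val_data_gen,
    validation_steps=int(np.ceil(total_val / float(BATCH_SIZE)))
)
```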
### Visualizing results of the training
We'll now visualize the results we get after training our network.
```
acc = history.history['accuracy']
val_acc = history.history['val_accuracy']
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs_range = range(EPOCHS)
plt.figure(figsize=(20, 8))
plt.subplot(1, 2, 1)
plt.plot(epochs_range, acc, label='Training Accuracy')
plt.plot(epochs_range, val_acc, label='Validation Accuracy')
plt.legend(loc='lower right')
plt.title('Training and Validation Accuracy')
plt.subplot(1, 2, 2)
plt.plot(epochs_range, loss, label='Training Loss')
plt.plot(epochs_range, val_loss, label='Validation Loss')
plt.legend(loc='upper right')
plt.title('Training and Validation Loss')
plt.savefig('./foo.png')
plt.show()
```
As we can see from the plots, training accuracy and validation accuracy are off by a large margin, and our model has achieved only around **70%** accuracy on the validation set (depending on the number of epochs you trained for).
This is a clear indication of overfitting. Once the training and validation curves start to diverge, our model has started to memorize the training data and is unable to perform well on the validation data.
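As a quick numeric check of the same thing (assuming the `history` object from the training step above), you can compare the final training and validation accuracies directly:
```
final_train_acc = history.history['accuracy'][-1]
final_val_acc = history.history['val_accuracy'][-1]
print(f"Final training accuracy:   {final_train_acc:.3f}")
print(f"Final validation accuracy: {final_val_acc:.3f}")
print(f"Gap (a large gap suggests overfitting): {final_train_acc - final_val_acc:.3f}")
```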
# CHANDAN KUMAR (BATCH 3) - GOOGLE COLAB / Logistic Regression & Ridge & Lasso Regression
## (Rahul Agnihotri (T.L.))
DATASET: [HEART](https://drive.google.com/file/d/10dopwCjH4VE557tSynCcY3fV9OBowq9h/view?usp=sharing)
# Packages to load
```
import numpy as np
import pandas as pd
from sklearn.linear_model import Ridge
from sklearn.linear_model import Lasso
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.model_selection import GridSearchCV
# for hiding warning
import warnings
warnings.filterwarnings('ignore')
```
# Input directory
```
heart_df = pd.read_csv(r'/content/heart.csv')
heart_df
```
# About the dataset
The "target" field refers to the presence of heart disease in the patient. It is integer valued: 0 = no/less chance of heart attack and 1 = more chance of heart attack.
Attribute Information
- 1) age
- 2) sex
- 3) chest pain type (4 values)
- 4) resting blood pressure
- 5) serum cholesterol in mg/dl
- 6) fasting blood sugar > 120 mg/dl
- 7) resting electrocardiographic results (values 0, 1, 2)
- 8) maximum heart rate achieved
- 9) exercise induced angina
- 10) oldpeak = ST depression induced by exercise relative to rest
- 11) the slope of the peak exercise ST segment
- 12) number of major vessels (0-3) colored by fluoroscopy
- 13) thal: 0 = normal; 1 = fixed defect; 2 = reversible defect
- 14) target: 0 = less chance of heart attack, 1 = more chance of heart attack
# Get to know the data
```
heart_df.head()
heart_df.dtypes
heart_df.isnull().sum()
print('Shape : ',heart_df.shape)
print('Describe : ',heart_df.describe())
```
# EDA (Exploratory Data Analysis)
```
#import pandas_profiling as pp
#pp.ProfileReport(heart_df)
%matplotlib inline
from matplotlib import pyplot as plt
fig,axes=plt.subplots(nrows=1,ncols=1,figsize=(10,5))
sns.countplot(heart_df.target)
fig,axes=plt.subplots(nrows=1,ncols=1,figsize=(15,10))
sns.distplot(heart_df['age'],hist=True,kde=True,rug=False,label='age',norm_hist=True)
heart_df.columns
corr = heart_df.corr(method = 'pearson')
corr
colormap = plt.cm.OrRd
plt.figure(figsize=(15, 10))
plt.title("Person Correlation of Features", y = 1.05, size = 15)
sns.heatmap(corr, linecolor = "white", cmap = colormap, annot = True)
import plotly.express as px
px.bar(heart_df, x='age', y='target', color='sex', title='Heart attack patients: age range and sex',
labels={'target': 'Number of patients', 'age': 'Age of patient'})
```
# Creating and Predicting Learning Models
```
X= heart_df.drop(columns= ['target'])
y= heart_df['target']
```
## Data normalization
```
from sklearn.preprocessing import MinMaxScaler
# Data normalization [0, 1]
transformer = MinMaxScaler()
transformer.fit(X)
X = transformer.transform(X)
X
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=123)
from sklearn.linear_model import LogisticRegression
lr = LogisticRegression()
lr.fit(x_train,y_train)
y_pred = lr.predict( x_test)
y_pred_proba = lr.predict_proba(x_test)[:, 1]
```
## Confusion matrix
Options accepted by `plot_confusion_matrix` (see the commented call in the cell below):
- conf_mat=multiclass
- colorbar=True
- show_absolute=False
- show_normed=True
- class_names=class_names
```
from sklearn.metrics import confusion_matrix, classification_report
from mlxtend.plotting import plot_confusion_matrix
cm=confusion_matrix(y_test, y_pred)
fig, ax = plot_confusion_matrix(conf_mat=cm)
plt.rcParams['font.size'] = 40
#(conf_mat=multiclass,colorbar=True, show_absolute=False, show_normed=True, class_names=class_names)
plt.show()
# 0,0
# 0,1
# 1,0
# 1,1
print(classification_report(y_test, y_pred))
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.metrics import accuracy_score, classification_report, precision_score, recall_score
from sklearn.metrics import confusion_matrix, precision_recall_curve, roc_curve, auc, log_loss
[fpr, tpr, thr] = roc_curve(y_test, y_pred_proba)
print('Train/Test split results:')
print(lr.__class__.__name__+" accuracy is %2.3f" % accuracy_score(y_test, y_pred))
print(lr.__class__.__name__+" log_loss is %2.3f" % log_loss(y_test, y_pred_proba))
print(lr.__class__.__name__+" auc is %2.3f" % auc(fpr, tpr))
idx = np.min(np.where(tpr > 0.95)) # index of the first threshold for which the sensitivity > 0.95
plt.figure(figsize=(10,10))
plt.plot(fpr, tpr, color='coral', label='ROC curve (area = %0.3f)' % auc(fpr, tpr))
plt.plot([0, 1], [0, 1], 'k--')
plt.plot([0, fpr[idx]], [tpr[idx], tpr[idx]], '--', color='blue')
plt.plot([fpr[idx], fpr[idx]], [0, tpr[idx]], '--', color='blue')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate (1 - specificity)', fontsize=5)
plt.ylabel('True Positive Rate (recall)', fontsize=5)
plt.title('Receiver operating characteristic (ROC) curve')
plt.legend(loc="lower right")
plt.show()
heart_df.corr()
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import cross_val_score
from sklearn import metrics
LR_model= LogisticRegression(solver='liblinear')  # liblinear supports both the l1 and l2 penalties searched below
tuned_parameters = {'C': [0.001, 0.01, 0.1, 1, 10, 100, 1000] ,
'penalty':['l1','l2']
}
```
L1 and L2 are regularization penalties. They are used to avoid overfitting: both prevent overfitting by shrinking (imposing a penalty on) the coefficients.
L1 is the first-order norm |x1 - x2| (|w| in the regularization case), which is simply the absolute distance between two points, whereas L2 is the second-order norm corresponding to the squared Euclidean distance, |x1 - x2|^2 (|w|^2 in the regularization case).
In simple words, L2 (Ridge) shrinks all the coefficients by similar proportions but eliminates none, while L1 (Lasso) can shrink some coefficients to zero, performing variable selection. If all the features are correlated with the label, Ridge tends to outperform Lasso, as the coefficients are never zero in Ridge. If only a subset of features is correlated with the label, Lasso tends to outperform Ridge, since in a Lasso model some coefficients can be shrunk to zero.
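As a quick illustration of that difference on this dataset, here is a minimal sketch that fits both penalties and counts how many coefficients each one drives exactly to zero. It assumes the `x_train`/`y_train` split and `heart_df` created earlier; the alpha values are arbitrary.
```
from sklearn.linear_model import Lasso, Ridge

lasso_demo = Lasso(alpha=0.05).fit(x_train, y_train)   # L1 penalty
ridge_demo = Ridge(alpha=1.0).fit(x_train, y_train)    # L2 penalty

feature_names = heart_df.drop(columns=['target']).columns
print(f"Lasso zeroed {(lasso_demo.coef_ == 0).sum()} of {len(feature_names)} coefficients")
print(f"Ridge zeroed {(ridge_demo.coef_ == 0).sum()} of {len(feature_names)} coefficients")
```
Typically Lasso removes several features outright, while Ridge keeps all of them small but non-zero.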
```
heart_df.corr()
from sklearn.model_selection import GridSearchCV
LR= GridSearchCV(LR_model, tuned_parameters,cv=10)
LR.fit(x_train,y_train)
print(LR.best_params_)
y_prob = LR.predict_proba(x_test)[:,1] # This will give positive class prediction probabilities
y_pred = np.where(y_prob > 0.5, 1, 0) # This will threshold the probabilities to give class predictions.
LR.score(x_test, y_test)
confusion_matrix=metrics.confusion_matrix(y_test,y_pred)
confusion_matrix
from sklearn.metrics import confusion_matrix, classification_report
from mlxtend.plotting import plot_confusion_matrix
cm=confusion_matrix(y_test, y_pred)
fig, ax = plot_confusion_matrix(conf_mat=cm)
plt.rcParams['font.size'] = 40
#(conf_mat=multiclass,colorbar=True, show_absolute=False, show_normed=True, class_names=class_names)
plt.show()
auc_roc=metrics.classification_report(y_test,y_pred)
auc_roc
auc_roc=metrics.roc_auc_score(y_test,y_pred)
auc_roc
from sklearn.metrics import roc_curve, auc
false_positive_rate, true_positive_rate, thresholds = roc_curve(y_test, y_prob)
roc_auc = auc(false_positive_rate, true_positive_rate)
roc_auc
LR_ridge= LogisticRegression(penalty='l2')
LR_ridge.fit(x_train,y_train)
y_prob = LR_ridge.predict_proba(x_test)[:,1] # This will give positive class prediction probabilities
y_pred = np.where(y_prob > 0.5, 1, 0) # This will threshold the probabilities to give class predictions.
LR_ridge.score(x_test, y_test)
confusion_matrix=metrics.confusion_matrix(y_test,y_pred)
confusion_matrix
from sklearn.metrics import confusion_matrix, classification_report
from mlxtend.plotting import plot_confusion_matrix
cm=confusion_matrix(y_test, y_pred)
fig, ax = plot_confusion_matrix(conf_mat=cm)
plt.rcParams['font.size'] = 40
#(conf_mat=multiclass,colorbar=True, show_absolute=False, show_normed=True, class_names=class_names)
plt.show()
auc_roc=metrics.classification_report(y_test,y_pred)
auc_roc
auc_roc=metrics.roc_auc_score(y_test,y_pred)
auc_roc
from sklearn.metrics import roc_curve, auc
false_positive_rate, true_positive_rate, thresholds = roc_curve(y_test, y_prob)
roc_auc = auc(false_positive_rate, true_positive_rate)
roc_auc
```
# **EXPERIMENTAL ZONE**
# LASSO AND RIDGE
```
Training_Accuracy_Before = []
Testing_Accuracy_Before = []
Training_Accuracy_After = []
Testing_Accuracy_After = []
Models = ['Linear Regression', 'Lasso Regression', 'Ridge Regression']
alpha_space = np.logspace(-4, 0, 30) # Checking for alpha from .0001 to 1 and finding the best value for alpha
alpha_space
ridge_scores = []
ridge = Ridge(normalize = True)
for alpha in alpha_space:
ridge.alpha = alpha
val = np.mean(cross_val_score(ridge,x_train,y_train, cv = 10))
ridge_scores.append(val)
lasso_scores = []
lasso = Lasso(normalize = True)
for alpha in alpha_space:
lasso.alpha = alpha
val = np.mean(cross_val_score(lasso, x_train,y_train, cv = 10))
lasso_scores.append(val)
plt.figure(figsize=(8, 8))
plt.plot(alpha_space, ridge_scores, marker = 'D', label = "Ridge")
plt.plot(alpha_space, lasso_scores, marker = 'D', label = "Lasso")
plt.legend()
plt.show()
# Performing GridSearchCV with Cross Validation technique on Lasso Regression and finding the optimum value of alpha
params = {'alpha': (np.logspace(-8, 8, 100))} # It will check from 1e-08 to 1e+08
lasso = Lasso(normalize=True)
lasso_model = GridSearchCV(lasso, params, cv = 10)
lasso_model.fit(x_train, y_train)
print(lasso_model.best_params_)
print(lasso_model.best_score_)
# Using value of alpha as 0.009545 to get best accuracy for Lasso Regression
lasso = Lasso(alpha = 0.009545, normalize = True)
lasso.fit(x_train, y_train)
train_score = lasso.score(x_train, y_train)
print(train_score)
test_score = lasso.score(x_test, y_test)
print(test_score)
Training_Accuracy_Before.append(train_score)
Testing_Accuracy_Before.append(test_score)
# Performing GridSearchCV with Cross Validation technique on Ridge Regression and finding the optimum value of alpha
params = {'alpha': (np.logspace(-8, 8, 100))} # It will check from 1e-08 to 1e+08
ridge = Ridge(normalize=True)
ridge_model = GridSearchCV(ridge, params, cv = 10)
ridge_model.fit(x_train, y_train)
print(ridge_model.best_params_)
print(ridge_model.best_score_)
# Using value of alpha as 1.2045035 to get best accuracy for Ridge Regression
ridge = Ridge(alpha = 1.2045035, normalize = True)
ridge.fit(x_train, y_train)
train_score = ridge.score(x_train, y_train)
print(train_score)
test_score = ridge.score(x_test, y_test)
print(test_score)
Training_Accuracy_Before.append(train_score)
Testing_Accuracy_Before.append(test_score)
coefficients = lasso.coef_
coefficients
from sklearn.linear_model import LinearRegression
logreg = LinearRegression()
logreg.fit(x_train, y_train)
train_score = logreg.score(x_train, y_train)
print(train_score)
test_score = logreg.score(x_test, y_test)
print(test_score)
Training_Accuracy_After.append(train_score)
Testing_Accuracy_After.append(test_score)
# Performing GridSearchCV with Cross Validation technique on Lasso Regression and finding the optimum value of alpha
params = {'alpha': (np.logspace(-8, 8, 100))} # It will check from 1e-08 to 1e+08
lasso = Lasso(normalize=True)
lasso_model = GridSearchCV(lasso, params, cv = 10)
lasso_model.fit(x_train, y_train)
print(lasso_model.best_params_)
print(lasso_model.best_score_)
# Using value of alpha as 0.009545 to get best accuracy for Lasso Regression
lasso = Lasso(alpha = 0.009545, normalize = True)
lasso.fit(x_train, y_train)
train_score = lasso.score(x_train, y_train)
print(train_score)
test_score = lasso.score(x_test, y_test)
print(test_score)
Training_Accuracy_After.append(train_score)
Testing_Accuracy_After.append(test_score)
# Performing GridSearchCV with Cross Validation technique on Ridge Regression and finding the optimum value of alpha
params = {'alpha': (np.logspace(-8, 8, 100))} # It will check from 1e-08 to 1e+08
ridge = Ridge(normalize=True)
ridge_model = GridSearchCV(ridge, params, cv = 10)
ridge_model.fit(x_train, y_train)
print(ridge_model.best_params_)
print(ridge_model.best_score_)
# Using value of alpha as 1.204503 to get best accuracy for Ridge Regression
ridge = Ridge(alpha = 1.204503, normalize = True)
ridge.fit(x_train, y_train)
train_score = ridge.score(x_train, y_train)
print(train_score)
test_score = ridge.score(x_test, y_test)
print(test_score)
Training_Accuracy_After.append(train_score)
Testing_Accuracy_After.append(test_score)
plt.figure(figsize=(50,10))
plt.plot(Training_Accuracy_Before, label = 'Training_Accuracy_Before')
plt.plot(Training_Accuracy_After, label = 'Training_Accuracy_After')
plt.xticks(range(len(Models)), Models, rotation=45)
plt.title('Training Accuracy Behaviour')
plt.legend()
plt.show()
plt.figure(figsize=(50,10))
plt.plot(Testing_Accuracy_Before, label = 'Testing_Accuracy_Before')
plt.plot(Testing_Accuracy_After, label = 'Testing_Accuracy_After')
plt.xticks(range(len(Models)), Models, rotation=45)
plt.title('Testing Accuracy Behaviour')
plt.legend()
plt.show()
```
# **DANGER ZONE**
```
#list of alpha for tuning
params = {'alpha' : [0.001 , 0.001,0.01,0.05,
0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,.9,
1.0,2.0,3.0,4.0,5.0,6.0,7.0,8.0,9.0,
10.0,20,30,40,50,100,500,1000]}
ridge = Ridge()
# cross validation
folds = 5
model_cv = GridSearchCV(estimator = ridge,
param_grid = params,
scoring = 'neg_mean_absolute_error',
cv = folds,
return_train_score = True,
verbose = 1)
model_cv.fit(x_train,y_train)
#Checking the value of optimum number of parameters
print(model_cv.best_params_)
print(model_cv.best_score_)
cv_results = pd.DataFrame(model_cv.cv_results_)
cv_results = cv_results[cv_results['param_alpha']<=1000]
cv_results
# plotting mean test and train scores with alpha
cv_results['param_alpha'] = cv_results['param_alpha'].astype('float64')  # keep fractional alphas; casting to int would truncate them to 0
plt.figure(figsize=(16,5))
# plotting
plt.plot(cv_results['param_alpha'], cv_results['mean_train_score'])
plt.plot(cv_results['param_alpha'], cv_results['mean_test_score'])
plt.xlabel('alpha')
plt.ylabel('Negative Mean Absolute Error')
plt.title("Negative Mean Absolute Error and alpha")
plt.legend(['train score', 'test score'], loc='upper right')
plt.show()
```
# Insights:
```
alpha = 4
ridge = Ridge(alpha=alpha)
ridge.fit(x_train,y_train)
ridge.coef_
```
# Selected Economic Characteristics: Employment Status from the American Community Survey
**[Work in progress]**
This notebook downloads [selected economic characteristics (DP03)](https://data.census.gov/cedsci/table?tid=ACSDP5Y2018.DP03) from the American Community Survey 2018 5-Year Data.
Data source: [American Community Survey 5-Year Data 2018](https://www.census.gov/data/developers/data-sets/acs-5year.html)
Authors: Peter Rose ([email protected]), Ilya Zaslavsky ([email protected])
```
import os
import pandas as pd
from pathlib import Path
import time
pd.options.display.max_rows = None # display all rows
pd.options.display.max_columns = None  # display all columns
NEO4J_IMPORT = Path(os.getenv('NEO4J_IMPORT'))
print(NEO4J_IMPORT)
```
## Download selected variables
* [Selected economic characteristics for US](https://data.census.gov/cedsci/table?tid=ACSDP5Y2018.DP03)
* [List of variables as HTML](https://api.census.gov/data/2018/acs/acs5/profile/groups/DP03.html) or [JSON](https://api.census.gov/data/2018/acs/acs5/profile/groups/DP03/)
* [Description of variables](https://www2.census.gov/programs-surveys/acs/tech_docs/subject_definitions/2018_ACSSubjectDefinitions.pdf)
* [Example URLs for API](https://api.census.gov/data/2018/acs/acs5/profile/examples.html)
### Specify variables from DP03 group and assign property names
Names must follow the [Neo4j property naming conventions](https://neo4j.com/docs/getting-started/current/graphdb-concepts/#graphdb-naming-rules-and-recommendations).
```
variables = {# EMPLOYMENT STATUS
'DP03_0001E': 'population16YearsAndOver',
'DP03_0002E': 'population16YearsAndOverInLaborForce',
'DP03_0002PE': 'population16YearsAndOverInLaborForcePct',
'DP03_0003E': 'population16YearsAndOverInCivilianLaborForce',
'DP03_0003PE': 'population16YearsAndOverInCivilianLaborForcePct',
'DP03_0006E': 'population16YearsAndOverInArmedForces',
'DP03_0006PE': 'population16YearsAndOverInArmedForcesPct',
'DP03_0007E': 'population16YearsAndOverNotInLaborForce',
'DP03_0007PE': 'population16YearsAndOverNotInLaborForcePct'
#'DP03_0014E': 'ownChildrenOfTheHouseholderUnder6Years',
#'DP03_0015E': 'ownChildrenOfTheHouseholderUnder6YearsAllParentsInLaborForce',
#'DP03_0016E': 'ownChildrenOfTheHouseholder6To17Years',
#'DP03_0017E': 'ownChildrenOfTheHouseholder6To17YearsAllParentsInLaborForce',
}
fields = ",".join(variables.keys())
for v in variables.values():
print('e.' + v + ' = toInteger(row.' + v + '),')
print(len(variables.keys()))
```
## Download county-level data using US Census API
```
url_county = f'https://api.census.gov/data/2018/acs/acs5/profile?get={fields}&for=county:*'
df = pd.read_json(url_county, dtype='str')
df.fillna('', inplace=True)
df.head()
```
##### Add column names
```
df = df[1:].copy() # skip first row of labels
columns = list(variables.values())
columns.append('stateFips')
columns.append('countyFips')
df.columns = columns
```
Remove Puerto Rico (stateFips = 72) to limit data to US States
TODO handle data for Puerto Rico (GeoNames represents Puerto Rico as a country)
```
df.query("stateFips != '72'", inplace=True)
```
Save list of state fips (required later to get tract data by state)
```
stateFips = list(df['stateFips'].unique())
stateFips.sort()
print(stateFips)
df.head()
# Example data
df[(df['stateFips'] == '06') & (df['countyFips'] == '073')]
df['source'] = 'American Community Survey 5 year'
df['aggregationLevel'] = 'Admin2'
```
### Save data
```
df.to_csv(NEO4J_IMPORT / "03a-USCensusDP03EmploymentAdmin2.csv", index=False)
```
## Download zip-level data using US Census API
```
url_zip = f'https://api.census.gov/data/2018/acs/acs5/profile?get={fields}&for=zip%20code%20tabulation%20area:*'
df = pd.read_json(url_zip, dtype='str')
df.fillna('', inplace=True)
df.head()
```
##### Add column names
```
df = df[1:].copy() # skip first row
columns = list(variables.values())
columns.append('stateFips')
columns.append('postalCode')
df.columns = columns
df.head()
# Example data
df.query("postalCode == '90210'")
df['source'] = 'American Community Survey 5 year'
df['aggregationLevel'] = 'PostalCode'
```
### Save data
```
df.to_csv(NEO4J_IMPORT / "03a-USCensusDP03EmploymentZip.csv", index=False)
```
## Download tract-level data using US Census API
Tract-level data are only available by state, so we need to loop over all states.
```
def get_tract_data(state):
url_tract = f'https://api.census.gov/data/2018/acs/acs5/profile?get={fields}&for=tract:*&in=state:{state}'
df = pd.read_json(url_tract, dtype='str')
time.sleep(1)
# skip first row of labels
df = df[1:].copy()
# Add column names
columns = list(variables.values())
columns.append('stateFips')
columns.append('countyFips')
columns.append('tract')
df.columns = columns
return df
df = pd.concat((get_tract_data(state) for state in stateFips))
df.fillna('', inplace=True)
df['tract'] = df['stateFips'] + df['countyFips'] + df['tract']
df['source'] = 'American Community Survey 5 year'
df['aggregationLevel'] = 'Tract'
# Example data for San Diego County
df[(df['stateFips'] == '06') & (df['countyFips'] == '073')].head()
```
### Save data
```
df.to_csv(NEO4J_IMPORT / "03a-USCensusDP03EmploymentTract.csv", index=False)
df.shape
```
# Settings
```
%env TF_KERAS = 1
import os
sep_local = os.path.sep
import sys
# sys.path.append('..' + sep_local + '..' + sep_local +'..' + sep_local + '..' + sep_local + '..'+ sep_local + '..') # For Windows import
# os.chdir('..' + sep_local + '..' + sep_local +'..' + sep_local + '..' + sep_local + '..'+ sep_local + '..') # For Linux import
os.chdir('/content/Generative_Models/')
print(sep_local)
print(os.getcwd())
import tensorflow as tf
print(tf.__version__)
```
# Dataset loading
```
dataset_name='atari_pacman'
images_dir = IMG_DIR  # IMG_DIR must be defined beforehand and point at the dataset image directory (see the commented examples below)
# images_dir = '/home/azeghost/datasets/.mspacman/atari_v1/screens/mspacman' #Linux
#images_dir = 'C:\\projects\\pokemon\DS06\\'
validation_percentage = 25
valid_format = 'png'
from training.generators.file_image_generator import create_image_lists, get_generators
imgs_list = create_image_lists(
image_dir=images_dir,
validation_pct=validation_percentage,
valid_imgae_formats=valid_format,
verbose=0
)
scale=1
image_size=(160//scale, 210//scale, 3)
batch_size = 10
EPIS_LEN = 10
EPIS_SHIFT = 5
inputs_shape = image_size
latents_dim = 30
intermediate_dim = 30
training_generator, testing_generator = get_generators(
images_list=imgs_list,
image_dir=images_dir,
image_size=image_size,
batch_size=batch_size,
class_mode='episode_flat',
episode_len=EPIS_LEN,
episode_shift=EPIS_SHIFT
)
import tensorflow as tf
train_ds = tf.data.Dataset.from_generator(
lambda: training_generator,
output_types=(tf.float32, tf.float32) ,
output_shapes=(tf.TensorShape((batch_size* EPIS_LEN, ) + image_size),
tf.TensorShape((batch_size* EPIS_LEN, ) + image_size)
)
)
test_ds = tf.data.Dataset.from_generator(
lambda: testing_generator,
output_types=(tf.float32, tf.float32) ,
output_shapes=(tf.TensorShape((batch_size* EPIS_LEN, ) + image_size),
tf.TensorShape((batch_size* EPIS_LEN, ) + image_size)
)
)
_instance_scale=1.0
for data in train_ds:
_instance_scale = float(data[0].numpy().max())
break
_instance_scale = 1.0
import numpy as np
from collections.abc import Iterable
if isinstance(inputs_shape, Iterable):
_outputs_shape = np.prod(inputs_shape)
inputs_shape
```
# Model's Layers definition
```
# tdDense = lambda **kwds: tf.keras.layers.TimeDistributed(tf.keras.layers.Dense(**kwds))
# enc_lays = [tdDense(units=intermediate_dim//2, activation='relu'),
# tdDense(units=intermediate_dim//2, activation='relu'),
# tf.keras.layers.Flatten(),
# tf.keras.layers.Dense(units=latents_dim)]
# dec_lays = [tf.keras.layers.Dense(units=latents_dim, activation='relu'),
# tf.keras.layers.Reshape(inputs_shape),
# tdDense(units=intermediate_dim, activation='relu'),
# tdDense(units=_outputs_shape),
# tf.keras.layers.Reshape(inputs_shape)
# ]
enc_lays = [tf.keras.layers.Dense(units=intermediate_dim//2, activation='relu'),
tf.keras.layers.Dense(units=intermediate_dim//2, activation='relu'),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(units=latents_dim)]
dec_lays = [tf.keras.layers.Dense(units=latents_dim, activation='relu'),
tf.keras.layers.Dense(units=3*intermediate_dim//2, activation='relu'),
tf.keras.layers.Dense(units=_outputs_shape),
tf.keras.layers.Reshape(inputs_shape)]
```
# Model definition
```
model_name = dataset_name+'AE_Dense_reconst_ell'
#windows
#experiments_dir='..' + sep_local + '..' + sep_local +'..' + sep_local + '..' + sep_local + '..'+sep_local+'experiments'+sep_local + model_name
#linux
experiments_dir=os.getcwd()+ sep_local +'experiments'+sep_local + model_name
from training.autoencoding_basic.transformative.AE import autoencoder as AE
# inputs_shape=image_size
variables_params = \
[
{
'name': 'inference',
'inputs_shape':inputs_shape,
'outputs_shape':latents_dim,
'layers': enc_lays
}
,
{
'name': 'generative',
'inputs_shape':latents_dim,
'outputs_shape':inputs_shape,
'layers':dec_lays
}
]
from os.path import abspath
from utils.data_and_files.file_utils import create_if_not_exist
_restore = os.path.join(experiments_dir, 'var_save_dir')
create_if_not_exist(_restore)
absolute = abspath(_restore)
print("Restore_dir",absolute)
absolute = abspath(experiments_dir)
print("Recording_dir",absolute)
print("Current working dir",os.getcwd())
#to restore trained model, set filepath=_restore
ae = AE(
name=model_name,
latents_dim=latents_dim,
batch_size=batch_size * EPIS_LEN,
episode_len= 1,
variables_params=variables_params,
filepath=_restore
)
#ae.compile(metrics=None)
ae.compile()
```
# Callbacks
```
from training.callbacks.sample_generation import SampleGeneration
from training.callbacks.save_model import ModelSaver
es = tf.keras.callbacks.EarlyStopping(
monitor='loss',
min_delta=1e-12,
patience=12,
verbose=1,
restore_best_weights=False
)
ms = ModelSaver(filepath=_restore)
csv_dir = os.path.join(experiments_dir, 'csv_dir')
create_if_not_exist(csv_dir)
csv_dir = os.path.join(csv_dir, model_name+'.csv')
csv_log = tf.keras.callbacks.CSVLogger(csv_dir, append=True)
absolute = abspath(csv_dir)
print("Csv_dir",absolute)
image_gen_dir = os.path.join(experiments_dir, 'image_gen_dir')
create_if_not_exist(image_gen_dir)
absolute = abspath(image_gen_dir)
print("Image_gen_dir",absolute)
sg = SampleGeneration(latents_shape=latents_dim, filepath=image_gen_dir, gen_freq=5, save_img=True, gray_plot=False)
```
# Model Training
```
ae.fit(
x=train_ds,
input_kw=None,
steps_per_epoch=10,
epochs=10,
verbose=2,
callbacks=[ es, ms, csv_log, sg],
workers=-1,
use_multiprocessing=True,
validation_data=test_ds,
validation_steps=10
)
```
# Model Evaluation
## inception_score
```
from evaluation.generativity_metrics.inception_metrics import inception_score
is_mean, is_sigma = inception_score(ae, tolerance_threshold=1e-6, max_iteration=200)
print(f'inception_score mean: {is_mean}, sigma: {is_sigma}')
```
## Frechet_inception_distance
```
from evaluation.generativity_metrics.inception_metrics import frechet_inception_distance
fis_score = frechet_inception_distance(ae, training_generator, tolerance_threshold=1e-6, max_iteration=10, batch_size=32)
print(f'frechet inception distance: {fis_score}')
```
## perceptual_path_length_score
```
from evaluation.generativity_metrics.perceptual_path_length import perceptual_path_length_score
ppl_mean_score = perceptual_path_length_score(ae, training_generator, tolerance_threshold=1e-6, max_iteration=200, batch_size=32)
print(f'perceptual path length score: {ppl_mean_score}')
```
## precision score
```
from evaluation.generativity_metrics.precision_recall import precision_score
_precision_score = precision_score(ae, training_generator, tolerance_threshold=1e-6, max_iteration=200)
print(f'precision score: {_precision_score}')
```
## recall score
```
from evaluation.generativity_metrics.precision_recall import recall_score
_recall_score = recall_score(ae, training_generator, tolerance_threshold=1e-6, max_iteration=200)
print(f'recall score: {_recall_score}')
```
# Image Generation
## image reconstruction
### Training dataset
```
%load_ext autoreload
%autoreload 2
from training.generators.image_generation_testing import reconstruct_from_a_batch
from utils.data_and_files.file_utils import create_if_not_exist
save_dir = os.path.join(experiments_dir, 'reconstruct_training_images_like_a_batch_dir')
create_if_not_exist(save_dir)
reconstruct_from_a_batch(ae, training_generator, save_dir)
from utils.data_and_files.file_utils import create_if_not_exist
save_dir = os.path.join(experiments_dir, 'reconstruct_testing_images_like_a_batch_dir')
create_if_not_exist(save_dir)
reconstruct_from_a_batch(ae, testing_generator, save_dir)
```
## with Randomness
```
from training.generators.image_generation_testing import generate_images_like_a_batch
from utils.data_and_files.file_utils import create_if_not_exist
save_dir = os.path.join(experiments_dir, 'generate_training_images_like_a_batch_dir')
create_if_not_exist(save_dir)
generate_images_like_a_batch(ae, training_generator, save_dir)
from utils.data_and_files.file_utils import create_if_not_exist
save_dir = os.path.join(experiments_dir, 'generate_testing_images_like_a_batch_dir')
create_if_not_exist(save_dir)
generate_images_like_a_batch(ae, testing_generator, save_dir)
```
### Complete Randomness
```
from training.generators.image_generation_testing import generate_images_randomly
from utils.data_and_files.file_utils import create_if_not_exist
save_dir = os.path.join(experiments_dir, 'random_synthetic_dir')
create_if_not_exist(save_dir)
generate_images_randomly(ae, testing_generator, save_dir)
```
### Stacked inputs outputs and predictions
```
from training.generators.image_generation_testing import predict_from_a_batch
from utils.data_and_files.file_utils import create_if_not_exist
save_dir = os.path.join(experiments_dir, 'predictions')
create_if_not_exist(save_dir)
predict_from_a_batch(ae, testing_generator, save_dir)
```
# DS106 Machine Learning : Lesson Nine Companion Notebook
### Table of Contents <a class="anchor" id="DS106L9_toc"></a>
* [Table of Contents](#DS106L9_toc)
* [Page 1 - Introduction](#DS106L9_page_1)
* [Page 2 - What are Bayesian Statistics?](#DS106L9_page_2)
* [Page 3 - Bayes Theorem](#DS106L9_page_3)
* [Page 4 - Parts of Bayes Theorem](#DS106L9_page_4)
* [Page 5 - A/B Testing](#DS106L9_page_5)
* [Page 6 - Bayesian Network Basics](#DS106L9_page_6)
* [Page 7 - Key Terms](#DS106L9_page_7)
* [Page 8 - Lesson 4 Practice Hands-On](#DS106L9_page_8)
* [Page 9 - Lesson 4 Practice Hands-On Solution](#DS106L9_page_9)
<hr style="height:10px;border-width:0;color:gray;background-color:gray">
# Page 1 - Overview of this Module<a class="anchor" id="DS106L9_page_1"></a>
[Back to Top](#DS106L9_toc)
<hr style="height:10px;border-width:0;color:gray;background-color:gray">
```
from IPython.display import VimeoVideo
# Tutorial Video Name: Bayesian Networks
VimeoVideo('388131444', width=720, height=480)
```
The transcript for the above overview video **[is located here](https://repo.exeterlms.com/documents/V2/DataScience/Video-Transcripts/DSO106-ML-L04overview.zip)**.
# Introduction
Bayesian Networks are a way for you to apply probability knowledge in a machine learning algorithm. By the end of this lesson, you should be able to:
* Explain what a Bayesian Network is
* Perform Bayesian networks in Python
This lesson will culminate in a hands-on in which you use Bayesian networks to predict the chance of shark attack.
<hr style="height:10px;border-width:0;color:gray;background-color:gray">
# Page 2 - What are Bayesian Statistics?<a class="anchor" id="DS106L9_page_2"></a>
[Back to Top](#DS106L9_toc)
<hr style="height:10px;border-width:0;color:gray;background-color:gray">
# What are Bayesian Statistics?
*Bayesian statistics* are a branch of stats that make use of probability to test your beliefs against data. In practice, the simplest Bayesian statistics are similar in concept to the basics you learned - probability, the normal distribution, etc. But they go out of their way to change the names of everything! As if statistics weren't complicated enough! In this lesson, you'll be learning about Bayesian statistics using the terms you're already familiar with, but don't get into a fistfight with anyone if they use slightly different lingo!
---
## Bayesian Reasoning
Here's an example to ease you into the Bayesian mindset. You start off with an observation of data. For instance, say you hear a very loud, rushing noise outside. You might come up with a couple different ideas of what is going on, or hypotheses, and those are based on your previous experience. You might have a couple different options: it's a plane making the noise, or it's a tornado. Which is more likely? Well, you know that based on your past experience, tornados make this much noise only once or twice a year when they are very severe. So you're thinking that the plane is more likely.
Now add in additional data - you live on an Air Force base. Suddenly, the likelihood that the noise is a fighter jet taking off is much, much higher, and your belief that there's a tornado is almost non-existent. The brilliant thing about Bayesian statistics is that you can continually update your hypotheses based on updated data. You can even compare hypotheses to see which one fits your data better.
An important thing to note is that your data should help change your beliefs about the world, but you should not search for data to back up your beliefs!
---
<hr style="height:10px;border-width:0;color:gray;background-color:gray">
# Page 3 - Bayes Theorem<a class="anchor" id="DS106L9_page_3"></a>
[Back to Top](#DS106L9_toc)
<hr style="height:10px;border-width:0;color:gray;background-color:gray">
# Bayes Theorem
Remember back to multiple event probability? Where you either used `or` or `and` to combine the probability of multiple things happening? Well, those were great, but they implied independence - that the probability of one of those things did not impact the probability of the other whatsoever. But what happens when your two events are related in any way? For instance, color blindness is much more prevalent in males than in females. So the probability that any one random person is color blind very much depends on their gender. So you can't possibly assume that there is no relation between those two variables!
How would you calculate probability in that instance? Enter *Bayes theorem*! Bayes theorem is a special probability formula that allows you to calculate the likelihood of an event given the likelihood of another event.
---
## Bayes Formula
Here is the mathematical formula for Bayes theorem. Don't panic! It's not as bad as it looks! You can even see it in neon lights if that makes it more fun!

Too hard to read? Well, you can have less fun but also squint less with this bad boy:

In plain English, this is what this reads like:
> The probability of event A given event B is equal to the probability of event B given A, times the probability of A, divided by the probability of B.
Quite a mouthful! You can break it down even further. A and B are just two events that are not independent. It's assumed A is the first and B is the second, but it doesn't really matter, as long as you stay consistent with your variable assignment throughout the use of the equation.
Then you have `P`, which is just shorthand for probability.
And lastly, you have the pipe symbol, `|`. This means "given." All in all, this equation is telling you that if you know the probability of A by itself, and the probability of B by itself, then you can figure out how A and B interact.
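If it helps to see the equation as code, here is a minimal sketch (added for illustration; the numbers are placeholders rather than anything from this lesson):
```
def bayes_posterior(p_b_given_a, p_a, p_b):
    """Return P(A|B) = P(B|A) * P(A) / P(B)."""
    return p_b_given_a * p_a / p_b

# Placeholder numbers, just to show the mechanics:
print(bayes_posterior(p_b_given_a=0.9, p_a=0.5, p_b=0.6))  # 0.75
```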
---
## Bayesian Reasoning with the Bayes Formula
If you want to walk this into the wonderful world of Bayes reasoning that you've just hit upon, you can think of this in terms of observations and beliefs. Substitute for `A` beliefs, and for `B`, observations. Now the question becomes, what is the probability of my beliefs being true, given my observations?
The pretty cool thing about this is that with Bayes theorem, you can figure out exactly how much your beliefs change because of evidence.
---
<hr style="height:10px;border-width:0;color:gray;background-color:gray">
# Page 4 - Parts of Bayes Theorem<a class="anchor" id="DS106L9_page_4"></a>
[Back to Top](#DS106L9_toc)
<hr style="height:10px;border-width:0;color:gray;background-color:gray">
# Parts of Bayes Theorem
There are three components to Bayes theorem:
* Posterior probability
* Likelihood
* Prior probability
You will learn about these components in more detail below!
---
## Posterior Probability
The *posterior probability* is the part of the theorem that lets you quantify how strongly you hold beliefs about the data you've observed. The posterior probability is the end result of Bayes' theorem and what you're trying to find out. This is often shortened to just "posterior." No butt jokes, guys!
---
## Likelihood
The *likelihood* is the probability of the data given your current beliefs - in other words, how likely you would be to see this evidence if your belief were true. This represents the top left portion of Bayes' theorem - the P(B|A) part.
---
## Prior Probability
The *prior probability* is all about the strength of your belief before you see the data. Hence, prior, meaning before! This represents the top right portion of Bayes' theorem - the P(A) part.
---
## The Bottom?
You may be wondering about the bottom of the equation. Doesn't that get its own special name too? Apparently not, but you're encouraged to give it one. Stormageddon, anyone? But the bottom portion of Bayes' theorem helps normalize the data, so that even if you have a different amount of data in A and B, you can still compare them fairly.
---
## An Example
You will now calculate the probability that your instructor's a dork, given that she cuddles her statistics book at night. Call "your instructor's a dork" `A` and "cuddling statistics books" `B`.
---
### Find the Likelihood
You can think of the likelihood in this scenario as the probability that your instructor cuddles her statistics book at night, given that she really is a dork. If you are pretty darn certain she would, then you could make it a probability like 8/10. If you change your mind at any point, well, guess what? That is totally fine! This means `P(B|A) = 8/10`.
---
### Find the Prior
First, you need to calculate the prior. Remember, this is just the probability of A. Believe it or not, a survey found that 60% of Americans consider themselves nerds. So you'll use a probability of 6/10 for that. That means: `P(A) = 6/10`.
---
### Calculate the Normalizing Factor P(B)
What is `P(B)`? That's on the bottom! Well, that is the probability that someone cuddles their statistics book at night, regardless of whether or not they are a dork. How many people is that? Well, you could take an educated guess based upon the fact that only 11% of people take statistics in secondary school, and of those, 55% sell the book back after the course. That means only about 5% (11% * 45%) still own a statistics book after the semester is up. So it's not even very likely that people have statistics books, let alone cuddle them. Maybe 1 in 100 of those owners will cuddle their book at night, which makes it `5% * 1%`, or about `.0005`.
That is one way to go. But if you don't want to estimate it, or it is difficult to estimate it, then you can choose from a standard `P(B)` setup. Your choices are:
* .05
* .01
* .005
* .001
It is important to note that the smaller the `P(B)`, the larger your posterior probability, or end result, is.
---
### Calculate the Posterior
Next, you will calculate the posterior. Remember that this is your overall goal! You are ready to solve this bad boy!
This is just plug 'n play at this point:
```text
P(A|B) = (P(B|A) * P(A)) / P(B)
P(A|B) = (.8 * .6) / .0005
P(A|B) = .48 / .0005
P(A|B) = 960
```
That's great! You have a number! But what does it mean? It's really hard to say for sure, especially without a comparison to an alternative hypothesis. It's sort of like comparing machine learning models with AIC - the number itself doesn't matter, just whether it is larger or smaller than other models.
Can you guess what you're going to do next?
---
### Create and Test Alternative Hypotheses Using Bayes
Ok, so one explanation for why your instructor may cuddle her statistics textbook at night is because she doesn't have a pillow. That becomes your new `A`. A quick internet search shows no relevant results. You can then assume that 99% of people own a pillow, which means that 1% don't.
Your new `P(A)` is now 1/100. And that's probably a high estimate of those who don't own a pillow. How does that change your results?
Do some more plug 'n chug!
```text
P(A|B) = (P(B|A) * P(A)) / P(B)
P(A|B) = (.8 * .01) / .0005
P(A|B) = .008 / .0005
P(A|B) = 16
```
So the "dork" explanation comes out roughly 60 times more likely than the "no pillow" explanation - it is much more likely that your instructor's a dork than that she simply doesn't own a pillow. Tada! Relative probability at its finest.
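If you'd rather let Python do the plug 'n chug, here is a short sketch (added for illustration) that reproduces the two posteriors from the example above:
```
def bayes_posterior(p_b_given_a, p_a, p_b):
    return p_b_given_a * p_a / p_b

p_b = 0.0005                                   # probability of cuddling a statistics book
dork = bayes_posterior(0.8, 0.6, p_b)          # instructor is a dork
no_pillow = bayes_posterior(0.8, 0.01, p_b)    # instructor has no pillow
print(dork, no_pillow, dork / no_pillow)       # roughly 960, 16, and a 60x ratio
```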
---
<hr style="height:10px;border-width:0;color:gray;background-color:gray">
# Page 5 - A/B Testing<a class="anchor" id="DS106L9_page_5"></a>
[Back to Top](#DS106L9_toc)
<hr style="height:10px;border-width:0;color:gray;background-color:gray">
# A/B Testing
Remember all those fun research designs you learned about in Basic Statistics? Well, there are more than you learned about there. There are nearly infinite variations having to do with comparisons, varying timepoints, and messing with individuals over and over again. Ethically, of course. And only if they're willing!
A/B testing is yet another type of research design, in which you are directly comparing two methods. In practice, if you have means, you'd compare group A and group B with a *t*-test. But what if you don't have means? What if you have probabilities or percentages? Enter the Bayesian A/B test!
---
## Create the Prior
Say you are testing whether your existing recipe for cream cheese frosting (A) is better than your new recipe for cream cheese frosting (B), and that you are testing it at your local bakesale. Your null hypothesis will be that these frostings will be totally equal. No one will like the new one any better than the old. So, assume that 80% of all bakesale buyers will finish eating your cupcakes with both types of frosting.
---
## Collect Data
Now that you have a hypothesis to test, it's time to collect data! You hold that bakesale, and for the old cream cheese frosting, A, 82% of people finish eating their cupcake. And for the new cream cheese frosting recipe, only 61% finish eating their cupcake. Want it in table form? Take a peek.
<table class="table table-striped">
<tr>
<th>Frosting Type</th>
<th>Ate it All</th>
<th>Did Not Eat it All</th>
<th>Ratio</th>
</tr>
<tr>
<td>Old</td>
<td>95</td>
<td>22</td>
<td>.82</td>
</tr>
<tr>
<td>New</td>
<td>73</td>
<td>46</td>
<td>.61</td>
</tr>
</table>
Right off the bat, you should be thinking to yourself that perhaps frosting recipe B isn't as good. But, it's always a good idea to science things and know for sure! That's what statistics is all about!
---
## Work the Problem in R using Monte Carlo Simulation
That's right, folks, you're out of calculator land again and into programming! Lucky for you, you can finish the A/B testing in R.
*Monte Carlo simulation* is a way to simulate the results of something by re-sampling from the data you already have. It's based off a little bit of data, but to get the best results, you may want a lot more rows than you have. So use Monte Carlo simulation to expand things. Kind of like those toy dinosaurs that grow when you pour water over them.
The function to do this is the `rbeta()` function, which draws samples from the beta distribution - the distribution used to model a probability when your data are binomial. Remember that the binomial distribution is one in which you only have two outcomes. For instance, a) did eat the whole cupcake or b) did not eat the whole cupcake.
There are two components of the beta distribution that you'll need to define as variables, in addition to the number of trials you intend to use:
* alpha: How many times an event happens that you care about
* beta: How many times an event happens that you don't care about
First, assign some variables in R. You'll need a variable to hold onto the priors and the number of trials you want to extend this to. Although you can choose any number of trials you want, here, you'll use `10,000`.
```{r}
trials <-10000
```
`alpha` and `beta` are based on the priors you created. Since you thought that about 80% of people would finish eating a cupcake, `8` becomes your `alpha`. `beta`, the event you don't care about, or not finishing a cupcake, would be `2`. This is because of the "not" rule of probability. You've only got two potential options - people either will finish eating their cupcake or they won't - so the probability of not eating is one minus the probability of eating. Since you are doing this out of 10, that means 10-8 = 2, and 2 becomes your `beta`.
```{r}
alpha <- 8
beta <- 2
```
Now, you are all set up to use `rbeta()` at last! You'll use it for both frosting types. Remember that A was your old, tried-and-true cream cheese frosting recipe, and B was the new one. The variable `samplesA` calculates the probability of the data you collected happening. The first argument it uses is the number of trials you want to simulate this over, and the second is the number of people who ate all of the cupcake with frosting A plus the prior of alpha. The third argument is the number of people who did not eat frosting A plus the prior of beta. You are basically comparing your guess with reality here.
You will follow the same flow for `samplesB`.
```{r}
samplesA <- rbeta(trials, 95+alpha, 22 + beta)
samplesB <- rbeta(trials, 73+alpha, 46 + beta)
```
Lastly, you can figure out if B is better by seeing the percentage of the trials in which B came back greater than A. You are basically just adding up with the `sum()` function every time that `samplesB` was greater than `samplesA` out of the total number of `trials`.
```{R}
Bsuperior <- sum(samplesB > samplesA) / trials
```
The end result is `0`. Wow! Your initial suspicions were right! There is definitely a clear case to stick with your original frosting, because in no situations out of 10,000 did people ever eat the whole cupcake more times with frosting B, your new recipe!
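If you prefer Python, here is a rough equivalent of the R analysis above (a sketch added for illustration, not part of the original lesson). It uses numpy's beta sampler in place of `rbeta()`, with the same counts from the frosting table:
```
import numpy as np

trials = 10000
alpha, beta_prior = 8, 2   # the 80% prior, out of 10
samplesA = np.random.beta(95 + alpha, 22 + beta_prior, trials)
samplesB = np.random.beta(73 + alpha, 46 + beta_prior, trials)
print((samplesB > samplesA).mean())   # fraction of simulated trials where B beats A
```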
---
<hr style="height:10px;border-width:0;color:gray;background-color:gray">
# Page 6 - Bayesian Network Basics<a class="anchor" id="DS106L9_page_6"></a>
[Back to Top](#DS106L9_toc)
<hr style="height:10px;border-width:0;color:gray;background-color:gray">
# Bayesian Network Basics
*Bayesian Statistics* is based around conditional probability. *Bayesian Networks* are a specific type of Bayesian Statistics that map out the conditional relationships of multiple variables. Use Bayesian Networks when you want to find the probability of an outcome when it is impacted by several previous conditional variables.
The image below is an example of a simple Bayesian Network. The results of condition A impact condition B and condition C, and both condition B and condition C impact the probability of condition D. This means that condition D is the final thing you are trying to predict.

---
## Example
How about an example to clear things up? Ask yourself if you will have fun at the beach today. In this case, you want to know the probability of having fun at the beach today. Sounds simple, right? But maybe not. First, ask yourself if it is sunny today. This directly impacts the temperature of the beach and how crowded it is. If it is sunny, it is more likely to be hot and it is more likely to be crowded. Whether or not it is sunny does not directly impact if you will have fun, but if the beach is hot or if the beach is crowded will both impact your probability of having fun. If the beach is warm and not crowded, you are more likely to have fun than if the beach is blazing hot and so busy you are packed in like sardines.

---
<hr style="height:10px;border-width:0;color:gray;background-color:gray">
# Page 7 - Key Terms<a class="anchor" id="DS106L9_page_7"></a>
[Back to Top](#DS106L9_toc)
<hr style="height:10px;border-width:0;color:gray;background-color:gray">
# Key Terms
Below is a list and short description of the important keywords learned in this lesson. Please read through and go back and review any concepts you do not fully understand. Great Work!
<table class="table table-striped">
<tr>
<th>Keyword</th>
<th>Description</th>
</tr>
<tr>
<td style="font-weight: bold;" nowrap>Bayesian Statistics</td>
<td>Statistics using conditional probability.</td>
</tr>
<tr>
<td style="font-weight: bold;" nowrap>Bayesian Networks</td>
<td>Machine learning using the conditional relationships of multiple variables.</td>
</tr>
</table>
<hr style="height:10px;border-width:0;color:gray;background-color:gray">
# Page 8 - Lesson 4 Practice Hands-On<a class="anchor" id="DS106L9_page_8"></a>
[Back to Top](#DS106L9_toc)
<hr style="height:10px;border-width:0;color:gray;background-color:gray">
This Hands-On will **not** be graded, but you are encouraged to complete it; after all, the best way to become a data scientist is to practice.
<div class="panel panel-danger">
<div class="panel-heading">
<h3 class="panel-title">Caution!</h3>
</div>
<div class="panel-body">
<p>Do not submit your project until you have completed all requirements, as you will not be able to resubmit.</p>
</div>
</div>
---
## Bayesian Statistics Hands-On
For this hands-on, you will determine which type of mold-removal solution works better: just bleaching objects, or bleaching them and then scrubbing them down thoroughly. Run the comparison over 10,000 simulated trials, and for your prior, assume that the mold-removal solutions have a 90% chance of working.
You're trying to determine whether the mold will grow back or not, using the following table:
<table class="table table-striped">
<tr>
<th>Mold Removal Type</th>
<th>Mold Returned</th>
<th>Did Not Return</th>
<th>Ratio</th>
</tr>
<tr>
<td>Bleach</td>
<td>27</td>
<td>39</td>
<td>.41</td>
</tr>
<tr>
<td>Bleach and Scrubbing</td>
<td>10</td>
<td>45</td>
<td>.18</td>
</tr>
</table>
Complete A/B testing and Monte Carlo simulation using R. Please attach your R script file with your code documented and information in comments about your findings.
<div class="panel panel-danger">
<div class="panel-heading">
<h3 class="panel-title">Caution!</h3>
</div>
<div class="panel-body">
<p>Be sure to zip and submit your entire directory when finished!</p>
</div>
</div>
<hr style="height:10px;border-width:0;color:gray;background-color:gray">
# Page 9 - Lesson 4 Practice Hands-On Solution<a class="anchor" id="DS106L9_page_9"></a>
[Back to Top](#DS106L9_toc)
<hr style="height:10px;border-width:0;color:gray;background-color:gray">
# Lesson 4 Practice Hands-On Solution
```{r}
trials <-10000
# Create a variable to hold onto the priors and the number of trials you want to extend this to.
alpha <- 9
beta <- 1
# Create your alpha and beta variables from the priors: a 90% chance of working gives alpha = 9, leaving beta = 1 (10%).
samplesA <- rbeta(trials, 27+alpha, 39 + beta)
samplesB <- rbeta(trials, 10+alpha, 45 + beta)
# Set up rbeta() for each removal method in its own sample variable: alpha is added to the Mold Returned counts and beta to the Did Not Return counts.
Bsuperior <- sum(samplesB > samplesA) / trials
# The sum() function adds up every trial in which samplesB was greater than samplesA, and dividing by trials gives the percentage of trials in which samplesB came back greater than samplesA.
Bsuperior
# Print the result.
# Only a small share of the simulated trials (about 0.13 here) had the mold-return rate for bleach and scrubbing exceed that for bleach alone, so bleach plus scrubbing looks like the more effective method.
0.1318
```
## Dimensionality Reduction
```
from sklearn.decomposition import PCA
```
### Principal Components Analysis
```
import os
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt

o_dir = os.path.join('outputs','pca')
if os.path.isdir(o_dir) is not True:
print("Creating '{0}' directory.".format(o_dir))
os.mkdir(o_dir)
pca = PCA() # Use all Principal Components
pca.fit(scdf) # Train model on all data
pcdf = pd.DataFrame(pca.transform(scdf)) # Transform data using model
for i in range(0,21):
print("Amount of explained variance for component {0} is: {1:6.2f}%".format(i, pca.explained_variance_ratio_[i]*100))
print("The amount of explained variance of the SES score using each component is...")
sns.lineplot(x=list(range(1,len(pca.explained_variance_ratio_)+1)), y=pca.explained_variance_ratio_)
pca = PCA(n_components=11)
pca.fit(scdf)
scores = pd.DataFrame(pca.transform(scdf), index=scdf.index)
scores.to_csv(os.path.join(o_dir,'Scores.csv.gz'), compression='gzip', index=True)
# Adapted from https://stackoverflow.com/questions/22984335/recovering-features-names-of-explained-variance-ratio-in-pca-with-sklearn
i = np.identity(scdf.shape[1]) # identity matrix
coef = pca.transform(i)
loadings = pd.DataFrame(coef, index=scdf.columns)
loadings.to_csv(os.path.join(o_dir,'Loadings.csv.gz'), compression='gzip', index=True)
print(scores.shape)
scores.sample(5, random_state=42)
print(loadings.shape)
loadings.sample(5, random_state=42)
odf = pd.DataFrame(columns=['Variable','Component Loading','Score'])
for i in range(0,len(loadings.index)):
row = loadings.iloc[i,:]
for c in list(loadings.columns.values):
d = {'Variable':loadings.index[i], 'Component Loading':c, 'Score':row[c]}
odf = odf.append(d, ignore_index=True)
g = sns.FacetGrid(odf, col="Variable", col_wrap=4, height=3, aspect=2.0, margin_titles=True, sharey=True)
g = g.map(plt.plot, "Component Loading", "Score", marker=".")
```
### What Have We Done?
```
sns.set_style('white')
sns.jointplot(data=scores, x=0, y=1, kind='hex', height=8, ratio=8)
```
#### Create an Output Directory and Load the Data
```
o_dir = os.path.join('outputs','clusters-pca')
if os.path.isdir(o_dir) is not True:
print("Creating '{0}' directory.".format(o_dir))
os.mkdir(o_dir)
score_df = pd.read_csv(os.path.join('outputs','pca','Scores.csv.gz'))
score_df.rename(columns={'Unnamed: 0':'lsoacd'}, inplace=True)
score_df.set_index('lsoacd', inplace=True)
# Ensures that df is initialised but original scores remain accessible
df = score_df.copy(deep=True)
score_df.describe()
score_df.sample(3, random_state=42)
```
#### Rescale the Loaded Data
We need this so that differences in the component scores don't cause the clustering algorithms to focus only on the 1st component.
```
from sklearn import preprocessing

scaler = preprocessing.MinMaxScaler()
df[df.columns] = scaler.fit_transform(df[df.columns])
df.describe()
df.sample(3, random_state=42)
```
### The model
$u(c) = log(c)$ utility function
$y = 1$ Deterministic income
$p(r = 0.02) = 0.5$
$p(r = -0.02) = 0.5$
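The value iteration cell below repeatedly applies the Bellman recursion implied by this setup (written out here for reference; consumption is restricted to the feasible range $0 < c < w + y$):

$$V(w) = \max_{0 < c < w + y} \; \log(c) + \beta \left[ \tfrac{1}{2} V\big((w + y - c)(1+r)\big) + \tfrac{1}{2} V\big((w + y - c)(1-r)\big) \right]$$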
### value iteration
```
# infinite horizon MDP problem
%pylab inline
import numpy as np
from scipy.optimize import minimize
def u(c):
return np.log(c)
# discounting factor
beta = 0.95
# wealth level
w_low = 0
w_high = 10
# interest rate
r = 0.02
# deterministic income
y = 1
# good state and bad state economy with equal probability 0.5
# with good investment return +0.02 or bad investment return -0.02
ws = np.linspace(0.001,10**(1/2),100)**2
Vs = np.zeros(100)
Cs = np.zeros(100)
# Value iteration
for j in range(50):
if j % 10 == 0:
print(j)
for i in range(len(ws)):
w = ws[i]
def obj(c):
return -(u(c) + beta*(np.interp((y+w-c)*(1+r), ws, Vs) + np.interp((y+w-c)*(1-r), ws, Vs))/2)
bounds = [(0.0001, y+w-0.0001)]
res = minimize(obj, 0.0001, method='SLSQP', bounds=bounds)
Cs[i] = res.x[0]
Vs[i] = -res.fun
plt.plot(ws,Vs)
plt.plot(ws,Cs)
```
### policy gradient
Assume the policy is parameterized by $\theta = (a, b, c, \sigma)$, with $\pi_\theta(\cdot \mid w) \sim N(\log(a w + b) + c,\ \sigma)$
Assume the initial value $a = 1$, $b = 1$, $c = 1$, $\sigma = 1$
$$\theta_{k+1} = \theta_{k} + \alpha \nabla_\theta V(\pi_\theta)|\theta_k$$
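The `gradientV` function below estimates this gradient with the likelihood-ratio (REINFORCE) trick. Written out for the Gaussian policy above (with $c_t$ the sampled consumption, $w_t$ the wealth along the path, and $\mu_\theta(w) = \log(a w + b) + c$), the estimator is approximately

$$\nabla_\theta V(\pi_\theta) \approx \frac{1}{D}\sum_{d=1}^{D}\left(\sum_{t=0}^{T-1}\nabla_\theta \log \pi_\theta(c_t \mid w_t)\right)\left(\sum_{t=0}^{T-1}\beta^{t} u(c_t)\right)$$

with score terms $\frac{\partial \log \pi_\theta}{\partial a} = \frac{c_t - \mu_\theta(w_t)}{\sigma^2}\cdot\frac{w_t}{a w_t + b}$, $\frac{\partial \log \pi_\theta}{\partial b} = \frac{c_t - \mu_\theta(w_t)}{\sigma^2}\cdot\frac{1}{a w_t + b}$, and $\frac{\partial \log \pi_\theta}{\partial c} = \frac{c_t - \mu_\theta(w_t)}{\sigma^2}$.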
```
# simulation step T = 100
T = 10
def mu(theta, w):
return np.log(theta[0] * w + theta[1]) + theta[2]
def simSinglePath(theta):
wPath = np.zeros(T)
aPath = np.zeros(T)
rPath = np.zeros(T)
w = np.random.choice(ws)
for t in range(T):
c = np.random.normal(mu(theta, w), theta[3])
while c < 0.0001 or c > w+y-0.0001:
c = np.random.normal(mu(theta, w), theta[3])
wPath[t] = w
aPath[t] = c
rPath[t] = np.log(c)*(beta**t)
if np.random.uniform(0,1) > 0.5:
w = (w+y-c) * (1+r)
else:
w = (w+y-c) * (1-r)
return wPath, aPath, rPath
def gradientV(theta, D = 100):
'''
D is the sample size
'''
grad = np.zeros(len(theta))
newGrad = np.zeros(len(theta))
for d in range(D):
wp, ap, rp = simSinglePath(theta)
        newGrad[0] = np.sum((ap - mu(theta, wp))/(theta[3]**2)*(wp/(theta[0]*wp + theta[1])))
        newGrad[1] = np.sum((ap - mu(theta, wp))/(theta[3]**2)*(1/(theta[0]*wp + theta[1])))
newGrad[2] = np.sum((ap - mu(theta, wp))/(theta[3]**2))
#newGrad[3] = np.sum((((ap - mu(theta, wp))**2 - theta[3]**2)/(theta[3]**3)))
grad += newGrad * np.sum(rp)
grad /= D
grad[-1] = 0
return grad
def updateTheta(theta):
theta = theta + alpha * gradientV(theta)
return theta
import time
def plot(theta):
def f(x):
return np.log(theta[0]*x + theta[1]) + theta[2]
plt.plot(ws, Cs, 'b')
plt.plot(ws, f(ws), 'r')
# (consumption draws outside the feasible range are handled by resampling in simSinglePath)
# initial theta
theta = [1,1,1,0.1]
# gradient ascend step size
alpha = 0.001
# store theta
THETA = np.zeros((3,10000))
for i in range(10000):
theta = updateTheta(theta)
THETA[:,i] = theta[:3]
plot(theta)
theta = [0.4, 1.00560229, 0.74852663, 0.1 ]
plt.plot(THETA[0,:])
plt.plot(THETA[1,:])
plt.plot(THETA[2,:])
def V(theta, w, D = 100):
def sPath(theta, w):
wPath = np.zeros(T)
aPath = np.zeros(T)
rPath = np.zeros(T)
for t in range(T):
c = np.random.normal(mu(theta, w), theta[3])
while c < 0.0001 or c > w+y-0.0001:
c = np.random.normal(mu(theta, w), theta[3])
wPath[t] = w
aPath[t] = c
rPath[t] = np.log(c)*(beta**t)
if np.random.uniform(0,1) > 0.5:
w = (w+y-c) * (1+r)
else:
w = (w+y-c) * (1-r)
return wPath, aPath, rPath
value = 0
for d in range(D):
_,_,rp = sPath(theta,w)
value += np.sum(rp)
return value/D
```
##### Copyright 2019 The TensorFlow Authors.
```
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
```
# TFRecord and tf.Example
<table class="tfo-notebook-buttons" align="left">
<td>
<a target="_blank" href="https://www.tensorflow.org/tutorials/load_data/tfrecord"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on TensorFlow.org</a>
</td>
<td>
<a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs/blob/master/site/en/tutorials/load_data/tfrecord.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a>
</td>
<td>
<a target="_blank" href="https://github.com/tensorflow/docs/blob/master/site/en/tutorials/load_data/tfrecord.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a>
</td>
<td>
<a href="https://storage.googleapis.com/tensorflow_docs/docs/site/en/tutorials/load_data/tfrecord.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" />Download notebook</a>
</td>
</table>
To read data efficiently it can be helpful to serialize your data and store it in a set of files (100-200MB each) that can each be read linearly. This is especially true if the data is being streamed over a network. This can also be useful for caching any data-preprocessing.
The TFRecord format is a simple format for storing a sequence of binary records.
[Protocol buffers](https://developers.google.com/protocol-buffers/) are a cross-platform, cross-language library for efficient serialization of structured data.
Protocol messages are defined by `.proto` files, these are often the easiest way to understand a message type.
The `tf.Example` message (or protobuf) is a flexible message type that represents a `{"string": value}` mapping. It is designed for use with TensorFlow and is used throughout the higher-level APIs such as [TFX](https://www.tensorflow.org/tfx/).
This notebook will demonstrate how to create, parse, and use the `tf.Example` message, and then serialize, write, and read `tf.Example` messages to and from `.tfrecord` files.
Note: While useful, these structures are optional. There is no need to convert existing code to use TFRecords, unless you are using [`tf.data`](https://www.tensorflow.org/guide/datasets) and reading data is still the bottleneck to training. See [Data Input Pipeline Performance](https://www.tensorflow.org/guide/performance/datasets) for dataset performance tips.
## Setup
```
!pip install tf-nightly
import tensorflow as tf
import numpy as np
import IPython.display as display
```
## `tf.Example`
### Data types for `tf.Example`
Fundamentally, a `tf.Example` is a `{"string": tf.train.Feature}` mapping.
The `tf.train.Feature` message type can accept one of the following three types (See the [`.proto` file](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/core/example/feature.proto) for reference). Most other generic types can be coerced into one of these:
1. `tf.train.BytesList` (the following types can be coerced)
- `string`
- `byte`
1. `tf.train.FloatList` (the following types can be coerced)
- `float` (`float32`)
- `double` (`float64`)
1. `tf.train.Int64List` (the following types can be coerced)
- `bool`
- `enum`
- `int32`
- `uint32`
- `int64`
- `uint64`
In order to convert a standard TensorFlow type to a `tf.Example`-compatible `tf.train.Feature`, you can use the shortcut functions below. Note that each function takes a scalar input value and returns a `tf.train.Feature` containing one of the three `list` types above:
```
# The following functions can be used to convert a value to a type compatible
# with tf.Example.
def _bytes_feature(value):
"""Returns a bytes_list from a string / byte."""
if isinstance(value, type(tf.constant(0))):
value = value.numpy() # BytesList won't unpack a string from an EagerTensor.
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
def _float_feature(value):
"""Returns a float_list from a float / double."""
return tf.train.Feature(float_list=tf.train.FloatList(value=[value]))
def _int64_feature(value):
"""Returns an int64_list from a bool / enum / int / uint."""
return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))
```
Note: To stay simple, this example only uses scalar inputs. The simplest way to handle non-scalar features is to use `tf.serialize_tensor` to convert tensors to binary-strings. Strings are scalars in tensorflow. Use `tf.parse_tensor` to convert the binary-string back to a tensor.
Below are some examples of how these functions work. Note the varying input types and the standardized output types. If the input type for a function does not match one of the coercible types stated above, the function will raise an exception (e.g. `_int64_feature(1.0)` will error out, since `1.0` is a float, so should be used with the `_float_feature` function instead):
```
print(_bytes_feature(b'test_string'))
print(_bytes_feature(u'test_bytes'.encode('utf-8')))
print(_float_feature(np.exp(1)))
print(_int64_feature(True))
print(_int64_feature(1))
```
All proto messages can be serialized to a binary-string using the `.SerializeToString` method:
```
feature = _float_feature(np.exp(1))
feature.SerializeToString()
```
### Creating a `tf.Example` message
Suppose you want to create a `tf.Example` message from existing data. In practice, the dataset may come from anywhere, but the procedure of creating the `tf.Example` message from a single observation will be the same:
1. Within each observation, each value needs to be converted to a `tf.train.Feature` containing one of the 3 compatible types, using one of the functions above.
1. You create a map (dictionary) from the feature name string to the encoded feature value produced in #1.
1. The map produced in step 2 is converted to a [`Features` message](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/core/example/feature.proto#L85).
In this notebook, you will create a dataset using NumPy.
This dataset will have 4 features:
* a boolean feature, `False` or `True` with equal probability
* an integer feature uniformly randomly chosen from `[0, 5]`
* a string feature generated from a string table by using the integer feature as an index
* a float feature from a standard normal distribution
Consider a sample consisting of 10,000 independently and identically distributed observations from each of the above distributions:
```
# The number of observations in the dataset.
n_observations = int(1e4)
# Boolean feature, encoded as False or True.
feature0 = np.random.choice([False, True], n_observations)
# Integer feature, random from 0 to 4.
feature1 = np.random.randint(0, 5, n_observations)
# String feature
strings = np.array([b'cat', b'dog', b'chicken', b'horse', b'goat'])
feature2 = strings[feature1]
# Float feature, from a standard normal distribution
feature3 = np.random.randn(n_observations)
```
Each of these features can be coerced into a `tf.Example`-compatible type using one of `_bytes_feature`, `_float_feature`, `_int64_feature`. You can then create a `tf.Example` message from these encoded features:
```
def serialize_example(feature0, feature1, feature2, feature3):
"""
Creates a tf.Example message ready to be written to a file.
"""
# Create a dictionary mapping the feature name to the tf.Example-compatible
# data type.
feature = {
'feature0': _int64_feature(feature0),
'feature1': _int64_feature(feature1),
'feature2': _bytes_feature(feature2),
'feature3': _float_feature(feature3),
}
# Create a Features message using tf.train.Example.
example_proto = tf.train.Example(features=tf.train.Features(feature=feature))
return example_proto.SerializeToString()
```
For example, suppose you have a single observation from the dataset, `[False, 4, bytes('goat'), 0.9876]`. You can create and print the `tf.Example` message for this observation using `create_message()`. Each single observation will be written as a `Features` message as per the above. Note that the `tf.Example` [message](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/core/example/example.proto#L88) is just a wrapper around the `Features` message:
```
# This is an example observation from the dataset.
example_observation = []
serialized_example = serialize_example(False, 4, b'goat', 0.9876)
serialized_example
```
To decode the message use the `tf.train.Example.FromString` method.
```
example_proto = tf.train.Example.FromString(serialized_example)
example_proto
```
## TFRecords format details
A TFRecord file contains a sequence of records. The file can only be read sequentially.
Each record contains a byte-string, for the data-payload, plus the data-length, and CRC32C (32-bit CRC using the Castagnoli polynomial) hashes for integrity checking.
Each record is stored in the following formats:
uint64 length
uint32 masked_crc32_of_length
byte data[length]
uint32 masked_crc32_of_data
The records are concatenated together to produce the file. CRCs are
[described here](https://en.wikipedia.org/wiki/Cyclic_redundancy_check), and
the mask of a CRC is:
masked_crc = ((crc >> 15) | (crc << 17)) + 0xa282ead8ul
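As a small illustration (not part of the original tutorial text), the framing above can be walked manually with Python's `struct` module. This sketch assumes the `test.tfrecord` file written later in this notebook and skips the CRC checks:
```
import struct

def iter_raw_records(path):
    """Yield the raw data payload of each record in a TFRecord file."""
    with open(path, 'rb') as f:
        while True:
            header = f.read(8)            # uint64 length
            if not header:
                break
            length, = struct.unpack('<Q', header)
            f.read(4)                     # uint32 masked_crc32_of_length (ignored here)
            data = f.read(length)         # byte   data[length]
            f.read(4)                     # uint32 masked_crc32_of_data (ignored here)
            yield data

# Example (after the file has been written below):
# print(sum(1 for _ in iter_raw_records('test.tfrecord')))
```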
Note: There is no requirement to use `tf.Example` in TFRecord files. `tf.Example` is just a method of serializing dictionaries to byte-strings. Lines of text, encoded image data, or serialized tensors (using `tf.io.serialize_tensor`, and
`tf.io.parse_tensor` when loading). See the `tf.io` module for more options.
## TFRecord files using `tf.data`
The `tf.data` module also provides tools for reading and writing data in TensorFlow.
### Writing a TFRecord file
The easiest way to get the data into a dataset is to use the `from_tensor_slices` method.
Applied to an array, it returns a dataset of scalars:
```
tf.data.Dataset.from_tensor_slices(feature1)
```
Applied to a tuple of arrays, it returns a dataset of tuples:
```
features_dataset = tf.data.Dataset.from_tensor_slices((feature0, feature1, feature2, feature3))
features_dataset
# Use `take(1)` to only pull one example from the dataset.
for f0,f1,f2,f3 in features_dataset.take(1):
print(f0)
print(f1)
print(f2)
print(f3)
```
Use the `tf.data.Dataset.map` method to apply a function to each element of a `Dataset`.
The mapped function must operate in TensorFlow graph mode—it must operate on and return `tf.Tensors`. A non-tensor function, like `serialize_example`, can be wrapped with `tf.py_function` to make it compatible.
Using `tf.py_function` requires you to specify the shape and type information that is otherwise unavailable:
```
def tf_serialize_example(f0,f1,f2,f3):
tf_string = tf.py_function(
serialize_example,
(f0,f1,f2,f3), # pass these args to the above function.
tf.string) # the return type is `tf.string`.
return tf.reshape(tf_string, ()) # The result is a scalar
tf_serialize_example(f0,f1,f2,f3)
```
Apply this function to each element in the dataset:
```
serialized_features_dataset = features_dataset.map(tf_serialize_example)
serialized_features_dataset
def generator():
for features in features_dataset:
yield serialize_example(*features)
serialized_features_dataset = tf.data.Dataset.from_generator(
generator, output_types=tf.string, output_shapes=())
serialized_features_dataset
```
And write them to a TFRecord file:
```
filename = 'test.tfrecord'
writer = tf.data.experimental.TFRecordWriter(filename)
writer.write(serialized_features_dataset)
```
### Reading a TFRecord file
You can also read the TFRecord file using the `tf.data.TFRecordDataset` class.
More information on consuming TFRecord files using `tf.data` can be found [here](https://www.tensorflow.org/guide/datasets#consuming_tfrecord_data).
Using `TFRecordDataset`s can be useful for standardizing input data and optimizing performance.
```
filenames = [filename]
raw_dataset = tf.data.TFRecordDataset(filenames)
raw_dataset
```
At this point the dataset contains serialized `tf.train.Example` messages. When iterated over it returns these as scalar string tensors.
Use the `.take` method to only show the first 10 records.
Note: iterating over a `tf.data.Dataset` only works with eager execution enabled.
```
for raw_record in raw_dataset.take(10):
print(repr(raw_record))
```
These tensors can be parsed using the function below. Note that the `feature_description` is necessary here because datasets use graph-execution, and need this description to build their shape and type signature:
```
# Create a description of the features.
feature_description = {
'feature0': tf.io.FixedLenFeature([], tf.int64, default_value=0),
'feature1': tf.io.FixedLenFeature([], tf.int64, default_value=0),
'feature2': tf.io.FixedLenFeature([], tf.string, default_value=''),
'feature3': tf.io.FixedLenFeature([], tf.float32, default_value=0.0),
}
def _parse_function(example_proto):
# Parse the input `tf.Example` proto using the dictionary above.
return tf.io.parse_single_example(example_proto, feature_description)
```
Alternatively, use `tf.parse_example` to parse the whole batch at once. Apply this function to each item in the dataset using the `tf.data.Dataset.map` method:
```
parsed_dataset = raw_dataset.map(_parse_function)
parsed_dataset
```
Use eager execution to display the observations in the dataset. There are 10,000 observations in this dataset, but you will only display the first 10. The data is displayed as a dictionary of features. Each item is a `tf.Tensor`, and the `numpy` element of this tensor displays the value of the feature:
```
for parsed_record in parsed_dataset.take(10):
print(repr(parsed_record))
```
Here, the `tf.parse_example` function unpacks the `tf.Example` fields into standard tensors.
## TFRecord files in Python
The `tf.io` module also contains pure-Python functions for reading and writing TFRecord files.
### Writing a TFRecord file
Next, write the 10,000 observations to the file `test.tfrecord`. Each observation is converted to a `tf.Example` message, then written to file. You can then verify that the file `test.tfrecord` has been created:
```
# Write the `tf.Example` observations to the file.
with tf.io.TFRecordWriter(filename) as writer:
for i in range(n_observations):
example = serialize_example(feature0[i], feature1[i], feature2[i], feature3[i])
writer.write(example)
!du -sh {filename}
```
### Reading a TFRecord file
These serialized tensors can be easily parsed using `tf.train.Example.ParseFromString`:
```
filenames = [filename]
raw_dataset = tf.data.TFRecordDataset(filenames)
raw_dataset
for raw_record in raw_dataset.take(1):
example = tf.train.Example()
example.ParseFromString(raw_record.numpy())
print(example)
```
## Walkthrough: Reading and writing image data
This is an end-to-end example of how to read and write image data using TFRecords. Using an image as input data, you will write the data as a TFRecord file, then read the file back and display the image.
This can be useful if, for example, you want to use several models on the same input dataset. Instead of storing the image data raw, it can be preprocessed into the TFRecords format, and that can be used in all further processing and modelling.
First, let's download [this image](https://commons.wikimedia.org/wiki/File:Felis_catus-cat_on_snow.jpg) of a cat in the snow and [this photo](https://upload.wikimedia.org/wikipedia/commons/f/fe/New_East_River_Bridge_from_Brooklyn_det.4a09796u.jpg) of the Williamsburg Bridge, NYC under construction.
### Fetch the images
```
cat_in_snow = tf.keras.utils.get_file('320px-Felis_catus-cat_on_snow.jpg', 'https://storage.googleapis.com/download.tensorflow.org/example_images/320px-Felis_catus-cat_on_snow.jpg')
williamsburg_bridge = tf.keras.utils.get_file('194px-New_East_River_Bridge_from_Brooklyn_det.4a09796u.jpg','https://storage.googleapis.com/download.tensorflow.org/example_images/194px-New_East_River_Bridge_from_Brooklyn_det.4a09796u.jpg')
display.display(display.Image(filename=cat_in_snow))
display.display(display.HTML('Image cc-by: <a "href=https://commons.wikimedia.org/wiki/File:Felis_catus-cat_on_snow.jpg">Von.grzanka</a>'))
display.display(display.Image(filename=williamsburg_bridge))
display.display(display.HTML('<a "href=https://commons.wikimedia.org/wiki/File:New_East_River_Bridge_from_Brooklyn_det.4a09796u.jpg">From Wikimedia</a>'))
```
### Write the TFRecord file
As before, encode the features as types compatible with `tf.Example`. This stores the raw image string feature, as well as the height, width, depth, and arbitrary `label` feature. The latter is used when you write the file to distinguish between the cat image and the bridge image. Use `0` for the cat image, and `1` for the bridge image:
```
image_labels = {
cat_in_snow : 0,
williamsburg_bridge : 1,
}
# This is an example, just using the cat image.
image_string = open(cat_in_snow, 'rb').read()
label = image_labels[cat_in_snow]
# Create a dictionary with features that may be relevant.
def image_example(image_string, label):
image_shape = tf.image.decode_jpeg(image_string).shape
feature = {
'height': _int64_feature(image_shape[0]),
'width': _int64_feature(image_shape[1]),
'depth': _int64_feature(image_shape[2]),
'label': _int64_feature(label),
'image_raw': _bytes_feature(image_string),
}
return tf.train.Example(features=tf.train.Features(feature=feature))
for line in str(image_example(image_string, label)).split('\n')[:15]:
print(line)
print('...')
```
Notice that all of the features are now stored in the `tf.Example` message. Next, functionalize the code above and write the example messages to a file named `images.tfrecords`:
```
# Write the raw image files to `images.tfrecords`.
# First, process the two images into `tf.Example` messages.
# Then, write to a `.tfrecords` file.
record_file = 'images.tfrecords'
with tf.io.TFRecordWriter(record_file) as writer:
for filename, label in image_labels.items():
image_string = open(filename, 'rb').read()
tf_example = image_example(image_string, label)
writer.write(tf_example.SerializeToString())
!du -sh {record_file}
```
### Read the TFRecord file
You now have the file—`images.tfrecords`—and can now iterate over the records in it to read back what you wrote. Given that in this example you will only reproduce the image, the only feature you will need is the raw image string. Extract it using the getters described above, namely `example.features.feature['image_raw'].bytes_list.value[0]`. You can also use the labels to determine which record is the cat and which one is the bridge:
```
raw_image_dataset = tf.data.TFRecordDataset('images.tfrecords')
# Create a dictionary describing the features.
image_feature_description = {
'height': tf.io.FixedLenFeature([], tf.int64),
'width': tf.io.FixedLenFeature([], tf.int64),
'depth': tf.io.FixedLenFeature([], tf.int64),
'label': tf.io.FixedLenFeature([], tf.int64),
'image_raw': tf.io.FixedLenFeature([], tf.string),
}
def _parse_image_function(example_proto):
# Parse the input tf.Example proto using the dictionary above.
return tf.io.parse_single_example(example_proto, image_feature_description)
parsed_image_dataset = raw_image_dataset.map(_parse_image_function)
parsed_image_dataset
```
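As an aside, the getter mentioned above (`example.features.feature['image_raw'].bytes_list.value[0]`) can also be applied directly to a raw record without going through `tf.data.Dataset.map`; a short sketch:
```
# Parse one raw record into a tf.train.Example and pull fields straight out of
# the protobuf, as an alternative to the map-based parsing above.
for raw_record in raw_image_dataset.take(1):
    example = tf.train.Example()
    example.ParseFromString(raw_record.numpy())
    label = example.features.feature['label'].int64_list.value[0]
    image_bytes = example.features.feature['image_raw'].bytes_list.value[0]
    print(label, len(image_bytes))
```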
Recover the images from the TFRecord file:
```
for image_features in parsed_image_dataset:
image_raw = image_features['image_raw'].numpy()
display.display(display.Image(data=image_raw))
```
# Yahoo Movies Crawler Exercise
## Practice scraping movie showtime information. You must obtain the movie ID, the screening area, and the screening date step by step before sending the final query to the server.
```
import requests
import re
from bs4 import BeautifulSoup
```
### First, look up the ID information for all movies
```
# Check which movies are currently showing and extract their ID information
url = 'https://movies.yahoo.com.tw/'
resp = requests.get(url)
resp.encoding = 'utf-8'
soup = BeautifulSoup(resp.text, 'lxml')
html = soup.find("select", attrs={'name':'movie_id'})
movie_item = html.find_all("option", attrs={'data-name':re.compile('.*')})
for p in movie_item:
print("Movie: %s, ID: %s" % (p["data-name"], p["value"]))
```
### Specify the ID of the movie you are interested in, then query its screening area information.
```
# Refer to the ID information extracted in the previous step and specify an ID
movie_id = 10477
url = 'https://movies.yahoo.com.tw/api/v1/areas_by_movie_theater'
payload = {'movie_id':str(movie_id)}
# Simulate a browser header
headers = {
'authority': 'movies.yahoo.com.tw',
'method': 'GET',
'path': '/api/v1/areas_by_movie_theater?movie_id=' + str(movie_id),
'scheme': 'https',
'accept': 'application/json, text/javascript, */*; q=0.01',
'accept-encoding': 'gzip, deflate, br',
'accept-language': 'zh-TW,zh;q=0.9,en-US;q=0.8,en;q=0.7,zh-CN;q=0.6',
'cookie': 'rxx=9s3x2fws06.1g16irnc&v=1; _ga=GA1.3.2056742944.1551651301; GUC=AQEBAQFczFpdm0IfmwSB&s=AQAAACoo4N5D&g=XMsVBw; BX=4hkdk1decm57t&b=3&s=mr; _ga=GA1.4.2056742944.1551651301; nexagesuid=82843256dd234e8e91aa73f2062f8218; browsed_movie=eyJpdiI6IlJXWWtiSWJaZlNGK2MxQnhscnVUYWc9PSIsInZhbHVlIjoiMXRhMmVHRXRIeUNjc1RBWDJzdGYwbnlIQURmWGsrcjJSMzhkbkcraDNJVUNIZEZsbzU3amlFcVZ1NzlmazJrTGpoMjVrbHk1YmpoRENXaHZTOUw1TmI2ZTZVWHdOejZQZm16RmVuMWlHTTJLaTZLVFZZVkFOMDlTd1wvSGltcytJIiwibWFjIjoiZWQ2ZjA4MmVjZmZlYjlmNjJmYmY2NGMyMDI0Njc0NWViYjVkOWE2NDg0N2RhODMxZjBjZDhiMmJhZTc2MDZhYiJ9; avi=eyJpdiI6Im1NeWFJRlVRWDR1endEcGRGUGJUbVE9PSIsInZhbHVlIjoickRpU3JuUytmcGl6cjF5OW0rNU9iZz09IiwibWFjIjoiY2VmY2NkNzZmM2NhNjY5YzlkOTcyNjE5OGEyMzU0NWYxOTdmMDRkMDY3OWNmMmZjOTMxYjc5MjI5N2Q5NGE5MiJ9; cmp=t=1559391030&j=0; _gid=GA1.4.779543841.1559391031; XSRF-TOKEN=eyJpdiI6IkhpS2hGcDRQaHlmWUJmaHdSS2Q2bHc9PSIsInZhbHVlIjoiOUVoNFk4OHI1UUZmUWRtYXhza0MyWjJSTlhlZ3RnT0VGeVJPN2JuczVRMGRFdWt2OUlsamVKeHRobFwvcHBGM0dhU3VyMXNGTHlsb2dVM2l0U1hpUGxBPT0iLCJtYWMiOiJkZWU4YzJhNjAxMTY3MzE4Y2ExNWIxYmE1ZjE1YWZlZTlhOTcyYjc4M2RlZGY4ZWNjZDYyMTA2NGYwZGViMzc2In0%3D; m_s=eyJpdiI6InpsZHZ2Tk1BZ0dxaHhETml1RjBnUXc9PSIsInZhbHVlIjoiSkNGeHUranRoXC85bDFiaDhySTJqNkJRcWdjWUxjeVRJSHVYZ1wvd2d4bWJZUTUrSHVDM0lUcW5KNHdETFZ4T1lieU81OUhzc1VoUXhZcWk0UDZSQXVFdz09IiwibWFjIjoiYmJkMDJkMDhlODIzMzcyMWY4M2NmYWNjNGVlOWRjMDIwZmVmNzAyMjE3Yzg3ZGY3ODBkZWEzZTI4MTI5ZWNmOSJ9; _gat=1; nexagesd=10',
'dnt': '1',
'mv-authorization': '21835b082e15b91a69b3851eec7b31b82ce82afb',
'referer': 'https://movies.yahoo.com.tw/',
'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.169 Safari/537.36',
'x-requested-with': 'XMLHttpRequest',
}
resp = requests.get(url, params=payload, headers=headers)
#print(resp.json())  # If needed, print the raw JSON
# The response here is JSON-formatted data, so parse the JSON to extract what we need
for p in resp.json():
    print('Screening area: {0}, area_id: {1}'.format(p['title'], p['area_id']))
```
### Specify the screening area you want, then query the dates that have showtimes
```
# Specify the screening area
area_id = 28
# Send the request to the site
url = 'https://movies.yahoo.com.tw/movietime_result.html'
payload = {'movie_id':str(movie_id), 'area_id':str(area_id)}
resp = requests.get(url, params=payload)
resp.encoding = 'utf-8'
soup = BeautifulSoup(resp.text, 'lxml')
movie_date = soup.find_all("label", attrs={'for':re.compile("date_[\d]")})
# Print the screening dates
for date in movie_date:
print("%s %s" % (date.p.string, date.h3.string))
```
### Finally, specify the viewing date, then query and print the theaters, screening types (digital, 3D, IMAX 3D, ...), and showtimes.
```
# Select the date to view
date = "2019-08-21"
# Send the request to get the theaters and showtime information
url = "https://movies.yahoo.com.tw/ajax/pc/get_schedule_by_movie"
payload = {'movie_id':str(movie_id),
'date':date,
'area_id':str(area_id),
'theater_id':'',
'datetime':'',
'movie_type_id':''}
resp = requests.get(url, params=payload)
#print(resp.json()['view'])  # If needed, print the raw JSON
soup = BeautifulSoup(resp.json()['view'], 'lxml')
html = soup.find_all("ul", attrs={'data-theater_name':re.compile(".*")})
'''
Try to extract the theater names, screening types, and showtimes from the theater data returned in the previous step.
Your code here.
'''
```
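One possible way to finish the exercise is sketched below (this is not an official solution). The `data-theater_name` attribute comes from the `find_all()` call above; the assumption that each `<li>` under a theater's list holds a screening type and its showtimes is a guess about the page markup and may need adjusting against the real HTML.
```
# Sketch: print each theater's name and the text of its list items, which is
# assumed (not verified) to contain the screening type and showtimes.
for theater in html:
    print('Theater:', theater['data-theater_name'])
    for li in theater.find_all('li'):
        text = li.get_text(' ', strip=True)
        if text:
            print('   ', text)
```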
# Flopy MODFLOW 6 (MF6) Support
The Flopy library contains classes for creating, saving, running, loading, and modifying MF6 simulations. The MF6 portion of the flopy library is located in:
*flopy.mf6*
While there are a number of classes in flopy.mf6, to get started you only need to use the main classes summarized below:
flopy.mf6.MFSimulation
* MODFLOW Simulation Class. Entry point into any MODFLOW simulation.
flopy.mf6.ModflowGwf
* MODFLOW Groundwater Flow Model Class. Represents a single model in a simulation.
flopy.mf6.Modflow[pc]
* MODFLOW package classes where [pc] is the abbreviation of the package name. Each package is a separate class.
For packages that are part of a groundwater flow model, the abbreviation begins with "Gwf". For example, "flopy.mf6.ModflowGwfdis" is the Discretization package.
```
import os
import sys
from shutil import copyfile
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
try:
import flopy
except:
fpth = os.path.abspath(os.path.join('..', '..'))
sys.path.append(fpth)
import flopy
print(sys.version)
print('numpy version: {}'.format(np.__version__))
print('matplotlib version: {}'.format(mpl.__version__))
print('flopy version: {}'.format(flopy.__version__))
```
# Creating a MF6 Simulation
A MF6 simulation is created by first creating a simulation object "MFSimulation". When you create the simulation object you can define the simulation's name, version, executable name, workspace path, and the name of the tdis file. All of these are optional parameters, and if not defined each one will default to the following:
sim_name='modflowtest'
version='mf6'
exe_name='mf6.exe'
sim_ws='.'
sim_tdis_file='modflow6.tdis'
```
import os
import sys
from shutil import copyfile
try:
import flopy
except:
fpth = os.path.abspath(os.path.join('..', '..'))
sys.path.append(fpth)
import flopy
sim_name = 'example_sim'
sim_path = os.path.join('data', 'example_project')
sim = flopy.mf6.MFSimulation(sim_name=sim_name, version='mf6', exe_name='mf6',
sim_ws=sim_path)
```
The next step is to create a tdis package object "ModflowTdis". The first parameter of the ModflowTdis class is a simulation object, which ties a ModflowTdis object to a specific simulation. The other parameters and their definitions can be found in the docstrings.
```
tdis = flopy.mf6.ModflowTdis(sim, pname='tdis', time_units='DAYS', nper=2,
perioddata=[(1.0, 1, 1.0), (10.0, 5, 1.0)])
```
Next one or more models are created using the ModflowGwf class. The first parameter of the ModflowGwf class is the simulation object that the model will be a part of.
```
model_name = 'example_model'
model = flopy.mf6.ModflowGwf(sim, modelname=model_name,
model_nam_file='{}.nam'.format(model_name))
```
Next create one or more Iterative Model Solution (IMS) files.
```
ims_package = flopy.mf6.ModflowIms(sim, pname='ims', print_option='ALL',
complexity='SIMPLE', outer_hclose=0.00001,
outer_maximum=50, under_relaxation='NONE',
inner_maximum=30, inner_hclose=0.00001,
linear_acceleration='CG',
preconditioner_levels=7,
preconditioner_drop_tolerance=0.01,
number_orthogonalizations=2)
```
Each ModflowGwf object needs to be associated with an ModflowIms object. This is done by calling the MFSimulation object's "register_ims_package" method. The first parameter in this method is the ModflowIms object and the second parameter is a list of model names (strings) for the models to be associated with the ModflowIms object.
```
sim.register_ims_package(ims_package, [model_name])
```
Next add packages to each model. The first package added needs to be a spatial discretization package since flopy uses information from the spatial discretization package to help you build other packages. There are three spatial discretization packages to choose from:
DIS (ModflowGwfDis) - Structured discretization
DISV (ModflowGwfdisv) - Discretization with vertices
DISU (ModflowGwfdisu) - Unstructured discretization
```
dis_package = flopy.mf6.ModflowGwfdis(model, pname='dis', length_units='FEET', nlay=2,
nrow=2, ncol=5, delr=500.0,
delc=500.0,
top=100.0, botm=[50.0, 20.0],
filename='{}.dis'.format(model_name))
```
## Accessing Namefiles
Namefiles are automatically built for you by flopy. However, there are some options contained in the namefiles that you may want to set. To get the namefile object access the name_file attribute in either a simulation or model object to get the simulation or model namefile.
```
# set the nocheck property in the simulation namefile
sim.name_file.nocheck = True
# set the print_input option in the model namefile
model.name_file.print_input = True
```
## Specifying Options
Options that appear alone are assigned a boolean value, like the print_input option above. Options that have additional optional parameters are assigned using a tuple, with the entries containing the names of the optional parameters to turn on. Use a tuple with an empty string to indicate no optional parameters and use a tuple with None to turn the option off.
```
# Turn Newton option on with under relaxation
model.name_file.newtonoptions = ('UNDER_RELAXATION')
# Turn Newton option on without under relaxation
model.name_file.newtonoptions = ('')
# Turn off Newton option
model.name_file.newtonoptions = (None)
```
## MFArray Templates
Lastly define all other packages needed.
Note that flopy supports a number of ways to specify data for a package. A template, which defines the data array shape for you, can be used to specify the data. Templates are built by calling the `empty()` method of the data type you are building. For example, to build a template for k in the npf package you would call:
ModflowGwfnpf.k.empty()
The empty method for "MFArray" data templates (data templates whose size is based on the structure of the model grid) take up to four parameters:
* model - The model object that the data is a part of. A valid model object with a discretization package is required in order to build the proper array dimensions. This parameter is required.
* layered - True or false whether the data is layered or not.
* data_storage_type_list - List of data storage types, one for each model layer. If the template is not layered, only one data storage type needs to be specified. There are three data storage types supported, internal_array, internal_constant, and external_file.
* default_value - The initial value for the array.
```
# build a data template for k that stores the first layer as an internal array and the second
# layer as a constant with the default value of k for all layers set to 100.0
layer_storage_types = [flopy.mf6.data.mfdatastorage.DataStorageType.internal_array,
flopy.mf6.data.mfdatastorage.DataStorageType.internal_constant]
k_template = flopy.mf6.ModflowGwfnpf.k.empty(model, True, layer_storage_types, 100.0)
# fill in the internal array for the first layer and set its multiplication factor
k_template[0]['data'] = [65.0, 60.0, 55.0, 50.0, 45.0, 40.0, 35.0, 30.0, 25.0, 20.0]
k_template[0]['factor'] = 1.5
print(k_template)
# create npf package using the k template to define k
npf_package = flopy.mf6.ModflowGwfnpf(model, pname='npf', save_flows=True, icelltype=1, k=k_template)
```
## Specifying MFArray Data
MFArray data can also be specified as a numpy array, a list of values, or a single value. Below strt (starting heads) are defined as a single value, 100.0, which is interpreted as an internal constant storage type of value 100.0. Strt could also be defined as a list defining a value for every model cell:
strt=[100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0,
90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0]
Or as a list defining a value or values for each model layer:
strt=[100.0, 90.0]
or:
strt=[[100.0], [90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0]]
MFArray data can also be stored in an external file by using a dictionary using the keys 'filename' to specify the file name relative to the model folder and 'data' to specific the data. The optional 'factor', 'iprn', and 'binary' keys may also be used.
strt={'filename': 'strt.txt', 'factor':1.0, 'data':[100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0,
90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0], 'binary': 'True'}
If the 'data' key is omitted from the dictionary flopy will try to read the data from an existing file 'filename'. Any relative paths for loading data from a file should specified relative to the MF6 simulation folder.
```
strt={'filename': 'strt.txt', 'factor':1.0, 'data':[100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0, 100.0,
90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0, 90.0], 'binary': 'True'}
ic_package = flopy.mf6.ModflowGwfic(model, pname='ic', strt=strt,
filename='{}.ic'.format(model_name))
# move external file data into model folder
icv_data_path = os.path.join('..', 'data', 'mf6', 'notebooks', 'iconvert.txt')
copyfile(icv_data_path, os.path.join(sim_path, 'iconvert.txt'))
# create storage package
sto_package = flopy.mf6.ModflowGwfsto(model, pname='sto', save_flows=True, iconvert={'filename':'iconvert.txt'},
ss=[0.000001, 0.000002],
sy=[0.15, 0.14, 0.13, 0.12, 0.11, 0.11, 0.12, 0.13, 0.14, 0.15,
0.15, 0.14, 0.13, 0.12, 0.11, 0.11, 0.12, 0.13, 0.14, 0.15])
```
## MFList Templates
Flopy supports specifying record and recarray "MFList" data in a number of ways. Templates can be created that define the shape of the data. The empty method for "MFList" data templates takes up to 7 parameters.
* model - The model object that the data is a part of. A valid model object with a discretization package is required in order to build the proper array dimensions. This parameter is required.
* maxbound - The number of rows in the recarray. If not specified one row is returned.
* aux_vars - List of auxiliary variable names. If not specified auxiliary variables are not used.
* boundnames - True/False if boundnames is to be used.
* nseg - Number of segments (only relevant for a few data types)
* timeseries - True/False indicates that time series data will be used.
* stress_periods - List of integer stress periods to be used (transient MFList data only). If not specified for transient data, template will only be defined for stress period 1.
MFList transient data templates are numpy recarrays stored in a dictionary, with the dictionary key being an integer zero-based stress period value (stress period - 1).
In the code below the well package is set up using a transient MFList template to help build the well package's stress_period_data.
```
maxbound = 2
# build a stress_period_data template with 2 wells over stress periods 1 and 2 with boundnames
# and three aux variables
wel_periodrec = flopy.mf6.ModflowGwfwel.stress_period_data.empty(model, maxbound=maxbound, boundnames=True,
aux_vars=['var1', 'var2', 'var3'],
stress_periods=[0,1])
# define the two wells for stress period one
wel_periodrec[0][0] = ((0,1,2), -50.0, -1, -2, -3, 'First Well')
wel_periodrec[0][1] = ((1,1,4), -25.0, 2, 3, 4, 'Second Well')
# define the two wells for stress period two
wel_periodrec[1][0] = ((0,1,2), -200.0, -1, -2, -3, 'First Well')
wel_periodrec[1][1] = ((1,1,4), -4000.0, 2, 3, 4, 'Second Well')
# build the well package
wel_package = flopy.mf6.ModflowGwfwel(model, pname='wel', print_input=True, print_flows=True,
auxiliary=['var1', 'var2', 'var3'], maxbound=maxbound,
stress_period_data=wel_periodrec, boundnames=True, save_flows=True)
```
## Cell IDs
Cell IDs always appear as tuples in an MFList. For a structured grid cell IDs appear as:
(<layer>, <row>, <column>)
For vertex-based grids, cell IDs appear as:
(<layer>, <intralayer_cell_id>)
Unstructured grid cell IDs appear as:
(<cell_id>)
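For example, here is an illustrative sketch of the same stress period record written for each grid type (the cell indices and the pumping rate are made-up values):
```
# illustrative only: one well record referenced by each of the three grid types
# structured grid (DIS): (layer, row, column)
structured_rec = [((0, 1, 2), -25.0)]
# vertex based grid (DISV): (layer, intralayer cell id)
vertex_rec = [((0, 47), -25.0)]
# unstructured grid (DISU): (cell id,)
unstructured_rec = [((150,), -25.0)]
```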
## Specifying MFList Data
MFList data can also be defined as a list of tuples, with each tuple being a row of the recarray. For transient data the list of tuples can be stored in a dictionary with the dictionary key an integer zero based stress period value. If only a list of tuples is specified for transient data, the data is assumed to apply to stress period 1. Additional stress periods can be added with the add_transient_key method. The code below defines saverecord and printrecord as a list of tuples.
```
# printrecord data as a list of tuples. since no stress
# period is specified it will default to stress period 1
printrec_tuple_list = [('HEAD', 'ALL'), ('BUDGET', 'ALL')]
# saverecord data as a dictionary of lists of tuples for
# stress periods 1 and 2.
saverec_dict = {0:[('HEAD', 'ALL'), ('BUDGET', 'ALL')],1:[('HEAD', 'ALL'), ('BUDGET', 'ALL')]}
# create oc package
oc_package = flopy.mf6.ModflowGwfoc(model, pname='oc',
budget_filerecord=[('{}.cbc'.format(model_name),)],
head_filerecord=[('{}.hds'.format(model_name),)],
saverecord=saverec_dict,
printrecord=printrec_tuple_list)
# add stress period two to the print record
oc_package.printrecord.add_transient_key(1)
# set the data for stress period two in the print record
oc_package.printrecord.set_data([('HEAD', 'ALL'), ('BUDGET', 'ALL')], 1)
```
### Specifying MFList Data in an External File
MFList data can be specified in an external file using a dictionary with the 'filename' key. If the 'data' key is also included in the dictionary and is not None, flopy will create the file with the data contained in the 'data' key. The 'binary' key can be used to save data to a binary file ('binary': True). The code below creates a chd package which creates and references an external file containing data for stress period 1 and stores the data internally in the chd package file for stress period 2.
```
stress_period_data = {0: {'filename': 'chd_sp1.dat', 'data': [[(0, 0, 0), 70.]]},
1: [[(0, 0, 0), 60.]]}
chd = flopy.mf6.ModflowGwfchd(model, maxbound=1, stress_period_data=stress_period_data)
```
## Packages that Support both List-based and Array-based Data
The recharge and evapotranspiration packages can be specified using list-based or array-based input. The array packages have an "a" on the end of their name:
ModflowGwfrch - list based recharge package
ModflowGwfrcha - array based recharge package
ModflowGwfevt - list based evapotranspiration package
ModflowGwfevta - array based evapotranspiration package
```
rch_recarray = {0:[((0,0,0), 'rch_1'), ((1,1,1), 'rch_2')],
1:[((0,0,0), 'rch_1'), ((1,1,1), 'rch_2')]}
rch_package = flopy.mf6.ModflowGwfrch(model, pname='rch', fixed_cell=True, print_input=True,
maxbound=2, stress_period_data=rch_recarray)
```
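For comparison, below is a minimal sketch of the array-based recharge package. The recharge values are illustrative, the parameter names are assumed to follow the MF6 array names, and in a real model you would use either the list-based or the array-based package, not both:
```
# hypothetical array-based recharge: one recharge array (here a constant) per stress period
rcha_package = flopy.mf6.ModflowGwfrcha(model, pname='rcha',
                                        recharge={0: 0.001, 1: 0.002})
```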
## Utility Files (TS, TAS, OBS, TAB)
Utility files are MF6 formatted files that are referenced by packages; they include time series, time array series, observation, and tab files. The file names for utility files are specified using the package that references them. The utility files can be created in several ways. A simple case is demonstrated below. More detail is given in the flopy3_mf6_obs_ts_tas notebook.
```
# build a time series array for the recharge package
ts_data = [(0.0, 0.015, 0.0017), (1.0, 0.016, 0.0019), (2.0, 0.012, 0.0015),
(3.0, 0.020, 0.0014), (4.0, 0.015, 0.0021), (5.0, 0.013, 0.0012),
(6.0, 0.022, 0.0012), (7.0, 0.016, 0.0014), (8.0, 0.013, 0.0011),
(9.0, 0.021, 0.0011), (10.0, 0.017, 0.0016), (11.0, 0.012, 0.0015)]
rch_package.ts.initialize(time_series_namerecord=['rch_1', 'rch_2'],
timeseries=ts_data, filename='recharge_rates.ts',
interpolation_methodrecord=['stepwise', 'stepwise'])
# build a recharge observation package that outputs the western recharge to a binary file and the eastern
# recharge to a text file
obs_data = {('rch_west.csv', 'binary'): [('rch_1_1_1', 'RCH', (0, 0, 0)),
('rch_1_2_1', 'RCH', (0, 1, 0))],
'rch_east.csv': [('rch_1_1_5', 'RCH', (0, 0, 4)),
('rch_1_2_5', 'RCH', (0, 1, 4))]}
rch_package.obs.initialize(filename='example_model.rch.obs', digits=10,
print_input=True, continuous=obs_data)
```
# Saving and Running a MF6 Simulation
Saving and running a simulation are done with the MFSimulation class's write_simulation and run_simulation methods.
```
# write simulation to new location
sim.write_simulation()
# run simulation
sim.run_simulation()
```
# Exporting a MF6 Model
Exporting a MF6 model to a shapefile or netcdf is the same as exporting a MF2005 model.
```
# make directory
pth = os.path.join('data', 'netCDF_export')
if not os.path.exists(pth):
os.makedirs(pth)
# export the dis package to a netcdf file
model.dis.export(os.path.join(pth, 'dis.nc'))
# export the botm array to a shapefile
model.dis.botm.export(os.path.join(pth, 'botm.shp'))
```
# Loading an Existing MF6 Simulation
Loading a simulation can be done with the flopy.mf6.MFSimulation.load static method.
```
# load the simulation
loaded_sim = flopy.mf6.MFSimulation.load(sim_name, 'mf6', 'mf6', sim_path)
```
# Retrieving Data and Modifying an Existing MF6 Simulation
Data can be easily retrieved from a simulation using two methods. One method is to retrieve the data object from a master simulation dictionary that keeps track of all the data. The master simulation dictionary is accessed through a simulation's "simulation_data" property and then the "mfdata" property:
sim.simulation_data.mfdata[<data path>]
The data path is the path to the data stored as a tuple containing the model name, package name, block name, and data name.
The second method is to get the data from the package object. If you do not already have the package object, you can work your way down the simulation structure, from the simulation to the correct model, to the correct package, and finally to the data object.
These methods are demonstrated in the code below.
```
# get hydraulic conductivity data object from the data dictionary
hk = sim.simulation_data.mfdata[(model_name, 'npf', 'griddata', 'k')]
# get specific yield data object from the storage package
sy = sto_package.sy
# get the model object from the simulation object using the get_model method,
# which takes a string with the model's name and returns the model object
mdl = sim.get_model(model_name)
# get the package object from the model object using the get_package method,
# which takes a string with the package's name or type
ic = mdl.get_package('ic')
# get the data object from the initial condition package object
strt = ic.strt
```
Once you have the appropriate data object there are a number of methods to retrieve data from that object. Data retrieved can either be the data as it appears in the model file or the data with any factor specified in the model file applied to it. To get the raw data without applying a factor use the get_data method. To get the data with the factor already applied use .array.
Note that MFArray data is always returned as a copy of the data stored by flopy. Modifying the copy will have no effect on the data stored in flopy. Non-constant internal MFList data is returned as a reference to a numpy recarray; modifying this recarray will modify the data stored in flopy.
```
# get the data without applying any factor
hk_data_no_factor = hk.get_data()
print('Data without factor:\n{}\n'.format(hk_data_no_factor))
# get data with factor applied
hk_data_factor = hk.array
print('Data with factor:\n{}\n'.format(hk_data_factor))
```
Data can also be retrieved from the data object using []. For unlayered data the [] can be used to slice the data.
```
# slice layer one row two
print('SY slice of layer one row two\n{}\n'.format(sy[0,:,2]))
```
For layered data specify the layer number within the brackets. This will return a "LayerStorage" object which lets you change attributes of an individual layer.
```
# get layer one LayerStorage object
hk_layer_one = hk[0]
# change the print code and factor for layer one
hk_layer_one.iprn = '2'
hk_layer_one.factor = 1.1
print('Layer one data without factor:\n{}\n'.format(hk_layer_one.get_data()))
print('Data with new factor:\n{}\n'.format(hk.array))
```
## Modifying Data
Data can be modified in several ways. One way is to set data for a given layer within a LayerStorage object, like the one accessed in the code above. Another way is to set the data attribute to the new data. Yet another way is to call the data object's set_data method.
```
# set data within a LayerStorage object
hk_layer_one.set_data([120.0, 100.0, 80.0, 70.0, 60.0, 50.0, 40.0, 30.0, 25.0, 20.0])
print('New HK data no factor:\n{}\n'.format(hk.get_data()))
# set data attribute to new data
ic_package.strt = 150.0
print('New strt values:\n{}\n'.format(ic_package.strt.array))
# call set_data
sto_package.ss.set_data([0.000003, 0.000004])
print('New ss values:\n{}\n'.format(sto_package.ss.array))
```
## Modifying the Simulation Path
The simulation path folder can be changed by using the set_sim_path method in the MFFileMgmt object. The MFFileMgmt object can be obtained from the simulation object through properties:
sim.simulation_data.mfpath
```
# create new path
save_folder = os.path.join(sim_path, 'sim_modified')
# change simulation path
sim.simulation_data.mfpath.set_sim_path(save_folder)
# create folder
if not os.path.isdir(save_folder):
os.makedirs(save_folder)
```
## Adding a Model Relative Path
A model relative path lets you put all of the files associated with a model in a folder relative to the simulation folder. Warning, this will override all of your file paths to model package files and will also override any relative file paths to external model data files.
```
# Change path of model files relative to the simulation folder
model.set_model_relative_path('model_folder')
# create folder
if not os.path.isdir(save_folder):
os.makedirs(os.path.join(save_folder,'model_folder'))
# write simulation to new folder
sim.write_simulation()
# run simulation from new folder
sim.run_simulation()
```
## Post-Processing the Results
Results can be retrieved from the master simulation dictionary using a tuple key that identifies the data to be retrieved. For head data use the key
('<model name>', 'HDS', 'HEAD')
where <model name> is the name of your model. For cell by cell budget data use the key
('<model name>', 'CBC', '<flow data name>')
where <flow data name> is the name of the flow data to be retrieved (ex. 'FLOW-JA-FACE'). All available output keys can be retrieved using the output_keys method.
```
keys = sim.simulation_data.mfdata.output_keys()
```
The entries in the list above are keys for data in the head file "HDS" and data in cell by cell flow file "CBC". Keys in this list are not guaranteed to be in any particular order. The code below uses the head file key to retrieve head data and then plots head data using matplotlib.
```
import matplotlib.pyplot as plt
import numpy as np
# get all head data
head = sim.simulation_data.mfdata['example_model', 'HDS', 'HEAD']
# get the head data from the end of the model run
head_end = head[-1]
# plot the head data from the end of the model run
levels = np.arange(160,162,1)
extent = (0.0, 1000.0, 2500.0, 0.0)
plt.contour(head_end[0, :, :],extent=extent)
plt.show()
```
Results can also be retrieved using the existing binaryfile method.
```
# get head data using old flopy method
hds_path = os.path.join(sim_path, model_name + '.hds')
hds = flopy.utils.HeadFile(hds_path)
# get heads after 1.0 days
head = hds.get_data(totim=1.0)
# plot head data
plt.contour(head[0, :, :],extent=extent)
plt.show()
```
# Regression
Regression is fundamentally a way to estimate a dependent (target) variable based on its relationship to one or more predictor variables. This can be done both linearly and non-linearly, with anywhere from a single predictor to many. However, certain assumptions must be satisfied in order for the results to be trustworthy.
```
#Import Packages
import numpy as np
import pandas as pd
#Plotting
import seaborn as sns
sns.set(rc={'figure.figsize':(11,8)})
from matplotlib import pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.ticker as ticker
#Stats
from scipy import stats
import statsmodels.api as sm
import statsmodels.formula.api as smf
from statsmodels.stats.outliers_influence import variance_inflation_factor
from statsmodels.tools.tools import add_constant
from sklearn.linear_model import LinearRegression
from sklearn.datasets import make_regression
import statistics
```
## Linear Regression
Recall $y=mx+b$: here $y$ is defined by a pair of constants and $x$. Similarly, in regression the dependent target variable is estimated from a set of fitted constants and the input variable(s). Since what comes out of the model is not necessarily what was observed, we call the model's prediction $\hat{y}$. For $n$ predictors the model is $y=\beta_0 + \beta_1x_1 + ... + \beta_nx_n + \epsilon$ and the prediction is $\hat{y}=\hat{\beta_0} + \hat{\beta_1}x_1 + ... + \hat{\beta_n}x_n$, where the $\hat{\beta_i}$ are the estimated coefficients and intercept.
In the case of simple linear regression, the fitted line has the equation $\hat{y}=\hat{\beta_0}+\hat{\beta_1}x_1$, with $\epsilon$ being the distance between the observation and the prediction, i.e. $y-\hat{y}$.
Now the question is "how do we determine what the $\hat{\beta_0}\ ...\ \hat{\beta_n}$ should be?" or rather "how can we manipulate the $\hat{\beta_0}\ ...\ \hat{\beta_n}$ to minimize error?"
$\epsilon$ is defined as the difference between reality and prediction, so minimizing this distance by itself would be a good starting place. However, since we generally want to punish the algorithm more for missing points by a large margin, $punishment = (y-\hat{y})^2 = \epsilon^2$ is used. More formally, for a series of $m$ data points the Sum of Squared Errors is defined as $SSE=\sum_{i=1}^{m}(y_i-\hat{y_i})^2$. This makes the Mean Squared Error $MSE=\frac{SSE}{m}$. So, if we minimize the mean squared error ($MSE$) we find the optimal line.
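As a quick standalone illustration of these definitions (the observation and prediction arrays here are made-up placeholders):
```
# made-up observations and predictions, just to show the error measures
y_obs = np.array([3.1, 4.9, 7.2, 8.8])
y_pred = np.array([3.0, 5.0, 7.0, 9.0])
sse = np.sum((y_obs - y_pred) ** 2)  # Sum of Squared Errors
mse = sse / len(y_obs)               # Mean Squared Error
print('SSE:', round(sse, 4), 'MSE:', round(mse, 4))
```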
```
#Create a function to predict our line
def predict(x,slope,intercept):
return (slope*x+intercept)
#Generate Data
X = np.random.uniform(10,size=10)
Y = list(map(lambda x: x+ 2*np.random.randn(1)[0], X)) #f(x) = x + 2N
#Graph Cloud
sns.relplot(x='X', y='Y', data=pd.DataFrame({'X':X,'Y':Y}),color='darkblue', zorder=10)
```
Now, with a regression line
```
#Get Regression Results
slope, intercept, r_value, p_value, std_err = stats.linregress(X,Y)
#Create Regression Line
lx = [min(X)-1,max(X)+1] #X-Bounds
ly = [predict(lx[0],slope,intercept),predict(lx[1],slope,intercept)] #Predictions at Bounds
#Graph
sns.relplot(x='X', y='Y', data=pd.DataFrame({'X':X,'Y':Y}),color='darkblue', zorder=10)
sns.lineplot(lx, ly, color='r', zorder=5)
```
Then, find the $y_i-\hat{y_i}$
```
#Plot Background
sns.relplot(x='X', y='Y', data=pd.DataFrame({'X':X,'Y':Y}),color='darkblue', zorder=10)
sns.lineplot(lx, ly, color='r', zorder=5)
#Plot Distances
for i in range(len(X)):
plt.plot([X[i],X[i]],[Y[i],predict(X[i],slope,intercept)],color = 'royalblue',linestyle=':',zorder=0)
```
Finally, calculate the MSE
```
#Setup
fig, axs = plt.subplots(ncols=3, figsize=(15,5))
#Calculations
xy = pd.DataFrame({'X':X,'Y':Y}).sort_values('Y')
xy['Y`'] = list(map(lambda x,y: ((predict(x,slope,intercept))), xy['X'], xy['Y']))
xy['E'] = abs(xy['Y'] - xy['Y`'])
xy = xy.sort_values(by=['E'])
#Plot First Graph
for i in range(len(xy)):
axs[0].plot([i,i],[0,xy.iloc[i,3]], color='royalblue',linestyle='-')
axs[0].set_title('Sorted Errors')
#Plot Second Graph
for i in range(len(xy)):
axs[1].plot([i,i],[0,xy.iloc[i,3]**2], color='royalblue',linestyle='-')
axs[1].set_title('Sorted Squared Errors')
#Plot Third Graph
total = min(xy['E'])
for i in range(len(xy)):
deltah = xy.iloc[i,3]**2
axs[2].plot([i,i],[0,xy.iloc[i,3]**2], color='royalblue',linestyle='-')
axs[2].plot([max(0,i-1),i],[total,total + deltah], color='red',linestyle='-',marker='o')
total+=deltah
axs[2].set_title('Running Sum of Squared Errors')
plt.show()
#Calculate MSE
MSE = statistics.mean(list(map(lambda e: e**2, xy['E'])))
print('Sum of Squared Errors:',round(total-min(xy['E']),3))
print('Total Observations:',len(xy))
print('Mean Squared Error:',round(MSE,4))
```
Now that we have a measure of error, we would like to develop some intuition, and a formula, for how well we are predicting relative to a simple benchmark. If we imagine a horizontal line drawn exactly through the mean of the Y-values (the target) of the cloud, roughly half the points sit above it and half below. This baseline gives a total error of $\sum_{i=1}^{n}(y_i-\bar{y})^2$, which from here on we call the $SST$, or Total Sum of Squares. This is taken to be the baseline guess, and anything better is considered an improvement. As such, the ratio $\frac{SSE}{SST}$ measures success relative to that benchmark. Under the assumption that $0\leq SSE\leq SST$, $1-\frac{SSE}{SST}$ gives a $[0,1]$ measure of success generally known as $R^2$.
Let's calculate.
```
#Create Data, Figures
X = np.random.uniform(10,size=10)
Y = list(map(lambda x: x+ len(X)/10*np.random.randn(1)[0], X))
avgy = statistics.mean(Y)
fig, axs = plt.subplots(ncols=2,figsize=(10,5))
#Calculate Regressions
slope, intercept, r_value, p_value, std_err = stats.linregress(X,Y)
lx = [min(X)-1,max(X)+1]
ly = [predict(lx[0],slope,intercept),predict(lx[1],slope,intercept)]
avgy = statistics.mean(Y)
#Calculate and Format MSE
MSEr = 'MSE: '+str(round(statistics.mean(list(map(lambda x, y: (y-(predict(x,slope,intercept)))**2, X, Y))),3))
MSEa = 'MSE: '+str(round(statistics.mean(list(map(lambda y: (y-avgy)**2, Y))),3))
#Create Scatter And Lines
axs[0].scatter(X,Y, color='darkblue', zorder=10)
axs[1].scatter(X,Y, color='darkblue', zorder=10)
axs[0].plot(lx, ly, color='r', zorder=5, label=MSEr)
axs[1].plot(lx, [avgy,avgy], color='lightslategray', label=MSEa)
#Create Dotted Lines
for i in range(len(X)):
axs[0].plot([X[i],X[i]],[Y[i],predict(X[i],slope,intercept)],color = 'red',linestyle=':',zorder=0)
axs[1].plot([X[i],X[i]],[Y[i],avgy],color = 'dimgray',linestyle=':',zorder=0)
#Calculate R2
R2r = 'Linear Regression: R-squared = '+str(round(r_value**2,3))
SSTa = sum(list(map(lambda y: (y-statistics.mean(Y))**2, Y)))
SSEa = sum(list(map(lambda y: (y-statistics.mean(Y))**2, Y)))
R2a = 'Linear Regression: R-squared = '+str(round(1 - SSEa/SSTa,3))
#Format
axs[0].set(xlabel='X',ylabel='Y',title=R2r)
axs[1].set(xlabel='X',ylabel='Y',title=R2a)
axs[0].legend()
axs[1].legend()
#Paint Graphs
plt.show()
```
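Stripped of the plotting, the same $R^2$ computation is only a few lines; this sketch simply reuses the X, Y and fitted line from the cell above:
```
# manual R-squared from the definitions above
y_hat = [predict(x, slope, intercept) for x in X]
sse = sum((yi - yhi) ** 2 for yi, yhi in zip(Y, y_hat))
sst = sum((yi - statistics.mean(Y)) ** 2 for yi in Y)
print('R-squared:', round(1 - sse / sst, 3))
```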
Thankfully, we don't have to do this manually. To repeat this process and get all of the same information out automatically, we can use SciPy, a scientific computing library for Python. In this case, as you saw above, we are using the linregress function to solve the least-squares problem that gives us the minimal MSE. Let's see how this would be implemented in practice.
```
X = np.random.randn(150).tolist()
Y = list(map(lambda x: x + np.random.randn(1)[0]/1.5, X)) #f(x) = x + N(0,2/3)
reg = stats.linregress(X,Y)
title = 'Linear Regression R-Squared: '+str(round(reg.rvalue**2,3))
fig = sns.lmplot(x='X',y='Y',data=pd.DataFrame({'X':X,'Y':Y}),ci=False).set(title=title)
fig.ax.get_lines()[0].set_color('red')
```
However, not all data can be regressed. There are four criteria that must be met in order for the assumptions underlying regression to be satisfied. These can generally be analyzed through the residuals, $y-\hat{y}$.
1) Linearity
2) Independence
3) Homoscedasticity
4) Normality
We will go through each one and describe how to ensure your data is acceptable.
### Linearity
This essentially requires that the relationship between the variables is linear in nature.
#### Method 1: Residuals vs. Observed, what you are looking for is any pattern to the data.
```
#Data
X = np.random.randn(300).tolist()
#Setup
fig, axs = plt.subplots(ncols=2,figsize=(10,5))
#Accepted
Y = list(map(lambda x: x + np.random.randn(1)[0]/1.5, X))
sns.residplot(x='X',y='Y',data=pd.DataFrame({'X':X,'Y':Y})
,ax=axs[0]).set_title('Residuals of Linear Regression: Accepted')
#Rejected
Y = list(map(lambda x: x**2 + np.random.randn(1)[0]/8, X))
sns.residplot(x='X',y='Y',data=pd.DataFrame({'X':X,'Y':Y})
,ax=axs[1], color='b').set_title('Residuals of Linear Regression: Rejected')
```
#### Method 2: Observed vs. Predicted, looking for patterns
```
#Setup
fig, axs = plt.subplots(ncols=2,figsize=(10,5))
#Accept
Y = list(map(lambda x: x + np.random.randn(1)[0]/1.5, X))
slope, intercept, r_value, p_value, std_err = stats.linregress(X,Y)
Yp = list(map(lambda x: predict(x,slope,intercept), X))
sns.scatterplot(x='Y',y='Yp',data=pd.DataFrame({'Y':Y,'Yp':Yp}), ax=axs[0])
axs[0].set(title='Predictions versus Observations: Accepted')
#Reject
Y = list(map(lambda x: x**2 + np.random.randn(1)[0]/1.5, X))
slope, intercept, r_value, p_value, std_err = stats.linregress(X,Y)
Yp = list(map(lambda x: predict(x,slope,intercept), X))
sns.scatterplot(x='Y',y='Yp',data=pd.DataFrame({'Y':Y,'Yp':Yp}), ax=axs[1])
axs[1].set(title='Predictions versus Observations: Rejected')
```
### Independence
This looks at whether the $\epsilon$ is correlated to the predictors either through sorting or other methods. This is a serious consideration for time series data, but we will focus on non-time series for now.
#### Method 1) Same as above. Compare the order of observations against the residuals; there should be no pattern
#### Method 2) For more than 1 predictor, use VIF scores to check for multicollinearity amongst the variables
There is no hard and fast rule for removing variables based on VIF scores, but a good rule of thumb is that anything greater than 10 should likely be dealt with and anything greater than 5 should at least be looked at.
```
#Calculate Scores
A = np.random.randn(150).tolist()
B = list(map(lambda a: a + np.random.randn(1)[0]/20, A))
C = np.random.uniform(1,10,size=150).tolist()
data = pd.DataFrame({'A':A,'B':B,'C':C})
data = add_constant(data)
VIFs = pd.DataFrame([variance_inflation_factor(data.values, i) for i in range(data.shape[1])], index=data.columns,
columns=['VIF Score'])
print('Accept if less than 5, Reject if more than 10\n')
print(VIFs)
```
### Homoscedasticity
Check whether the magnitude of the residuals is the same across time or observations
#### Method 1) Ordered Plot
```
#Setup
fig, axs = plt.subplots(ncols=2,figsize=(10,5))
#Accept
X = np.random.randn(150)
Y = np.random.randn(150)
slope, intercept, r_value, p_value, std_err = stats.linregress(X, Y)
predicted = X*slope+intercept
residuals = np.subtract(Y,predicted)
sns.scatterplot(x='Predicted',y='Residuals', ax = axs[0],
data=pd.DataFrame({'Predicted':predicted,'Residuals':residuals})).set_title('Accept')
#Reject
X = np.random.randn(150)
Y = list(map(lambda x: x**2 + np.random.randn(1)[0], X))
slope, intercept, r_value, p_value, std_err = stats.linregress(X, Y)
predicted = X*slope+intercept
residuals = np.subtract(Y,predicted)
sns.scatterplot(x='Predicted',y='Residuals', ax = axs[1],
data=pd.DataFrame({'Predicted':predicted,'Residuals':residuals})).set_title("Reject")
```
### Normality
#### Method 1) Q-Q Plot
```
fig, axs = plt.subplots(ncols=2,figsize=(10,5))
#Accept
X = np.random.randn(150)
Y = np.random.randn(150)
slope, intercept, r_value, p_value, std_err = stats.linregress(X, Y)
predicted = X*slope+intercept
residuals = np.subtract(Y,predicted)
sm.qqplot(residuals, line='45',ax=axs[0])
#Reject
X = np.random.randn(150)
Y = list(map(lambda x: x**2 + np.random.randn(1)[0], X))
slope, intercept, r_value, p_value, std_err = stats.linregress(X, Y)
predicted = X*slope+intercept
residuals = np.subtract(Y,predicted)
sm.qqplot(residuals, line='45',ax=axs[1])
plt.show()
```
## Multiple Regression
This is exactly the same as before, but we can increase the number of predictors. For example, this is what it looks like to perform linear regression in free space, fitting a plane to a cloud of points.
```
#Generate Data
X = np.random.randn(150)
Y = np.random.randn(150)
Z = list(map(lambda y, x: 4*(y + x) + np.random.randn(1)[0], Y, X))
#Regress
reg = smf.ols(formula='Z ~ X+Y', data=pd.DataFrame({'X':X,'Y':Y,'Z':Z})).fit()
#Calculate Plane
xx = np.linspace(min(X), max(X), 8)
yy = np.linspace(min(Y), max(Y), 8)
XX, YY = np.meshgrid(xx, yy)
ZZ = XX*reg.params[1]+YY*reg.params[2]+reg.params[0]
#Create the figure
fig = plt.figure(figsize=(15,15))
ax = fig.add_subplot(111,projection='3d')
ax.set(xlabel='X',ylabel='Y',zlabel='Z')
ax.view_init(30, 315)
ax.set(title='Three Dimensional Visualization')
#Plot Plane and Points
ax.scatter(X, Y, Z, color='blue')
ax.plot_wireframe(X=XX,Y=YY,Z=ZZ)
```
We are also still able to run all of the same tests as before, looking at the diagnostic plots.
```
#1) Linearity: Residuals vs. Observed
residuals = list(map(lambda x, y, z: z-(x*reg.params[1]+y*reg.params[2]+reg.params[0]), X,Y,Z))
sns.relplot(x='Observed',y='Residuals',
data=pd.DataFrame({'Observed':Z,'Residuals':residuals}))
#2) Independence: VIF Scores
data = pd.DataFrame({'X':X,'Y':Y})
data = add_constant(data)
VIFs = pd.DataFrame([variance_inflation_factor(data.values, i) for i in range(data.shape[1])], index=data.columns,
columns=['VIF Score'])
print('Accept if less than 5, Reject if more than 10\n')
print(VIFs)
#3) Predicted vs. Residuals
predicted = X*reg.params[1]+Y*reg.params[2]+reg.params[0]
residuals = list(map(lambda x, y, z: z-(x*reg.params[1]+y*reg.params[2]+reg.params[0]), X,Y,Z))
sns.relplot(x='Predicted',y='Residuals',
data=pd.DataFrame({'Predicted':predicted,'Residuals':residuals}))
#4) Normality: QQ Plot
residuals = np.array(list(map(lambda x, y, z: z-(x*reg.params[1]+y*reg.params[2]+reg.params[0]), X,Y,Z)))
sm.qqplot(residuals, line='45')
plt.show()
```
This can be done in infinitely many dimensions, but because we cannot visualize above three dimensions (at least statically), you can only investigate this with the diagnostic plots.
## Predictor Significance Analysis
Remembering back to how you can statistically test whether two quantities are equivalent with a T test, we would like to know if each $\hat{\beta_i}$ is statistically different from zero. If it is, at whatever confidence level we choose, we can say that the predictor is useful in estimating the target variable. This is generally done automatically by any regression engine and is reported in the form of a p-value. The p-value is the probability of observing a coefficient estimate at least as extreme as the one obtained if the true coefficient were actually zero. In other words, the lower the p-value, the stronger the evidence that the coefficient is different from zero. When you hear the term statistical significance, what you are hearing is "does the p-value cross our chosen threshold?" This threshold is often taken to be 0.1, 0.05, or 0.01 depending on the application. Each predictor has its own p-value, which indicates whether or not it is useful in predicting the target variable. Let's see.
```
#Generate Data
X = np.random.randn(150)
Y = np.random.randn(150)
Z = list(map(lambda y, x: 4*(y + x) + np.random.randn(1)[0], Y, X))
#Regress
reg = smf.ols(formula='Z ~ X+Y', data=pd.DataFrame({'X':X,'Y':Y,'Z':Z})).fit()
#Grab P-values and Coefficients
confint = pd.DataFrame(reg.conf_int())
pvalues = pd.DataFrame({'P-Value':reg.pvalues.round(7)})
print(pvalues)
print(confint)
```
This is generally much easier to do in purely statistical engines like R. We will cover that soon.
## Solving Regression
There are many ways that regression equations can be solved; one of them is gradient descent, which essentially adjusts the parameters until it finds the minimum MSE. Let's visualize this. Note, this will take a minute or so to run as it calculates the MSE of a regression line for every slope/intercept combination on a grid. What develops is a smooth visualization of the MSEs. If you imagine placing a marble on the graph, the direction it rolls is the direction toward the best fit line. It will roll, and reverse, and roll, and reverse until it finally settles at the lowest point. This is gradient descent (a minimal gradient-descent sketch is included after the heatmap example below).
```
def getMSE(slope, intercept, X, Y):
return statistics.mean(list(map(lambda x, y: (y-(predict(x,slope,intercept)))**2, X, Y)))
def getHMData(slope,intercept,X,Y,step=0.1,):
slopeDist = slope[1]-slope[0]
slopeSteps = int(slopeDist/step)
interceptDist = intercept[1]-intercept[0]
interceptSteps = int(interceptDist/step)
data = []
for i in range(slopeSteps):
row = []
for j in range(interceptSteps):
row.append(getMSE(slope=slope[0]+step*i,intercept=intercept[0]+step*j,X=X,Y=Y))
data.append(row)
return pd.DataFrame(data)
#Set Limits
increment = 0.1
slopes = [-10,10]
numslopes = (slopes[1]-slopes[0])/increment
intercepts = [-10,10]
numinter = (intercepts[1]-intercepts[0])/increment
#Get Data
X = np.random.randn(500)-2*np.random.randn(1)[0]
Y = list(map(lambda x: np.random.uniform(1,4)*x + np.random.randn(1)[0], X))
data = getHMData(slope=slopes,intercept=intercepts,X=X,Y=Y,step=increment)
#Format Labels
data = data.set_index(np.linspace(slopes[0],slopes[1],int(numslopes)).round(1))
data.columns = np.linspace(intercepts[0],intercepts[1],int(numinter)).round(1)
#Heat Map of MSE
fig, axs = plt.subplots(ncols=2,figsize=(20,10))
sns.heatmap(data.iloc[::-1,:],cmap="RdYlGn_r",ax = axs[1]).set(title='MSE over Slopes and Intercepts')
axs[1].set(xlabel='Intercept',ylabel='Slope')
#Regression Plot
sns.regplot(x='X',y='Y',data=pd.DataFrame({'X':X,'Y':Y}), ax = axs[0])
slope, intercept, r_value, p_value, std_err = stats.linregress(X, Y)
title = ('Regression\nSlope: '+str(slope.round(3))+', Intercept: '+
str(intercept.round(3))+', R-Squared: '+str((r_value**2).round(3)))
axs[0].set_title(title)
#Plot Over Heatmap
axs[1].hlines([slope*-10+100], *axs[1].get_xlim(),linestyles='dashed',colors='indigo')
axs[1].vlines([intercept*10+100], *axs[1].get_ylim(),linestyles='dashed',colors='indigo')
axs[1].scatter(intercept*10+100,slope*-10+100,zorder=10,c='indigo',s=50)
```
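The heatmap above maps out the error surface; the code below is a minimal sketch of gradient descent itself on the same X and Y (the learning rate and iteration count are arbitrary illustrative choices):
```
# minimal gradient descent on the MSE of y ~ slope*x + intercept
def gradient_descent(xs, ys, lr=0.1, n_iter=5000):
    xs, ys = np.asarray(xs, dtype=float), np.asarray(ys, dtype=float)
    x_mean = xs.mean()
    xc = xs - x_mean                                 # centering keeps the steps well conditioned
    m_hat, b_hat = 0.0, 0.0
    n = len(xs)
    for _ in range(n_iter):
        error = (m_hat * xc + b_hat) - ys
        m_hat -= lr * (2 / n) * np.sum(error * xc)   # dMSE/dslope
        b_hat -= lr * (2 / n) * np.sum(error)        # dMSE/dintercept
    return m_hat, b_hat - m_hat * x_mean             # undo the centering
gd_slope, gd_intercept = gradient_descent(X, Y)
print('Gradient descent -> slope:', round(gd_slope, 3), 'intercept:', round(gd_intercept, 3))
print('linregress       -> slope:', round(slope, 3), 'intercept:', round(intercept, 3))
```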
## Your turn to solve a problem.
The below code will generate for you 3 sets of values: 'X1','X2' - Predictors, 'Y' - Target
You will need to generate a regression. Then, you should check the predictors to see which one is significant. After this, you will need to re-run the regression and confirm that the data satisfies our regression assumptions. Finally, you should create a seaborn plot of your regression.
```
Xs, Y = make_regression(n_samples=300, n_features=2, n_informative=1, noise=1)
data = pd.DataFrame(np.column_stack([Xs,Y]), columns=['X1','X2','Y'])
reg = smf.ols(formula='Y~X1+X2', data=pd.DataFrame({'X1':data['X1'],'X2':data['X2'],'Y':data['Y']})).fit()
confint = pd.DataFrame(reg.conf_int())
pvalues = pd.DataFrame({'P-Value':reg.pvalues.round(7)})
print(confint)
print(pvalues)
reg = smf.ols(formula='Y ~ X2', data=pd.DataFrame({'X2':data['X2'],'Y':data['Y']})).fit()
sm.qqplot(reg.resid, line='45')
plt.show()
sns.relplot(x='Residuals',y='Predicted',data=pd.DataFrame({'Residuals':reg.resid,'Predicted':reg.fittedvalues}))
```
```
from sklearn.svm import SVC
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from sklearn.model_selection import GridSearchCV
from sklearn import metrics
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
import re
%matplotlib inline
```
## Reading the Data
```
df = pd.read_csv('data/train.csv')
df.head(10)
print('There are {} records in total.'.format(len(df)))
print('{} passengers survived.'.format(df.Survived.sum()))
```
## Data Analysis
#### Pclass represents socio-economic status; the smaller the number, the better the status: 1 = upper, 2 = mid, 3 = lower
```
df.Pclass.value_counts(dropna=False)
df[['Pclass', 'Survived']].groupby(['Pclass']).mean().plot(kind='bar')
```
*The higher the social class, the higher the survival rate*
```
df['P1'] = (df.Pclass == 1).astype('int')
df['P2'] = (df.Pclass == 2).astype('int')
df['P3'] = (df.Pclass == 3).astype('int')
df.head()
```
#### Sex: 1 for male, 0 for female
```
df.Sex.value_counts(dropna=False)
df[['Sex', 'Survived']].groupby(['Sex']).mean().plot(kind='bar')
```
*Females had a noticeably higher survival rate*
```
df.Sex.replace(['male', 'female'], [1, 0], inplace=True)
df.head()
```
#### Age has missing values; simply fill them with the median
```
df.Age.isnull().sum()
df.Age.fillna(df.Age.median(), inplace=True)
df['Age_categories'] = pd.cut(df['Age'], 5)
df[['Age_categories', 'Survived']].groupby(['Age_categories']).mean().plot(kind='bar')
```
*Children had a relatively high survival rate*
#### SibSp: the number of siblings and spouses aboard
```
df.SibSp.isnull().sum()
```
#### Parch: the number of parents and children aboard
```
df.Parch.isnull().sum()
df['Family_size'] = df['SibSp'] + df['Parch']
df[['Family_size', 'Survived']].groupby(['Family_size']).mean().plot(kind='bar')
```
*The sibling/spouse count and the parent/child count are combined into a single family-size feature*
#### Fare: the ticket fare
```
df.Fare.isnull().sum()
df['Fare_categories'] = pd.qcut(df['Fare'], 5)
df[['Fare_categories', 'Survived']].groupby(['Fare_categories']).mean().plot(kind='bar')
```
*The higher the fare, the wealthier the passenger and the greater the chance of survival*
#### Embarked: port of embarkation; C = Cherbourg, Q = Queenstown, S = Southampton
```
df.Embarked.value_counts(dropna=False)
df[['Embarked', 'Survived']].groupby(['Embarked']).mean().plot(kind='bar')
```
*Passengers who embarked at Cherbourg had a noticeably higher survival rate*
```
df['E1'] = (df['Embarked'] == 'S').astype('int')
df['E2'] = (df['Embarked'] == 'C').astype('int')
df['E3'] = (df['Embarked'] == 'Q').astype('int')
df['E4'] = (df['Embarked'].isnull()).astype('int')
df.head()
```
#### Title: Mr, Miss, Master ...
```
def parse_title(name):
match = re.match(r'\w+\,\s(\w+)\.\s', name)
if match:
return match.group(1)
else:
return np.nan
df['Title'] = df.Name.apply(parse_title)
df.Title.fillna('NoTitle', inplace=True)
df.Title.value_counts(dropna=False)
title_set = set(df.Title)
for i, title in enumerate(title_set):
df['T'+str(i)] = (df.Title == title).astype('int')
df.head(3)
```
### Preprocessed Data
```
df_processed = df[['Survived', 'Sex', 'Age', 'Family_size', 'Fare', 'Pclass',
'E1', 'E2', 'E3', 'E4', 'T0', 'T1', 'T2', 'T3', 'T4', 'T5',
'T6', 'T7', 'T8', 'T9', 'T10', 'T11', 'T12', 'T13', 'T14']]
df_processed.head(3)
```
## Splitting into Training and Test Sets
```
total_X = df_processed.iloc[:, 1:].values
total_Y = df_processed.iloc[:, 0].values
train_X, test_X, train_Y, test_Y = train_test_split(total_X, total_Y, test_size=0.25)
```
## Data Standardization
```
X_scaler = StandardScaler()
train_X_std = X_scaler.fit_transform(train_X)
test_X_std = X_scaler.transform(test_X)
```
### Grid Search over SVM Hyperparameters
```
svm = SVC()
params = {
'C': np.logspace(1, (2 * np.random.rand()), 10),
'gamma':np.logspace(-4, (2 * np.random.rand()), 10)
}
grid_search = GridSearchCV(svm, params, cv=3)
grid_search.fit(train_X_std, train_Y)
best_score = grid_search.best_score_
best_params = grid_search.best_params_
C = best_params['C']
gamma = best_params['gamma']
C, gamma
grid_search.score(test_X_std, test_Y)
predicts = grid_search.predict(test_X_std)
print(metrics.classification_report(test_Y, predicts))
```
## Final Prediction
```
total_X_std = X_scaler.transform(df_processed.iloc[:, 1:].values)
total_Y = df_processed.iloc[:, 0]
svm = SVC(C=C, gamma=gamma)
svm.fit(total_X_std, total_Y)
svm.score(total_X_std, total_Y)
test_df = pd.read_csv('data/test.csv')
test_df.head()
test_df.Sex.replace(['male', 'female'], [1, 0], inplace=True)
test_df.Age.fillna(df.Age.median(), inplace=True)
test_df['Family_size'] = test_df['SibSp'] + test_df['Parch']
# build the same indicator features on the test set (derived from test_df, not the training df)
test_df['P1'] = (test_df.Pclass == 1).astype('int')
test_df['P2'] = (test_df.Pclass == 2).astype('int')
test_df['P3'] = (test_df.Pclass == 3).astype('int')
test_df['E1'] = (test_df['Embarked'] == 'S').astype('int')
test_df['E2'] = (test_df['Embarked'] == 'C').astype('int')
test_df['E3'] = (test_df['Embarked'] == 'Q').astype('int')
test_df['E4'] = (test_df['Embarked'].isnull()).astype('int')
test_df['Title'] = test_df.Name.apply(parse_title)
test_df.Title.fillna('NoTitle', inplace=True)
title_set = set(df.Title)
for i, title in enumerate(title_set):
test_df['T'+str(i)] = (test_df.Title == title).astype('int')
test_df.head()
test_df.isnull().sum()
test_df.Fare.fillna(df.Fare.median(), inplace=True)
test_df_processed = test_df[['Sex', 'Age', 'Family_size', 'Fare', 'Pclass',
'E1', 'E2', 'E3', 'E4', 'T0', 'T1', 'T2', 'T3', 'T4', 'T5',
'T6', 'T7', 'T8', 'T9', 'T10', 'T11', 'T12', 'T13', 'T14']]
test_df_processed.head(3)
final_X = test_df_processed.values
final_X_std = X_scaler.transform(final_X)
```
### Prediction
```
predicts = svm.predict(final_X_std)
ids = test_df['PassengerId'].values
result = pd.DataFrame({
'PassengerId': ids,
'Survived': predicts
})
result.head()
result.to_csv('./2018-8-23_6.csv', index=False)
```
<a href="https://colab.research.google.com/github/Tessellate-Imaging/monk_v1/blob/master/study_roadmaps/3_image_processing_deep_learning_roadmap/3_deep_learning_advanced/1_Blocks%20in%20Deep%20Learning%20Networks/8)%20Resnet%20V2%20Bottleneck%20Block%20(Type%20-%202).ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# Goals
### 1. Learn to implement Resnet V2 Bottleneck Block (Type - 1) using monk
- Monk's Keras
- Monk's Pytorch
- Monk's Mxnet
### 2. Use network Monk's debugger to create complex blocks
### 3. Understand how syntactically different it is to implement the same using
- Traditional Keras
- Traditional Pytorch
- Traditional Mxnet
# Resnet V2 Bottleneck Block - Type 1
- Note: The block structure can have variations too, this is just an example
```
from IPython.display import Image
Image(filename='imgs/resnet_v2_bottleneck_without_downsample.png')
```
# Table of contents
[1. Install Monk](#1)
[2. Block basic Information](#2)
- [2.1) Visual structure](#2-1)
- [2.2) Layers in Branches](#2-2)
[3) Creating Block using monk visual debugger](#3)
- [3.1) Create the first branch](#3-1)
- [3.2) Create the second branch](#3-2)
- [3.3) Merge the branches](#3-3)
- [3.4) Debug the merged network](#3-4)
- [3.5) Compile the network](#3-5)
- [3.6) Run data through the network](#3-6)
- [3.7) Visualize the network](#3-7)
[4) Creating Block Using MONK one line API call](#4)
- [Mxnet Backend](#4-1)
- [Pytorch Backend](#4-2)
- [Keras Backend](#4-3)
[5) Appendix](#5)
- [Study Material](#5-1)
- [Creating block using traditional Mxnet](#5-2)
- [Creating block using traditional Pytorch](#5-3)
- [Creating block using traditional Keras](#5-4)
<a id='1'></a>
# Install Monk
## Using pip (Recommended)
- colab (gpu)
    - All backends: `pip install -U monk-colab`
- kaggle (gpu)
- All backends: `pip install -U monk-kaggle`
- cuda 10.2
- All backends: `pip install -U monk-cuda102`
    - Gluon backend: `pip install -U monk-gluon-cuda102`
- Pytorch backend: `pip install -U monk-pytorch-cuda102`
- Keras backend: `pip install -U monk-keras-cuda102`
- cuda 10.1
- All backend: `pip install -U monk-cuda101`
    - Gluon backend: `pip install -U monk-gluon-cuda101`
- Pytorch backend: `pip install -U monk-pytorch-cuda101`
- Keras backend: `pip install -U monk-keras-cuda101`
- cuda 10.0
- All backend: `pip install -U monk-cuda100`
    - Gluon backend: `pip install -U monk-gluon-cuda100`
- Pytorch backend: `pip install -U monk-pytorch-cuda100`
- Keras backend: `pip install -U monk-keras-cuda100`
- cuda 9.2
- All backend: `pip install -U monk-cuda92`
    - Gluon backend: `pip install -U monk-gluon-cuda92`
- Pytorch backend: `pip install -U monk-pytorch-cuda92`
- Keras backend: `pip install -U monk-keras-cuda92`
- cuda 9.0
- All backend: `pip install -U monk-cuda90`
    - Gluon backend: `pip install -U monk-gluon-cuda90`
- Pytorch backend: `pip install -U monk-pytorch-cuda90`
- Keras backend: `pip install -U monk-keras-cuda90`
- cpu
- All backend: `pip install -U monk-cpu`
    - Gluon backend: `pip install -U monk-gluon-cpu`
- Pytorch backend: `pip install -U monk-pytorch-cpu`
- Keras backend: `pip install -U monk-keras-cpu`
## Install Monk Manually (Not recommended)
### Step 1: Clone the library
- git clone https://github.com/Tessellate-Imaging/monk_v1.git
### Step 2: Install requirements
- Linux
- Cuda 9.0
- `cd monk_v1/installation/Linux && pip install -r requirements_cu90.txt`
- Cuda 9.2
- `cd monk_v1/installation/Linux && pip install -r requirements_cu92.txt`
- Cuda 10.0
- `cd monk_v1/installation/Linux && pip install -r requirements_cu100.txt`
- Cuda 10.1
- `cd monk_v1/installation/Linux && pip install -r requirements_cu101.txt`
- Cuda 10.2
- `cd monk_v1/installation/Linux && pip install -r requirements_cu102.txt`
- CPU (Non gpu system)
- `cd monk_v1/installation/Linux && pip install -r requirements_cpu.txt`
- Windows
- Cuda 9.0 (Experimental support)
- `cd monk_v1/installation/Windows && pip install -r requirements_cu90.txt`
- Cuda 9.2 (Experimental support)
- `cd monk_v1/installation/Windows && pip install -r requirements_cu92.txt`
- Cuda 10.0 (Experimental support)
- `cd monk_v1/installation/Windows && pip install -r requirements_cu100.txt`
- Cuda 10.1 (Experimental support)
- `cd monk_v1/installation/Windows && pip install -r requirements_cu101.txt`
- Cuda 10.2 (Experimental support)
- `cd monk_v1/installation/Windows && pip install -r requirements_cu102.txt`
- CPU (Non gpu system)
- `cd monk_v1/installation/Windows && pip install -r requirements_cpu.txt`
- Mac
- CPU (Non gpu system)
- `cd monk_v1/installation/Mac && pip install -r requirements_cpu.txt`
- Misc
- Colab (GPU)
- `cd monk_v1/installation/Misc && pip install -r requirements_colab.txt`
- Kaggle (GPU)
- `cd monk_v1/installation/Misc && pip install -r requirements_kaggle.txt`
### Step 3: Add to system path (Required for every terminal or kernel run)
- `import sys`
- `sys.path.append("monk_v1/");`
# Imports
```
# Common
import numpy as np
import math
import netron
from collections import OrderedDict
from functools import partial
#Using mxnet-gluon backend
# When installed using pip
from monk.gluon_prototype import prototype
# When installed manually (Uncomment the following)
#import os
#import sys
#sys.path.append("monk_v1/");
#sys.path.append("monk_v1/monk/");
#from monk.gluon_prototype import prototype
```
<a id='2'></a>
# Block Information
<a id='2-1'></a>
## Visual structure
```
from IPython.display import Image
Image(filename='imgs/resnet_v2_bottleneck_without_downsample.png')
```
<a id='2-2'></a>
## Layers in Branches
- Number of branches: 2
- Common Elements
- batchnorm -> relu
- Branch 1
- identity
- Branch 2
- conv_1x1 -> batchnorm -> relu -> conv_3x3 -> batchnorm -> relu -> conv1x1
- Branches merged using
- Elementwise addition
(See Appendix to read blogs on resnets)
<a id='3'></a>
# Creating Block using monk debugger
```
# Imports and setup a project
# To use pytorch backend - replace gluon_prototype with pytorch_prototype
# To use keras backend - replace gluon_prototype with keras_prototype
from monk.gluon_prototype import prototype
# Create a sample project
gtf = prototype(verbose=1);
gtf.Prototype("sample-project-1", "sample-experiment-1");
```
<a id='3-1'></a>
## Create the first branch
```
def first_branch():
network = [];
network.append(gtf.identity());
return network;
# Debug the branch
branch_1 = first_branch()
network = [];
network.append(branch_1);
gtf.debug_custom_model_design(network);
```
<a id='3-2'></a>
## Create the second branch
```
def second_branch(output_channels=128, stride=1):
network = [];
# Bottleneck convolution
network.append(gtf.convolution(output_channels=output_channels//4, kernel_size=1, stride=stride));
network.append(gtf.batch_normalization());
network.append(gtf.relu());
    # 3x3 convolution
    network.append(gtf.convolution(output_channels=output_channels//4, kernel_size=3, stride=stride));
network.append(gtf.batch_normalization());
network.append(gtf.relu());
#Normal convolution
network.append(gtf.convolution(output_channels=output_channels, kernel_size=1, stride=1));
return network;
# Debug the branch
branch_2 = second_branch(output_channels=128, stride=1)
network = [];
network.append(branch_2);
gtf.debug_custom_model_design(network);
```
<a id='3-3'></a>
## Merge the branches
```
def final_block(output_channels=128, stride=1):
network = [];
#Common Elements
network.append(gtf.batch_normalization());
network.append(gtf.relu());
#Create subnetwork and add branches
subnetwork = [];
branch_1 = first_branch()
branch_2 = second_branch(output_channels=output_channels, stride=stride)
subnetwork.append(branch_1);
subnetwork.append(branch_2);
# Add merging element
subnetwork.append(gtf.add());
# Add the subnetwork
network.append(subnetwork)
return network;
```
<a id='3-4'></a>
## Debug the merged network
```
final = final_block(output_channels=64, stride=1)
network = [];
network.append(final);
gtf.debug_custom_model_design(network);
```
<a id='3-5'></a>
## Compile the network
```
gtf.Compile_Network(network, data_shape=(64, 224, 224), use_gpu=False);
```
<a id='3-6'></a>
## Run data through the network
```
import mxnet as mx
x = np.zeros((1, 64, 224, 224));
x = mx.nd.array(x);
y = gtf.system_dict["local"]["model"].forward(x);
print(x.shape, y.shape)
```
<a id='3-7'></a>
## Visualize network using netron
```
gtf.Visualize_With_Netron(data_shape=(64, 224, 224))
```
<a id='4'></a>
# Creating Using MONK LOW code API
<a id='4-1'></a>
## Mxnet backend
```
from monk.gluon_prototype import prototype
gtf = prototype(verbose=1);
gtf.Prototype("sample-project-1", "sample-experiment-1");
network = [];
# Single line addition of blocks
network.append(gtf.resnet_v2_bottleneck_block(output_channels=64, downsample=False));
gtf.Compile_Network(network, data_shape=(64, 224, 224), use_gpu=False);
```
<a id='4-2'></a>
## Pytorch backend
- Only the import changes
```
#Change gluon_prototype to pytorch_prototype
from monk.pytorch_prototype import prototype
gtf = prototype(verbose=1);
gtf.Prototype("sample-project-1", "sample-experiment-1");
network = [];
# Single line addition of blocks
network.append(gtf.resnet_v2_bottleneck_block(output_channels=64, downsample=False));
gtf.Compile_Network(network, data_shape=(64, 224, 224), use_gpu=False);
```
<a id='4-3'></a>
## Keras backend
- Only the import changes
```
#Change gluon_prototype to keras_prototype
from monk.keras_prototype import prototype
gtf = prototype(verbose=1);
gtf.Prototype("sample-project-1", "sample-experiment-1");
network = [];
# Single line addition of blocks
network.append(gtf.resnet_v2_bottleneck_block(output_channels=64, downsample=False));
gtf.Compile_Network(network, data_shape=(64, 224, 224), use_gpu=False);
```
<a id='5'></a>
# Appendix
<a id='5-1'></a>
## Study links
- https://towardsdatascience.com/residual-blocks-building-blocks-of-resnet-fd90ca15d6ec
- https://medium.com/@MaheshNKhatri/resnet-block-explanation-with-a-terminology-deep-dive-989e15e3d691
- https://medium.com/analytics-vidhya/understanding-and-implementation-of-residual-networks-resnets-b80f9a507b9c
- https://hackernoon.com/resnet-block-level-design-with-deep-learning-studio-part-1-727c6f4927ac
<a id='5-2'></a>
## Creating block using traditional Mxnet
- Code credits - https://mxnet.incubator.apache.org/
```
# Traditional-Mxnet-gluon
import mxnet as mx
from mxnet.gluon import nn
from mxnet.gluon.nn import HybridBlock, BatchNorm
from mxnet.gluon.contrib.nn import HybridConcurrent, Identity
from mxnet import gluon, init, nd
def _conv3x3(channels, stride, in_channels):
return nn.Conv2D(channels, kernel_size=3, strides=stride, padding=1,
use_bias=False, in_channels=in_channels)
class ResnetBlockV1(HybridBlock):
def __init__(self, channels, stride, in_channels=0, **kwargs):
super(ResnetBlockV1, self).__init__(**kwargs)
#Common Elements
self.bn0 = nn.BatchNorm();
self.relu0 = nn.Activation('relu');
#Branch - 1
#Identity
# Branch - 2
self.body = nn.HybridSequential(prefix='')
self.body.add(nn.Conv2D(channels//4, kernel_size=1, strides=stride,
use_bias=False, in_channels=in_channels))
self.body.add(nn.BatchNorm())
self.body.add(nn.Activation('relu'))
self.body.add(_conv3x3(channels//4, stride, in_channels))
self.body.add(nn.BatchNorm())
self.body.add(nn.Activation('relu'))
self.body.add(nn.Conv2D(channels, kernel_size=1, strides=stride,
use_bias=False, in_channels=in_channels))
def hybrid_forward(self, F, x):
x = self.bn0(x);
x = self.relu0(x);
residual = x
x = self.body(x)
x = residual+x
return x
# Invoke the block
block = ResnetBlockV1(64, 1)
# Initialize network and load block on machine
ctx = [mx.cpu()];
block.initialize(init.Xavier(), ctx = ctx);
block.collect_params().reset_ctx(ctx)
block.hybridize()
# Run data through network
x = np.zeros((1, 64, 224, 224));
x = mx.nd.array(x);
y = block.forward(x);
print(x.shape, y.shape)
# Export Model to Load on Netron
block.export("final", epoch=0);
netron.start("final-symbol.json", port=8082)
```
<a id='5-3'></a>
## Creating block using traditional Pytorch
- Code credits - https://pytorch.org/
```
# Traiditional-Pytorch
import torch
from torch import nn
from torch.jit.annotations import List
import torch.nn.functional as F
def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):
"""3x3 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=dilation, groups=groups, bias=False, dilation=dilation)
def conv1x1(in_planes, out_planes, stride=1):
"""1x1 convolution"""
return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)
class ResnetBottleNeckBlock(nn.Module):
expansion = 1
__constants__ = ['downsample']
def __init__(self, inplanes, planes, stride=1, groups=1,
base_width=64, dilation=1, norm_layer=None):
super(ResnetBottleNeckBlock, self).__init__()
norm_layer = nn.BatchNorm2d
#Common elements
self.bn0 = norm_layer(inplanes);
self.relu0 = nn.ReLU(inplace=True);
# Branch - 1
#Identity
# Branch - 2
self.conv1 = conv1x1(inplanes, planes//4, stride)
self.bn1 = norm_layer(planes//4)
self.relu1 = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes//4, planes//4, stride)
self.bn2 = norm_layer(planes//4)
self.relu2 = nn.ReLU(inplace=True)
self.conv3 = conv1x1(planes//4, planes)
self.stride = stride
self.relu = nn.ReLU(inplace=True)
def forward(self, x):
x = self.bn0(x);
x = self.relu0(x);
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu1(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu2(out)
out = self.conv3(out)
out += identity
return out
# Invoke the block
block = ResnetBottleNeckBlock(64, 64, stride=1);
# Initialize network and load block on machine
layers = []
layers.append(block);
net = nn.Sequential(*layers);
# Run data through network
x = torch.randn(1, 64, 224, 224)
y = net(x)
print(x.shape, y.shape);
# Export Model to Load on Netron
torch.onnx.export(net, # model being run
x, # model input (or a tuple for multiple inputs)
"model.onnx", # where to save the model (can be a file or file-like object)
export_params=True, # store the trained parameter weights inside the model file
opset_version=10, # the ONNX version to export the model to
do_constant_folding=True, # whether to execute constant folding for optimization
input_names = ['input'], # the model's input names
output_names = ['output'], # the model's output names
dynamic_axes={'input' : {0 : 'batch_size'}, # variable length axes
'output' : {0 : 'batch_size'}})
netron.start('model.onnx', port=9998);
```
<a id='5-4'></a>
## Creating block using traditional Keras
- Code credits: https://keras.io/
```
# Traditional-Keras
import keras
import keras.layers as kla
import keras.models as kmo
import tensorflow as tf
from keras.models import Model
backend = 'channels_last'
from keras import layers
def resnet_conv_block(input_tensor,
kernel_size,
filters,
stage,
block,
strides=(1, 1)):
filters1, filters2, filters3 = filters
bn_axis = 3
conv_name_base = 'res' + str(stage) + block + '_branch'
bn_name_base = 'bn' + str(stage) + block + '_branch'
#Common Elements
start = layers.BatchNormalization(axis=bn_axis, name=bn_name_base + '0a')(input_tensor)
start = layers.Activation('relu')(start)
# Branch - 1
# Identity
shortcut = start
# Branch - 2
x = layers.Conv2D(filters1, (1, 1), strides=strides,
kernel_initializer='he_normal',
name=conv_name_base + '2a')(start)
x = layers.BatchNormalization(axis=bn_axis, name=bn_name_base + '2a')(x)
x = layers.Activation('relu')(x)
x = layers.Conv2D(filters2, (3, 3), strides=strides,
kernel_initializer='he_normal',
name=conv_name_base + '2b', padding="same")(x)
x = layers.BatchNormalization(axis=bn_axis, name=bn_name_base + '2b')(x)
x = layers.Activation('relu')(x)
x = layers.Conv2D(filters3, (1, 1),
kernel_initializer='he_normal',
name=conv_name_base + '2c')(x);
x = layers.add([x, shortcut])
x = layers.Activation('relu')(x)
return x
def create_model(input_shape, kernel_size, filters, stage, block):
img_input = layers.Input(shape=input_shape);
x = resnet_conv_block(img_input, kernel_size, filters, stage, block)
return Model(img_input, x);
# Invoke the block
kernel_size=3;
filters=[16, 16, 64];
input_shape=(224, 224, 64);
model = create_model(input_shape, kernel_size, filters, 0, "0");
# Run data through network
x = tf.placeholder(tf.float32, shape=(1, 224, 224, 64))
y = model(x)
print(x.shape, y.shape)
# Export Model to Load on Netron
model.save("final.h5");
netron.start("final.h5", port=8082)
```
# Goals Completed
### 1. Learn to implement Resnet V2 Bottleneck Block (Type - 1) using monk
- Monk's Keras
- Monk's Pytorch
- Monk's Mxnet
### 2. Use network Monk's debugger to create complex blocks
### 3. Understand how syntactically different it is to implement the same using
- Traditional Keras
- Traditional Pytorch
- Traditional Mxnet
```
import pandas as pd
import geopandas as gpd
import matplotlib.pyplot as plt
import numpy as np
from pyproj import CRS
import pathlib
from pathlib import Path
from shapely import wkt
from tqdm import tqdm
import math
import codecs
import folium
from folium import features
from folium import plugins
import gzip
from xml.etree.ElementTree import Element, SubElement, Comment, tostring
import xml.etree.ElementTree as ET
# to read the excel
from openpyxl import load_workbook
from openpyxl import Workbook
# import folium
from shapely.geometry import LineString, MultiLineString
import branca.colormap as cmp
from folium.plugins import Search
import time
import datetime
from datetime import timedelta
# set the working directory
BASE_DIR = Path.cwd()
BASE_DIR
# helper functions to read the linkstats output and build an hourly dataframe
def get_foldercreation_inf():
fname = pathlib.Path("../SF_all_trips/sf-tscore-all-trips-20PCsample-updatedRideHailFleet-updatedParking__etg/ITERS/it.4/4.linkstats.csv.gz")
assert fname.exists(), f'No such file: {fname}' # check that the file exists
ctime = datetime.datetime.fromtimestamp(fname.stat().st_ctime)
return ctime
# return ctime.strftime("%Y-%m-%d")
def get_dataframe(_time):
# linkstats file
linkstats = pd.read_csv("../SF_all_trips/sf-tscore-all-trips-20PCsample-updatedRideHailFleet-updatedParking__etg/ITERS/it.4/4.linkstats.csv.gz", compression="gzip", low_memory=False)
time = int(_time)
linkstats = linkstats[linkstats["hour"]==(time)].copy()
linkstats=linkstats.add_prefix("linkstats_")
linkstats.rename(columns={('linkstats_link'): 'id'}, inplace=True)
linkstats["id"] = linkstats["id"].astype('string')
date_time = get_foldercreation_inf()
if int(_time)<24:
date_time = date_time.strftime("%Y-%m-%d")
time_stamp = f'{int(_time):02d}'
linkstats["date_time"] = (date_time + " " + "{}:00:00".format(f'{int(_time):02d}'))
else:
date_time = get_foldercreation_inf() + datetime.timedelta(days=1)
date_time = date_time.strftime("%Y-%m-%d")
new_time = int(_time) - 24
linkstats["date_time"] = (date_time + " " + "{}:00:00".format(f'{abs(int(new_time)):02d}'))
return linkstats
# read the road network
sf_roadnetwork = gpd.read_file(BASE_DIR.parent.joinpath( 'Network',"sfNetwork.geojsonl"))
sf_roadnetwork = sf_roadnetwork[["id","modes","length","lanes","from","to","capacity","geometry"]]
sftimevariantnetwork =pd.DataFrame()
for time_hour in tqdm(range(0,30)):
# get the hour and filter the linkstat file
linkstats = get_dataframe(str(time_hour))
# merge with featureclass of SF data
comparision_network = sf_roadnetwork.merge(linkstats,on="id").copy()
# calculate the freespeed (mph), congested speed (mph), and the ratio (congested speed / freespeed)
# linkstats
comparision_network["linkstats_freespeed_mph"] = comparision_network["linkstats_freespeed"]*2.23694
comparision_network["linkstats_congspd_mph"] = (comparision_network["linkstats_length"]/comparision_network["linkstats_traveltime"])*2.23694
comparision_network["linkstats_ratio"] = comparision_network["linkstats_congspd_mph"] / comparision_network["linkstats_freespeed_mph"]
comparision_network["linkstats_vc_ratio"] = comparision_network["linkstats_volume"]*5 / comparision_network["capacity"]
if int(time_hour)==0:
sftimevariantnetwork = comparision_network.copy()
else:
sftimevariantnetwork = pd.concat([sftimevariantnetwork,comparision_network], ignore_index=True)
# lastly, export the network
# sftimevariantnetwork.to_file(BASE_DIR.parent.joinpath("exported", ("sf_timevariantnetwork.geojson")), driver='GeoJSON')
linkstats.head()
# sftimevariantnetwork.to_csv(BASE_DIR.parent.joinpath("exported", ("sf_timevariantnetwork.csv")))
# read the road network, in case it has already been saved to the geojson file
# sftimevariantnetwork = gpd.read_file(BASE_DIR.parent.joinpath("exported", ("sf_timevariantnetwork.geojson")))
# keep only the selected columns
sf_timevariantnetwork = sftimevariantnetwork[["id", "modes","length","lanes","capacity","geometry",
'linkstats_freespeed','linkstats_volume', 'linkstats_traveltime',
'date_time', 'linkstats_freespeed_mph', 'linkstats_congspd_mph', 'linkstats_ratio', "linkstats_vc_ratio"]]
sf_timevariantnetwork['date_time']=pd.to_datetime(sf_timevariantnetwork['date_time']).dt.strftime('%Y-%m-%dT%H:%M:%S')
sf_timevariantnetwork["time"] = pd.to_datetime(sf_timevariantnetwork["date_time"]).dt.strftime('%Y-%m-%dT%H:%M:%S')
# add more green shades for 85% --> 100%
# green_shades = ['#008000', '#198c19', '#329932', '#4ca64c', '#66b266', '#7fbf7f', '#99cc99', '#b2d8b2', '#cce5cc', '#e5f2e5']
# colors for the congested speed / freespeed ratio
color_range_pct = ["#ff0000","#ff6666","#ffb2b2","#ffdb99","#ffc966", "#ffa500",'#e5f2e5','#cce5cc','#b2d8b2','#99cc99','#7fbf7f','#66b266','#4ca64c','#329932', '#198c19','#008000']
# color_range_pct = ["#ff0000","#ff6666","#ffb2b2","#ffdb99","#ffc966", "#ffa500","#cce5cc","#99cc99","#66b266","#008000"]
step_pct = cmp.StepColormap(
color_range_pct,
vmin=0, vmax=1,
index=[0,0.2,0.3,0.5,.6,0.7,0.80,0.85, 0.87,0.89,0.91,0.93,0.95,0.97,0.99,1.00], # breakpoints where the colors change (not used for linear colormaps)
caption='% Speeds Difference' #Caption for Color scale or Legend
)
# colors for the volume-to-capacity ratio
color_range_pct_vc = ['#008000', '#329932', '#66b266', '#99cc99', '#cce5cc', '#e5f2e5', # green shade
'#ffa500', "#ffb732",'#ffc966', '#ffdb99', "#ffedcc", # orange shade
'#ffe5e5', '#ffcccc','#ffb2b2','#ff9999','#ff6666', '#ff3232', '#ff0000' ] # red shade
# color_range_pct = ["#ff0000","#ff6666","#ffb2b2","#ffdb99","#ffc966", "#ffa500","#cce5cc","#99cc99","#66b266","#008000"]
step_pct_vc = cmp.StepColormap(
color_range_pct_vc,
vmin=0, vmax=1,
index=[0,0.1,0.2,0.3,0.4,0.5,
0.55,0.6,0.65,0.7,0.75,
0.80,0.85,0.90,0.95,0.97,0.99,1.00], # breakpoints where the colors change (not used for linear colormaps)
caption='Volume-to-Capacity ratio' #Caption for Color scale or Legend
)
# colors for congested speed and freespeed
color_range = ["#ff0000","#ff6666","#ffb2b2","#ffa500","#ffc966","#ffdb99", "#cce5cc","#99cc99","#66b266","#008000"]
step = cmp.StepColormap(color_range, vmin=0, vmax=100, index=[0,5,10,15,25,35,45,55,65,100], # breakpoints where the colors change (not used for linear colormaps)
caption=' Speeds (mph)' #Caption for Color scale or Legend
)
def getColorMap_pct(x):
return str(step_pct(x))
def getColorMap_pct_vc(x):
return str(step_pct_vc(x))
def getColorMap(x):
return str(step(x))
sf_timevariantnetwork["fillColor_ratio"] = sf_timevariantnetwork["linkstats_ratio"].apply(getColorMap_pct)
sf_timevariantnetwork["fillColor_vc_ratio"] = sf_timevariantnetwork["linkstats_vc_ratio"].apply(getColorMap_pct_vc)
sf_timevariantnetwork["fillColor_freespeed_mph"] = sf_timevariantnetwork["linkstats_freespeed_mph"].apply(getColorMap)
sf_timevariantnetwork["fillColor_congspd_mph"] = sf_timevariantnetwork["linkstats_congspd_mph"].apply(getColorMap)
def coords(geom):
return list(geom.coords)
sf_timevariantnetwork['points'] = sf_timevariantnetwork.apply(lambda row: coords(row.geometry), axis=1)
# group by link id and aggregate the per-hour columns
df1 = sf_timevariantnetwork.groupby('id').agg({'modes':'first',
'length':'first',
'lanes':list,
'capacity':list,
'geometry':'first',
'linkstats_freespeed':list,
'linkstats_volume':list,
'linkstats_traveltime':list,
'date_time':list,
'linkstats_freespeed_mph':list,
'linkstats_congspd_mph':list,
'linkstats_ratio':list,
'linkstats_vc_ratio':list,
'time':list,
'fillColor_ratio':list,
'fillColor_freespeed_mph':list,
'fillColor_congspd_mph':list,
'fillColor_vc_ratio':list,
'points':'first'}).reset_index()
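# df1 now holds one row per link id, with the per-hour values gathered into lists (one entry per simulated hour)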
# Create timemap for ratio_congestedspeed_freespeed
def coords(geom):
return list(geom.coords)
features_ratio = [
{
'type':'Feature',
"geometry":{
'type': 'LineString',
'coordinates': coords(d.geometry),
},
'properties': {
'times': d['time'],
'color': "black",
'colors':d["fillColor_ratio"],
"weight":0.6,
"fillOpacity": 0.4,
}
}
for _,d in df1.iterrows()
]
from jinja2 import Template
_template = Template("""
{% macro script(this, kwargs) %}
L.Control.TimeDimensionCustom = L.Control.TimeDimension.extend({
_getDisplayDateFormat: function(date){
var newdate = new moment(date);
console.log(newdate)
return newdate.format("{{this.date_options}}");
}
});
{{this._parent.get_name()}}.timeDimension = L.timeDimension(
{
period: {{ this.period|tojson }},
}
);
var timeDimensionControl = new L.Control.TimeDimensionCustom(
{{ this.options|tojson }}
);
{{this._parent.get_name()}}.addControl(this.timeDimensionControl);
var geoJsonLayer = L.geoJson({{this.data}}, {
pointToLayer: function (feature, latLng) {
if (feature.properties.icon == 'marker') {
if(feature.properties.iconstyle){
return new L.Marker(latLng, {
icon: L.icon(feature.properties.iconstyle)});
}
//else
return new L.Marker(latLng);
}
if (feature.properties.icon == 'circle') {
if (feature.properties.iconstyle) {
return new L.circleMarker(latLng, feature.properties.iconstyle)
};
//else
return new L.circleMarker(latLng);
}
//else
return new L.Marker(latLng);
},
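                // NOTE: the style callback below is invoked once per animation frame and steps each
                // feature through its per-hour "colors" list; the initial color "black" is not in the
                // list, so indexOf returns -1 and colors[0] is used on the first frame.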
style: function(feature) {
lastIdx=feature.properties.colors.length-1
currIdx=feature.properties.colors.indexOf(feature.properties.color);
                if(currIdx==lastIdx){
                    // wrap back to the first color when the animation loops
                    feature.properties.color = feature.properties.colors[0]
                }
                else{
                    feature.properties.color = feature.properties.colors[currIdx+1]
                }
return {color: feature.properties.color}
},
onEachFeature: function(feature, layer) {
if (feature.properties.popup) {
layer.bindPopup(feature.properties.popup);
}
}
})
var {{this.get_name()}} = L.timeDimension.layer.geoJson(
geoJsonLayer,
{
updateTimeDimension: true,
addlastPoint: {{ this.add_last_point|tojson }},
duration: {{ this.duration }},
}
).addTo({{this._parent.get_name()}});
{% endmacro %}
""")
import folium
from folium.plugins import TimestampedGeoJson
m = folium.Map(location=[37.760015, -122.447110], zoom_start=13, tiles="cartodbpositron")
t=TimestampedGeoJson({
'type': 'FeatureCollection',
'features': features_ratio,
}, transition_time=1500,loop=True,period='PT1H', add_last_point=False,auto_play=True)
t._template=_template
t.add_to(m)
step_pct.add_to(m)
# Add title
map_title = "Ratio between Congested Speed (mph) and Free Speed (mph)"
title_html = '''
<h3 align="center" style="font-size:16px"><b>{}</b></h3>
'''.format(map_title)
m.get_root().html.add_child(folium.Element(title_html))
file_name = BASE_DIR.parent.joinpath("exported", ("linkstat_ratio_timemap.html"))
m.save(str(file_name))
# m
# Create timemap for v/c ratio
def coords(geom):
return list(geom.coords)
features_ratio = [
{
'type':'Feature',
"geometry":{
'type': 'LineString',
'coordinates': coords(d.geometry),
},
'properties': {
'times': d['time'],
'color': "black",
'colors':d["fillColor_vc_ratio"],
"weight":0.6,
"fillOpacity": 0.4,
}
}
for _,d in df1.iterrows()
]
from jinja2 import Template
_template = Template("""
{% macro script(this, kwargs) %}
L.Control.TimeDimensionCustom = L.Control.TimeDimension.extend({
_getDisplayDateFormat: function(date){
var newdate = new moment(date);
console.log(newdate)
return newdate.format("{{this.date_options}}");
}
});
{{this._parent.get_name()}}.timeDimension = L.timeDimension(
{
period: {{ this.period|tojson }},
}
);
var timeDimensionControl = new L.Control.TimeDimensionCustom(
{{ this.options|tojson }}
);
{{this._parent.get_name()}}.addControl(this.timeDimensionControl);
var geoJsonLayer = L.geoJson({{this.data}}, {
pointToLayer: function (feature, latLng) {
if (feature.properties.icon == 'marker') {
if(feature.properties.iconstyle){
return new L.Marker(latLng, {
icon: L.icon(feature.properties.iconstyle)});
}
//else
return new L.Marker(latLng);
}
if (feature.properties.icon == 'circle') {
if (feature.properties.iconstyle) {
return new L.circleMarker(latLng, feature.properties.iconstyle)
};
//else
return new L.circleMarker(latLng);
}
//else
return new L.Marker(latLng);
},
style: function(feature) {
lastIdx=feature.properties.colors.length-1
currIdx=feature.properties.colors.indexOf(feature.properties.color);
                if(currIdx==lastIdx){
                    // wrap back to the first color when the animation loops
                    feature.properties.color = feature.properties.colors[0]
                }
                else{
                    feature.properties.color = feature.properties.colors[currIdx+1]
                }
return {color: feature.properties.color}
},
onEachFeature: function(feature, layer) {
if (feature.properties.popup) {
layer.bindPopup(feature.properties.popup);
}
}
})
var {{this.get_name()}} = L.timeDimension.layer.geoJson(
geoJsonLayer,
{
updateTimeDimension: true,
addlastPoint: {{ this.add_last_point|tojson }},
duration: {{ this.duration }},
}
).addTo({{this._parent.get_name()}});
{% endmacro %}
""")
import folium
from folium.plugins import TimestampedGeoJson
m = folium.Map(location=[37.760015, -122.447110], zoom_start=13, tiles="cartodbpositron")
t=TimestampedGeoJson({
'type': 'FeatureCollection',
'features': features_ratio,
}, transition_time=1500,loop=True,period='PT1H', add_last_point=False,auto_play=True)
t._template=_template
t.add_to(m)
step_pct_vc.add_to(m)
# Add title
map_title = "Volume-to-Capacity Ratio"
title_html = '''
<h3 align="center" style="font-size:16px"><b>{}</b></h3>
'''.format(map_title)
m.get_root().html.add_child(folium.Element(title_html))
file_name = BASE_DIR.parent.joinpath("exported","extended_run",("linkst_vc_ratio_timemap.html"))
m.save(str(file_name))
# m
# Create timemap for freespeed (mph)
def coords(geom):
return list(geom.coords)
features_freespeed = [
{
'type':'Feature',
"geometry":{
'type': 'LineString',
'coordinates': coords(d.geometry),
},
'properties': {
'times': d['time'],
'color': "black",
'colors':d["fillColor_freespeed_mph"],
'weight':0.6,
"fillOpacity": 0.4,
}
}
for _,d in df1.iterrows()
]
from jinja2 import Template
_template = Template("""
{% macro script(this, kwargs) %}
L.Control.TimeDimensionCustom = L.Control.TimeDimension.extend({
_getDisplayDateFormat: function(date){
var newdate = new moment(date);
console.log(newdate)
return newdate.format("{{this.date_options}}");
}
});
{{this._parent.get_name()}}.timeDimension = L.timeDimension(
{
period: {{ this.period|tojson }},
}
);
var timeDimensionControl = new L.Control.TimeDimensionCustom(
{{ this.options|tojson }}
);
{{this._parent.get_name()}}.addControl(this.timeDimensionControl);
var geoJsonLayer = L.geoJson({{this.data}}, {
pointToLayer: function (feature, latLng) {
if (feature.properties.icon == 'marker') {
if(feature.properties.iconstyle){
return new L.Marker(latLng, {
icon: L.icon(feature.properties.iconstyle)});
}
//else
return new L.Marker(latLng);
}
if (feature.properties.icon == 'circle') {
if (feature.properties.iconstyle) {
return new L.circleMarker(latLng, feature.properties.iconstyle)
};
//else
return new L.circleMarker(latLng);
}
//else
return new L.Marker(latLng);
},
style: function(feature) {
lastIdx=feature.properties.colors.length-1
currIdx=feature.properties.colors.indexOf(feature.properties.color);
                if(currIdx==lastIdx){
                    // wrap back to the first color when the animation loops
                    feature.properties.color = feature.properties.colors[0]
                }
                else{
                    feature.properties.color = feature.properties.colors[currIdx+1]
                }
return {color: feature.properties.color}
},
onEachFeature: function(feature, layer) {
if (feature.properties.popup) {
layer.bindPopup(feature.properties.popup);
}
}
})
var {{this.get_name()}} = L.timeDimension.layer.geoJson(
geoJsonLayer,
{
updateTimeDimension: true,
addlastPoint: {{ this.add_last_point|tojson }},
duration: {{ this.duration }},
}
).addTo({{this._parent.get_name()}});
{% endmacro %}
""")
import folium
from folium.plugins import TimestampedGeoJson
m = folium.Map(location=[37.760015, -122.447110], zoom_start=13, tiles="cartodbpositron")
# Add title
map_title = "Free Speed (mph)"
title_html = '''
<h3 align="center" style="font-size:16px"><b>{}</b></h3>
'''.format(map_title)
m.get_root().html.add_child(folium.Element(title_html))
t=TimestampedGeoJson({
'type': 'FeatureCollection',
'features': features_freespeed,
}, transition_time=1500,loop=True,period='PT1H', add_last_point=False,auto_play=True)
t._template=_template
t.add_to(m)
step.add_to(m)
file_name = BASE_DIR.parent.joinpath("exported", ("linkstat_freespeed_timemap.html"))
m.save(str(file_name))
# m
# Create timemap for congested speed (mph)
def coords(geom):
return list(geom.coords)
features_congestedspeed = [
{
'type':'Feature',
"geometry":{
'type': 'LineString',
'coordinates': coords(d.geometry),
},
'properties': {
'times': d['time'],
'color': "black",
'colors':d["fillColor_congspd_mph"],
'weight':0.6,
"fillOpacity": 0.4,
}
}
for _,d in df1.iterrows()
]
from jinja2 import Template
_template = Template("""
{% macro script(this, kwargs) %}
L.Control.TimeDimensionCustom = L.Control.TimeDimension.extend({
_getDisplayDateFormat: function(date){
var newdate = new moment(date);
console.log(newdate)
return newdate.format("{{this.date_options}}");
}
});
{{this._parent.get_name()}}.timeDimension = L.timeDimension(
{
period: {{ this.period|tojson }},
}
);
var timeDimensionControl = new L.Control.TimeDimensionCustom(
{{ this.options|tojson }}
);
{{this._parent.get_name()}}.addControl(this.timeDimensionControl);
var geoJsonLayer = L.geoJson({{this.data}}, {
pointToLayer: function (feature, latLng) {
if (feature.properties.icon == 'marker') {
if(feature.properties.iconstyle){
return new L.Marker(latLng, {
icon: L.icon(feature.properties.iconstyle)});
}
//else
return new L.Marker(latLng);
}
if (feature.properties.icon == 'circle') {
if (feature.properties.iconstyle) {
return new L.circleMarker(latLng, feature.properties.iconstyle)
};
//else
return new L.circleMarker(latLng);
}
//else
return new L.Marker(latLng);
},
style: function(feature) {
lastIdx=feature.properties.colors.length-1
currIdx=feature.properties.colors.indexOf(feature.properties.color);
                if(currIdx==lastIdx){
                    // wrap back to the first color when the animation loops
                    feature.properties.color = feature.properties.colors[0]
                }
                else{
                    feature.properties.color = feature.properties.colors[currIdx+1]
                }
return {color: feature.properties.color}
},
onEachFeature: function(feature, layer) {
if (feature.properties.popup) {
layer.bindPopup(feature.properties.popup);
}
}
})
var {{this.get_name()}} = L.timeDimension.layer.geoJson(
geoJsonLayer,
{
updateTimeDimension: true,
addlastPoint: {{ this.add_last_point|tojson }},
duration: {{ this.duration }},
}
).addTo({{this._parent.get_name()}});
{% endmacro %}
""")
import folium
from folium.plugins import TimestampedGeoJson
m = folium.Map(location=[37.760015, -122.447110], zoom_start=13, tiles="cartodbpositron")
t=TimestampedGeoJson({
'type': 'FeatureCollection',
'features': features_congestedspeed,
}, transition_time=1500,loop=True,period='PT1H', add_last_point=False,auto_play=True)
t._template=_template
t.add_to(m)
step.add_to(m)
# Add title
map_title = "Congested Speed (mph)"
title_html = '''
<h3 align="center" style="font-size:16px"><b>{}</b></h3>
'''.format(map_title)
m.get_root().html.add_child(folium.Element(title_html))
file_name = BASE_DIR.parent.joinpath("exported", ("linkstat_congestedspeed_timemap.html"))
m.save(str(file_name))
# m
# get map for each different time
# static maps for congestedspeed/freespeed
def get_dataframe(_time):
# linkstats file
linkstats = pd.read_csv("../SF_all_trips/sf-tscore-all-trips-20PCsample-updatedRideHailFleet-updatedParking__etg/ITERS/it.4/4.linkstats.csv.gz", compression="gzip", low_memory=False)
# unmodified_linkstats = pd.read_csv(BASE_DIR.parent.joinpath("runs", "sf-tscore-int-int-trips-model-network-events-20PC-sample-bpr-func__tlm","ITERS","it.30", "30.linkstats_unmodified.csv.gz"),compression="gzip", low_memory=False)
time = int(_time)
linkstats = linkstats[linkstats["hour"]==(time)].copy()
linkstats=linkstats.add_prefix("linkstats_")
linkstats.rename(columns={('linkstats_link'): 'id'}, inplace=True)
linkstats["id"] = linkstats["id"].astype('string')
return linkstats
def highlight_function(feature):
return {"fillColor": "#ffff00", "color": "#ffff00", "weight": 5,"fillOpacity": 0.40 }
color_range_pct = ["#ff0000","#ff6666","#ffb2b2","#ffdb99","#ffc966", "#ffa500",'#e5f2e5','#cce5cc','#b2d8b2','#99cc99','#7fbf7f','#66b266','#4ca64c','#329932', '#198c19','#008000']
step_pct = cmp.StepColormap(
color_range_pct,
vmin=0, vmax=1,
index=[0,0.2,0.3,0.5,.6,0.7,0.80,0.85, 0.87,0.89,0.91,0.93,0.95,0.97,0.99,1.00], # breakpoints where the colors change (not used for linear colormaps)
caption='% Speeds Difference' #Caption for Color scale or Legend
)
# read the road network
sf_roadnetwork = gpd.read_file(BASE_DIR.parent.joinpath("Network", "sfNetwork.geojsonl"))
sf_roadnetwork = sf_roadnetwork[["id","modes","length","lanes","from","to","capacity","geometry"]]
for time_hour in tqdm(range(0,30)):
# set the map
pct_m = folium.Map([37.760015, -122.447110], zoom_start=13, tiles="cartodbpositron")
# get the hour and filter the linkstat file
linkstats = get_dataframe(str(time_hour))
# merge with featureclass of SF data
comparision_network = sf_roadnetwork.merge(linkstats,on="id")
# calculate the freespeed (mph), congested speed (mph), and the ratio (congested speed / freespeed)
# linkstats
comparision_network["linkstats_freespeed_mph"] = comparision_network["linkstats_freespeed"]*2.23694
comparision_network["linkstats_congspd_mph"] = (comparision_network["linkstats_length"]/comparision_network["linkstats_traveltime"])*2.23694
comparision_network["linkstats_ratio"] = comparision_network["linkstats_congspd_mph"] / comparision_network["linkstats_freespeed_mph"]
time_stamp = ""
# folium
if time_hour<30:
time_stamp = f'{time_hour:02d}'
layer_name = str(time_stamp)
# layer_name=str(str(time_hour) + (' am' if time_hour < 12 else ' pm'))
ratio_feature_group = folium.FeatureGroup(name=layer_name)
pct_feature_group = folium.GeoJson(comparision_network,
name = ("Hour - " + layer_name),
style_function=lambda x: {
"fillColor": step_pct(x["properties"]["linkstats_ratio"]),
"color": step_pct(x["properties"]["linkstats_ratio"]),
"fillOpacity": 0.2,
"weight":1,
},
tooltip=folium.GeoJsonTooltip(fields=["id","length", "linkstats_freespeed_mph", "linkstats_traveltime","linkstats_congspd_mph"],
aliases=["Link ID", "Segment Length (m)", "Freespeed (mph)", "Travel time (sec)", "Congested Speed (mph)"], localize=True),
popup = folium.GeoJsonPopup(fields=["id","length", "linkstats_freespeed_mph", "linkstats_traveltime","linkstats_congspd_mph"],
aliases=["Link ID", "Segment Length (m)", "Freespeed (mph)", "Travel time (sec)", "Congested Speed (mph)"], localize=True),
highlight_function=highlight_function,
zoom_on_click=True
).add_to(ratio_feature_group)
# Add search functionality to the map
search_link = Search(layer=pct_feature_group, geom_type="LineString", placeholders = "Search for Link ID",
collapsed="False", search_label = 'id', search_zoom = 17, position='topleft',
).add_to(pct_m)
ratio_feature_group.add_to(pct_m)
folium.LayerControl().add_to(pct_m)
map_title = "Ratio between Congested Speed and Free Speed"
title_html = '''<h3 align="center" style="font-size:16px"><b>{}</b></h3>'''.format(map_title)
pct_m.get_root().html.add_child(folium.Element(title_html))
# save the file
file_name = BASE_DIR.parent.joinpath("exported", ("linkstat_ratio_timemap_{}.html").format(time_stamp))
pct_m.save(str(file_name))
```
<a href="https://colab.research.google.com/github/malcolmrite-dsi/RockVideoClassifier/blob/main/RocksResnetTrainer.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
```
from google.colab import drive
drive.mount('/content/drive')
import tensorflow as tf
import numpy as np
from tensorflow import keras
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import Flatten, Dropout
from tensorflow.keras.applications.resnet import preprocess_input
from tensorflow.keras.applications import xception
import pandas as pd
import PIL
import matplotlib.pyplot as plt
import os
import shutil
```
# Training
```
train_datagen = keras.preprocessing.image.ImageDataGenerator(validation_split=0.2, preprocessing_function=preprocess_input)
train_generator = train_datagen.flow_from_directory(
'/content/drive/My Drive/Module 2 shared folder/samples',
subset="training",
seed=3,
target_size=(64, 64),
batch_size=64,
class_mode='categorical')
val_generator = train_datagen.flow_from_directory( '/content/drive/My Drive/Module 2 shared folder/samples',
subset="validation",
seed=3,
target_size=(64, 64),
batch_size=64,
class_mode='categorical')
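# NOTE: the generator below re-reads the dataset without a validation split,
# presumably to train on all available images (assumption); it overwrites the
# train_generator defined above.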
train_datagen = keras.preprocessing.image.ImageDataGenerator(preprocessing_function=preprocess_input)
train_generator = train_datagen.flow_from_directory(
'/content/drive/My Drive/Module 2 shared folder/samples',
subset="training",
seed=3,
target_size=(64, 64),
batch_size=64,
class_mode='categorical')
resnet = keras.applications.ResNet50(include_top=False, pooling="max", input_shape=(64,64,3))
# mark loaded layers as not trainable
for layer in resnet.layers:
layer.trainable = False
data_augmentation = tf.keras.Sequential([
keras.layers.experimental.preprocessing.RandomFlip("horizontal_and_vertical"),
#keras.layers.experimental.preprocessing.RandomRotation(0.2),
])
# mark loaded layers as not trainable
#for layer in resnet.layers:
#layer.trainable = False
flat = Flatten()(resnet.layers[-1].output)
dense = Dense(1024, activation='relu')(flat)
output = Dense(5, activation='softmax')(dense)
model = Model(inputs=resnet.inputs, outputs=output)
model.summary()
model.compile(loss="categorical_crossentropy", optimizer=keras.optimizers.Adam(), metrics=["categorical_accuracy"])
checkpoint_best = keras.callbacks.ModelCheckpoint("/content/drive/My Drive/model_best.h5",
monitor='loss', verbose=0, save_best_only=True, save_weights_only=False, save_freq='epoch')
checkpoint = keras.callbacks.ModelCheckpoint("/content/drive/My Drive/model_last.h5",
verbose=0, save_best_only=False, save_weights_only=False, save_freq='epoch')
model.fit(
train_generator,
epochs = 5,
validation_data=val_generator,
callbacks=[checkpoint_best]
)
model.evaluate(val_generator)
model.fit(
train_generator,
initial_epoch=10,
epochs = 20,
validation_data=val_generator, callbacks=[checkpoint, checkpoint_best]
)
model.save("/content/drive/My Drive/model_best_64.h5")
```
<img src='./img/EU-Copernicus-EUM_3Logos.png' alt='Logo EU Copernicus EUMETSAT' align='right' width='40%'></img>
<br>
<a href="./00_index.ipynb"><< Index</a><span style="float:right;"><a href="./02_AC_SAF_GOME-2_L2_produce_gridded_dataset_L3.ipynb">02 - AC SAF GOME-2 - Produce gridded dataset (L3)>></a>
<br>
# Optional: Introduction to Python and Project Jupyter
## Project Jupyter
<div class="alert alert-block alert-success" align="center">
<b><i>"Project Jupyter exists to develop open-source software, open-standards, and services for interactive computing across dozens of programming languages."</i></b>
</div>
<br>
Project Jupyter offers different tools to facilitate interactive computing, either with a web-based application (`Jupyter Notebooks`), an interactive development environment (`JupyterLab`) or via a `JupyterHub` that brings interactive computing to groups of users.
<br>
<center><img src='./img/jupyter_environment.png' alt='Logo Jupyter environment' width='60%'></img></center>
* **Jupyter Notebook** is an open-source web application that allows you to create and share documents that contain live code, equations, visualizations and narrative text.
* **JupyterLab 1.0: Jupyter’s Next-Generation Notebook Interface** <br> JupyterLab is a web-based interactive development environment for Jupyter notebooks, code, and data.
* **JupyterHub** <br>JupyterHub brings the power of notebooks to groups of users. It gives users access to computational environments and resources without burdening the users with installation and maintenance tasks. <br> Users - including students, researchers, and data scientists - can get their work done in their own workspaces on shared resources which can be managed efficiently by system administrators.
<hr>
## Why Jupyter Notebooks?
* Started with Python support, now **supports over 40 programming languages, including Python, R, Julia, ...**
* Notebooks can **easily be shared via GitHub, NBViewer, etc.**
* **Code, data and visualizations are combined in one place**
* A great tool for **teaching**
* **JupyterHub allows you to access an environment ready to code**
## Installation
### Installing Jupyter using Anaconda
Anaconda comes with the Jupyter Notebook installed. You just have to download Anaconda and follow the installation instructions. Once installed, the Jupyter Notebook can be started with:
```
jupyter notebook
```
### Installing Jupyter with pip
Experienced Python users may want to install Jupyter using Python's package manager `pip`.
With `Python3` you do:
```
python3 -m pip install --upgrade pip
python3 -m pip install jupyter
```
To run the notebook, run the same command as with Anaconda in the terminal:
```
jupyter notebook
```
## Jupyter notebooks UI
* Notebook dashboard
* Create new notebook
* Notebook editor (UI)
* Menu
* Toolbar
* Notebook area and cells
* Cell types
* Code
* Markdown
* Edit (green) vs. Command mode (blue)
<br>
<div style='text-align:center;'>
<figure><img src='./img/notebook_ui.png' width='100%'/>
<figcaption><i>Notebook editor User Interface (UI)</i></figcaption>
</figure>
</div>
## Shortcuts
Get an overview of the shortcuts by hitting `H` or go to `Help/Keyboard shortcuts`
#### Most useful shortcuts
* `Esc` - switch to command mode
* `B` - insert below
* `A` - insert above
* `M` - Change current cell to Markdown
* `Y` - Change current cell to code
* `DD` - Delete cell
* `Enter` - go back to edit mode
* `Esc + F` - Find and replace on your code
* `Shift + Down / Up` - Select multiple cells
* `Shift + M` - Merge multiple cells
## Cell magics
Magic commands can make your life a lot easier, as you only have one command instead of an entire function or multiple lines of code.<br>
> Go to an [extensive overview of magic commands]()
### Some of the handy ones
**Overview of available magic commands**
```
%lsmagic
```
**See and set environment variables**
```
%env
```
**Install and list libraries**
```
!pip install numpy
!pip list | grep pandas
```
**Write cell content to a Python file**
```
%%writefile hello_world.py
print('Hello World')
```
**Load a Python file**
```
%pycat hello_world.py
```
**Get the time of cell execution**
```
%%time
tmpList = []
for i in range(100):
tmpList.append(i+i)
print(tmpList)
```
**Show matplotlib plots inline**
```
%matplotlib inline
```
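For example, a minimal plot to check that figures render inline:
```
%matplotlib inline
import matplotlib.pyplot as plt
plt.plot([0, 1, 2, 3], [0, 1, 4, 9])
plt.title('Inline plot example')
plt.show()
```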
<br>
## Sharing Jupyter Notebooks
### Sharing static Jupyter Notebooks
* [nbviewer](https://nbviewer.jupyter.org/) - A simple way to share Jupyter Notebooks. You can simply paste the GitHub location of your Jupyter notebook there and it is nicely rendered.
* [GitHub](https://github.com/) - GitHub offers an internal rendering of Jupyter Notebooks. There are some limitations and time delays of the proper rendering. Thus, we would suggest to use `nbviewer` to share nicely rendered Jupyter Notebooks.
### Reproducible Jupyter Notebooks
<img src="./img/mybinder_logo.png" align="left" width="30%"></img>
[Binder](https://mybinder.org/) allows you to open notebooks hosted on a Git repo in an executable environment, making the code immediately reproducible by anyone, anywhere.
Binder builds a Docker image of the repo where the notebooks are hosted.
<br>
## Resources
* [Project Jupyter](https://jupyter.org/)
* [JupyterHub](https://jupyterhub.readthedocs.io/en/stable/)
* [JupyterLab](https://jupyterlab.readthedocs.io/en/stable/)
* [nbviewer](https://nbviewer.jupyter.org/)
* [Binder](https://mybinder.org/)
<br>
<a href="./00_index.ipynb"><< Index</a><span style="float:right;"><a href="./02_AC_SAF_GOME-2_L2_produce_gridded_dataset_L3.ipynb">02 - AC SAF GOME-2 - Produce gridded dataset (L3)>></a>
<hr>
<img src='./img/copernicus_logo.png' alt='Logo EU Copernicus' align='right' width='20%'><br><br><br>
<br>
<p style="text-align:right;">This project is licensed under the <a href="./LICENSE">MIT License</a> and is developed under a Copernicus contract.
<center>
<a href="http://www.insa-toulouse.fr/" ><img src="http://www.math.univ-toulouse.fr/~besse/Wikistat/Images/logo-insa.jpg" style="float:left; max-width: 120px; display: inline" alt="INSA"/></a>
<a href="http://wikistat.fr/" ><img src="http://www.math.univ-toulouse.fr/~besse/Wikistat/Images/wikistat.jpg" style="max-width: 250px; display: inline" alt="Wikistat"/></a>
<a href="http://www.math.univ-toulouse.fr/" ><img src="http://www.math.univ-toulouse.fr/~besse/Wikistat/Images/logo_imt.jpg" style="float:right; max-width: 200px; display: inline" alt="IMT"/> </a>
</center>
# [Workshops: Big Data Technologies](https://github.com/wikistat/Ateliers-Big-Data)
# Movie Recommendation by Collaborative Filtering: [NMF](http://wikistat.fr/pdf/st-m-explo-nmf.pdf) from the [SparkML](https://spark.apache.org/docs/latest/ml-guide.html) library of <a href="http://spark.apache.org/"><img src="http://spark.apache.org/images/spark-logo-trademark.png" style="max-width: 100px; display: inline" alt="Spark"/></a>
## 1. Introduction
This notebook deals with a classic collaborative-filtering recommendation problem using the [Spark MLlib](http://spark.apache.org/docs/latest/api/python/pyspark.mllib.html#pyspark.mllib.recommendation.ALS) library through the pyspark API. The general problem is described in the [introduction](https://github.com/wikistat/Ateliers-Big-Data/tree/master/3-MovieLens) and in a [Wikistat](http://wikistat.fr/) [tutorial](http://wikistat.fr/pdf/st-m-datSc3-colFil.pdf). It is applied to the public data of the [GroupLens](http://grouplens.org/datasets/movielens/) site. The goal is to test the methods and the optimization procedure on the smallest dataset, made up of 100k ratings given by 943 users to 1682 movies, where every user has rated at least 20 movies. The larger datasets (1M, 10M, 20M ratings) can be used to scale up in volume.
This notebook is inspired by the examples in the [documentation](http://spark.apache.org/docs/latest/api/python/pyspark.mllib.html#pyspark.mllib.recommendation.ALS) and by a [tutorial](https://github.com/jadianes/spark-movie-lens/blob/master/notebooks/building-recommender.ipynb) by [Jose A. Dianes](https://www.codementor.io/jadianes). The topic was also covered at a [Spark Summit](https://databricks-training.s3.amazonaws.com/movie-recommendation-with-mllib.html).
The goal is to use these data alone to make recommendations. The initial data take the form of a **very sparse** matrix of ratings. **Careful**: the "0" entries of the matrix are not ratings but *missing data*; the movie has simply not been seen or rated yet.
An algorithm that meets the goal of *completing a large sparse matrix*, implemented in freely available software, is provided by the R package [softImpute](https://cran.r-project.org/web/packages/softImpute/index.html). Its use is described in another [notebook](https://github.com/wikistat/Ateliers-Big-Data/blob/master/3-MovieLens/Atelier-MovieLens-softImpute.ipynb). The [NMF](http://wikistat.fr/pdf/st-m-explo-nmf.pdf) version in [Spark MLlib](http://spark.apache.org/docs/latest/api/python/pyspark.mllib.html#pyspark.mllib.recommendation.ALS) also supports completion.
In contrast, the NMF version included in [Scikit-learn](http://scikit-learn.org/stable/modules/generated/sklearn.decomposition.NMF.html) also handles [sparse matrices](http://docs.scipy.org/doc/scipy/reference/sparse.html), but the (least-squares) criterion it optimizes treats the "0" entries as zero ratings, not as missing data. *It is not suited to the completion problem*, unlike the MLlib version. One would probably have to use the Python library [nonnegfac](https://github.com/kimjingu/nonnegfac-python) by [Kim et al. (2014)](http://link.springer.com/content/pdf/10.1007%2Fs10898-013-0035-4.pdf); **to be tested**!
In the first part, the smallest file is split into three samples: training, validation and test; the rank of the factorization (the number of latent factors) is optimized by minimizing the error estimated on the validation sample.
The largest file is then used to evaluate the impact of the training-set size.
## 2. Importing the Data into HDFS
The data must be stored in a location accessible from all the nodes of the cluster so that the distributed dataset (RDD) can be built. In a *standalone* use of *Spark*, they are simply loaded into the current directory.
```
sc
# Download the files if not already done
# Set here the folder where you want to store the downloaded file.
DATA_PATH=""
import urllib.request
# reduced file (100k ratings)
f = urllib.request.urlretrieve("http://www.math.univ-toulouse.fr/~besse/Wikistat/data/ml-ratings100k.csv",DATA_PATH+"ml-ratings100k.csv")
```
The data are read as a single line of text before being restructured into the proper *sparse matrix* format, namely a list of triplets containing the row index, the column index and the rating for the known values only.
```
# Import the data as text into an RDD
small_ratings_raw_data = sc.textFile(DATA_PATH+"ml-ratings100k.csv")
# Identify and display the first line (header)
small_ratings_raw_data_header = small_ratings_raw_data.take(1)[0]
print(small_ratings_raw_data_header)
# Create RDD without header
all_lines = small_ratings_raw_data.filter(lambda l : l!=small_ratings_raw_data_header)
# Split the fields (user, item, rating) into a new RDD
from pyspark.sql import Row
split_lines = all_lines.map(lambda l : l.split(","))
ratingsRDD = split_lines.map(lambda p: Row(user=int(p[0]), item=int(p[1]),
rating=float(p[2]), timestamp=int(p[3])))
# .cache(): the RDD is kept in memory once processed
ratingsRDD.cache()
# Display the two first rows
ratingsRDD.take(2)
# Convert RDD to DataFrame
ratingsDF = spark.createDataFrame(ratingsRDD)
ratingsDF.take(2)
```
## 3. Optimizing the Rank on the 100k Sample
The file contains 100,000 ratings from about a thousand users on the movies they have seen, out of roughly 1700.
### 3.1 Building the Samples
Random split into three samples: training, validation and test. The rank parameter is optimized by minimizing the error estimated on the validation sample. This strategy, rather than cross-validation, is better suited to massive data.
```
tauxTrain=0.6
tauxVal=0.2
tauxTes=0.2
# If the total is less than 1, the data are subsampled.
(trainDF, validDF, testDF) = ratingsDF.randomSplit([tauxTrain, tauxVal, tauxTes])
# validation and test sets to predict, without the ratings
validDF_P = validDF.select("user", "item")
testDF_P = testDF.select("user", "item")
trainDF.take(2), validDF_P.take(2), testDF_P.take(2)
```
### 3.2 Optimizing the Rank of the NMF
The imputation error, and hence the recommendation error, is estimated on the validation sample for different values (a grid) of the rank of the matrix factorization.
In principle, the value of the regularization parameter, taken as 0.1 by default, should also be optimized.
*Important point:* the fitting error of the factorization only takes into account the values listed in the sparse matrix, not the "0" entries, which are missing data.
```
from pyspark.ml.recommendation import ALS
import math
import collections
# Random seed initialization
seed = 5
# Maximum number of iterations (ALS)
maxIter = 10
# Regularization parameter; should also be optimized
regularization_parameter = 0.1
# Grid of rank values to optimize over
ranks = [4, 8, 12]
# Initialize variables
# dictionary to store the error for each tested rank
errors = collections.defaultdict(float)
tolerance = 0.02
min_error = float('inf')
best_rank = -1
best_iteration = -1
from pyspark.ml.evaluation import RegressionEvaluator
for rank in ranks:
als = ALS( rank=rank, seed=seed, maxIter=maxIter,
regParam=regularization_parameter)
model = als.fit(trainDF)
    # Predict the validation sample
predDF = model.transform(validDF).select("prediction","rating")
    # Remove unpredicted rows caused by users absent from the training set
pred_without_naDF = predDF.na.drop()
    # Compute the RMSE
evaluator = RegressionEvaluator(metricName="rmse", labelCol="rating",
predictionCol="prediction")
rmse = evaluator.evaluate(pred_without_naDF)
print("Root-mean-square error for rank %d = "%rank + str(rmse))
errors[rank] = rmse
if rmse < min_error:
min_error = rmse
best_rank = rank
# Best solution
print('Optimal rank: %s' % best_rank)
```
### 3.3 Results and Test
```
# A few predictions
pred_without_naDF.take(3)
```
Final prediction of the test sample.
```
# Concatenate the training and validation DataFrames
trainValidDF = trainDF.union(validDF)
# Fit a model on the enlarged training DataFrame with the rank fixed at its optimal value
als = ALS( rank=best_rank, seed=seed, maxIter=maxIter,
regParam=regularization_parameter)
model = als.fit(trainValidDF)
# Prediction on the test DataFrame
predDF = model.transform(testDF).select("prediction","rating")
# Remove unpredicted rows caused by users absent from the training set
pred_without_naDF = predDF.na.drop()
# Compute the RMSE
evaluator = RegressionEvaluator(metricName="rmse", labelCol="rating",
predictionCol="prediction")
rmse = evaluator.evaluate(pred_without_naDF)
print("Root-mean-square error for rank %d = "%best_rank + str(rmse))
```
## 4. Analysis of the Full File
MovieLens provides a much larger file with 20M ratings (138,000 users, 27,000 movies). This file is used to extract a test set of two million ratings to reconstruct. The previously optimized parameters (they could no doubt be tuned further) are applied to a succession of fits and predictions with an increasing training-sample size. It would have been more elegant to automate the work in a loop, but with the largest data volumes poorly controlled Spark behaviour can cause out-of-memory crashes.
### 4.1 Reading the Data
The file is preprocessed in the same way as before.
```
# Download the files if not already done
import urllib.request
# full (but compressed) file
f = urllib.request.urlretrieve("http://www.math.univ-toulouse.fr/~besse/Wikistat/data/ml-ratings20M.zip",DATA_PATH+"ml-ratings20M.zip")
#Unzip downloaded file
import zipfile
zip_ref = zipfile.ZipFile(DATA_PATH+"ml-ratings20M.zip", 'r')
zip_ref.extractall(DATA_PATH)
zip_ref.close()
# Import the data as text into an RDD
ratings_raw_data = sc.textFile(DATA_PATH+"ratings20M.csv")
# Identify and display the first line (header)
ratings_raw_data_header = ratings_raw_data.take(1)[0]
ratings_raw_data_header
# Create RDD without header
all_lines = ratings_raw_data.filter(lambda l : l!=ratings_raw_data_header)
# Split the fields (user, item, rating) into a new RDD
split_lines = all_lines.map(lambda l : l.split(","))
ratingsRDD = split_lines.map(lambda p: Row(user=int(p[0]), item=int(p[1]),
rating=float(p[2]), timestamp=int(p[3])))
# Display the two first rows
ratingsRDD.take(2)
# Convert RDD to DataFrame
ratingsDF = spark.createDataFrame(ratingsRDD)
ratingsDF.take(2)
```
### 4.2 Sampling
Extraction of the test sample and, optionally, subsampling of the training sample.
```
tauxTest=0.1
# If the total is less than 1, the data are subsampled.
(trainTotDF, testDF) = ratingsDF.randomSplit([1-tauxTest, tauxTest])
# Subsample the training set so that we can
# test with increasing sizes of this sample
tauxEch=0.2
(trainDF, DropData) = trainTotDF.randomSplit([tauxEch, 1-tauxEch])
testDF.take(2), trainDF.take(2)
```
### 4.3 Fitting the Model
The model is fitted using the parameter values obtained in the previous step.
```
import time
time_start=time.time()
# Random seed initialization
seed = 5
# Maximum number of iterations (ALS)
maxIter = 10
# Regularization parameter (default value)
regularization_parameter = 0.1
best_rank = 8
# Fit the model with the rank chosen previously
als = ALS(rank=best_rank, seed=seed, maxIter=maxIter,
regParam=regularization_parameter)
model = als.fit(trainDF)
time_end=time.time()
time_als=(time_end - time_start)
print("ALS prend %d s" %(time_als))
```
### 4.4 Predicting the Test Sample and the Error
```
# Predict the test sample
predDF = model.transform(testDF).select("prediction","rating")
# Remove unpredicted rows caused by users absent from the training set
pred_without_naDF = predDF.na.drop()
# Compute the RMSE
evaluator = RegressionEvaluator(metricName="rmse", labelCol="rating",
predictionCol="prediction")
rmse = evaluator.evaluate(pred_without_naDF)
print("Root-mean-square error for rank %d = "%best_rank + str(rmse))
trainDF.count()
```
Some results showing how the computation time and the prediction error evolve with the size of the training sample. Note that the optimal parameter values probably depend on the training-sample size.
Size | Time (s) | RMSE
-------|-------|------
217439 | 70 | 1.65
1029416| 73 | 1.06
2059855| 72 | 1.05
4119486| 89 | 0.88
6176085| 99 | 0.85
10301909| 117 | 0.83
12361034| 125 | 0.83
14414907| 137 | 0.82
16474087| 148 | 0.818
18538142| 190 | 0.816
20596263| 166 | 0.82
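A quick way to visualize these scaling results (a minimal matplotlib sketch using the values from the table above):
```
import matplotlib.pyplot as plt

# Values copied from the table above: training-set size and test RMSE
sizes = [217439, 1029416, 2059855, 4119486, 6176085, 10301909,
         12361034, 14414907, 16474087, 18538142, 20596263]
rmse = [1.65, 1.06, 1.05, 0.88, 0.85, 0.83, 0.83, 0.82, 0.818, 0.816, 0.82]

plt.semilogx(sizes, rmse, "o-")
plt.xlabel("Training sample size")
plt.ylabel("Test RMSE")
plt.title("ALS (rank 8): prediction error vs. training size")
plt.show()
```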
# Experiments comparing the performance of traditional pooling operations and entropy pooling within a shallow neural network and LeNet. The experiments use CIFAR-10 and CIFAR-100.
```
%matplotlib inline
import torch
import torchvision
import torchvision.transforms as transforms
transform = transforms.Compose(
[transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
trainset = torchvision.datasets.CIFAR100(root='./data', train=True,
download=True, transform=transform)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=4,
shuffle=True, num_workers=8)
testset = torchvision.datasets.CIFAR100(root='./data', train=False,
download=True, transform=transform)
testloader = torch.utils.data.DataLoader(testset, batch_size=4,
shuffle=False, num_workers=8)
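# NOTE: these are the CIFAR-10 class names; they are not used in the CIFAR-100 run below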
classes = ('plane', 'car', 'bird', 'cat',
'deer', 'dog', 'frog', 'horse', 'ship', 'truck')
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.modules.utils import _pair, _quadruple
import time
from skimage.measure import shannon_entropy
from scipy import stats
import numpy as np
class EntropyPool2d(nn.Module):
def __init__(self, kernel_size=3, stride=1, padding=0, same=False, entr='high'):
super(EntropyPool2d, self).__init__()
self.k = _pair(kernel_size)
self.stride = _pair(stride)
self.padding = _quadruple(padding) # convert to l, r, t, b
self.same = same
self.entr = entr
def _padding(self, x):
if self.same:
ih, iw = x.size()[2:]
if ih % self.stride[0] == 0:
ph = max(self.k[0] - self.stride[0], 0)
else:
ph = max(self.k[0] - (ih % self.stride[0]), 0)
if iw % self.stride[1] == 0:
pw = max(self.k[1] - self.stride[1], 0)
else:
pw = max(self.k[1] - (iw % self.stride[1]), 0)
pl = pw // 2
pr = pw - pl
pt = ph // 2
pb = ph - pt
padding = (pl, pr, pt, pb)
else:
padding = self.padding
return padding
def forward(self, x):
# using existing pytorch functions and tensor ops so that we get autograd,
# would likely be more efficient to implement from scratch at C/Cuda level
start = time.time()
x = F.pad(x, self._padding(x), mode='reflect')
x_detached = x.cpu().detach()
x_unique, x_indices, x_inverse, x_counts = np.unique(x_detached,
return_index=True,
return_inverse=True,
return_counts=True)
freq = torch.FloatTensor([x_counts[i] / len(x_inverse) for i in x_inverse]).cuda()
x_probs = freq.view(x.shape)
x_probs = x_probs.unfold(2, self.k[0], self.stride[0]).unfold(3, self.k[1], self.stride[1])
x_probs = x_probs.contiguous().view(x_probs.size()[:4] + (-1,))
        if self.entr == 'high':
            x_probs, indices = torch.min(x_probs.cuda(), dim=-1)
        elif self.entr == 'low':
            x_probs, indices = torch.max(x_probs.cuda(), dim=-1)
        else:
            raise Exception('Unknown entropy mode: {}'.format(self.entr))
x = x.unfold(2, self.k[0], self.stride[0]).unfold(3, self.k[1], self.stride[1])
x = x.contiguous().view(x.size()[:4] + (-1,))
indices = indices.view(indices.size() + (-1,))
pool = torch.gather(input=x, dim=-1, index=indices)
return pool.squeeze(-1)
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import time
from sklearn.metrics import f1_score
MAX = 'max'
AVG = 'avg'
HIGH_ENTROPY = 'high_entr'
LOW_ENTROPY = 'low_entr'
class Net1Pool(nn.Module):
def __init__(self, num_classes=10, pooling=MAX):
super(Net1Pool, self).__init__()
self.conv1 = nn.Conv2d(3, 30, 5)
if pooling is MAX:
self.pool = nn.MaxPool2d(2, 2)
elif pooling is AVG:
self.pool = nn.AvgPool2d(2, 2)
elif pooling is HIGH_ENTROPY:
self.pool = EntropyPool2d(2, 2, entr='high')
elif pooling is LOW_ENTROPY:
self.pool = EntropyPool2d(2, 2, entr='low')
self.fc0 = nn.Linear(30 * 14 * 14, num_classes)
def forward(self, x):
x = self.pool(F.relu(self.conv1(x)))
x = x.view(-1, 30 * 14 * 14)
x = F.relu(self.fc0(x))
return x
class Net2Pool(nn.Module):
def __init__(self, num_classes=10, pooling=MAX):
super(Net2Pool, self).__init__()
self.conv1 = nn.Conv2d(3, 50, 5, 1)
self.conv2 = nn.Conv2d(50, 50, 5, 1)
if pooling is MAX:
self.pool = nn.MaxPool2d(2, 2)
elif pooling is AVG:
self.pool = nn.AvgPool2d(2, 2)
elif pooling is HIGH_ENTROPY:
self.pool = EntropyPool2d(2, 2, entr='high')
elif pooling is LOW_ENTROPY:
self.pool = EntropyPool2d(2, 2, entr='low')
self.fc1 = nn.Linear(5*5*50, 500)
self.fc2 = nn.Linear(500, num_classes)
def forward(self, x):
x = F.relu(self.conv1(x))
x = self.pool(x)
x = F.relu(self.conv2(x))
x = self.pool(x)
x = x.view(-1, 5*5*50)
x = F.relu(self.fc1(x))
x = self.fc2(x)
return x
def configure_net(net, device):
net.to(device)
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9)
return net, optimizer, criterion
def train(net, optimizer, criterion, trainloader, device, epochs=10, logging=2000):
for epoch in range(epochs):
running_loss = 0.0
for i, data in enumerate(trainloader, 0):
start = time.time()
inputs, labels = data
inputs, labels = inputs.to(device), labels.to(device)
optimizer.zero_grad()
outputs = net(inputs)
loss = criterion(outputs, labels)
loss.backward()
optimizer.step()
running_loss += loss.item()
if i % logging == logging - 1:
print('[%d, %5d] loss: %.3f duration: %.5f' %
(epoch + 1, i + 1, running_loss / logging, time.time() - start))
running_loss = 0.0
print('Finished Training')
def test(net, testloader, device):
correct = 0
total = 0
predictions = []
l = []
with torch.no_grad():
for data in testloader:
images, labels = data
images, labels = images.to(device), labels.to(device)
outputs = net(images)
_, predicted = torch.max(outputs.data, 1)
total += labels.size(0)
correct += (predicted == labels).sum().item()
predictions.extend(predicted.cpu().numpy())
l.extend(labels.cpu().numpy())
print('Accuracy: {}'.format(100 * correct / total))
epochs = 10
logging = 15000
num_classes = 100
print('- - - - - - - - -- - - - 2 pool - - - - - - - - - - - - - - - -')
print('- - - - - - - - -- - - - MAX - - - - - - - - - - - - - - - -')
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
net, optimizer, criterion = configure_net(Net2Pool(num_classes=num_classes, pooling=MAX), device)
train(net, optimizer, criterion, trainloader, device, epochs=epochs, logging=logging)
test(net, testloader, device)
print('- - - - - - - - -- - - - AVG - - - - - - - - - - - - - - - -')
net, optimizer, criterion = configure_net(Net2Pool(num_classes=num_classes, pooling=AVG), device)
train(net, optimizer, criterion, trainloader, device, epochs=epochs, logging=logging)
test(net, testloader, device)
print('- - - - - - - - -- - - - HIGH - - - - - - - - - - - - - - - -')
net, optimizer, criterion = configure_net(Net2Pool(num_classes=num_classes, pooling=HIGH_ENTROPY), device)
train(net, optimizer, criterion, trainloader, device, epochs=epochs, logging=logging)
test(net, testloader, device)
print('- - - - - - - - -- - - - LOW - - - - - - - - - - - - - - - -')
net, optimizer, criterion = configure_net(Net2Pool(num_classes=num_classes, pooling=LOW_ENTROPY), device)
train(net, optimizer, criterion, trainloader, device, epochs=epochs, logging=logging)
test(net, testloader, device)
print('- - - - - - - - -- - - - 1 pool - - - - - - - - - - - - - - - -')
print('- - - - - - - - -- - - - MAX - - - - - - - - - - - - - - - -')
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
net, optimizer, criterion = configure_net(Net1Pool(num_classes=num_classes, pooling=MAX), device)
train(net, optimizer, criterion, trainloader, device, epochs=epochs, logging=logging)
test(net, testloader, device)
print('- - - - - - - - -- - - - AVG - - - - - - - - - - - - - - - -')
net, optimizer, criterion = configure_net(Net1Pool(num_classes=num_classes, pooling=AVG), device)
train(net, optimizer, criterion, trainloader, device, epochs=epochs, logging=logging)
test(net, testloader, device)
print('- - - - - - - - -- - - - HIGH - - - - - - - - - - - - - - - -')
net, optimizer, criterion = configure_net(Net1Pool(num_classes=num_classes, pooling=HIGH_ENTROPY), device)
train(net, optimizer, criterion, trainloader, device, epochs=epochs, logging=logging)
test(net, testloader, device)
print('- - - - - - - - -- - - - LOW - - - - - - - - - - - - - - - -')
net, optimizer, criterion = configure_net(Net1Pool(num_classes=num_classes, pooling=LOW_ENTROPY), device)
train(net, optimizer, criterion, trainloader, device, epochs=epochs, logging=logging)
test(net, testloader, device)
```
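As a quick sanity check of the custom layer defined above, the output shape of EntropyPool2d can be compared with standard max pooling on a random batch. A minimal sketch (note that EntropyPool2d.forward as written moves tensors to CUDA, so a GPU is assumed):
```
# Shape check for the custom entropy pooling layer (a CUDA device is assumed,
# since EntropyPool2d.forward calls .cuda() internally)
import torch
import torch.nn as nn

x = torch.randn(4, 3, 32, 32).cuda()
max_pool = nn.MaxPool2d(2, 2)
entr_pool = EntropyPool2d(2, 2, entr='high')

print(max_pool(x).shape)   # torch.Size([4, 3, 16, 16])
print(entr_pool(x).shape)  # expected to match the max-pooling output shape
```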
<a href="https://colab.research.google.com/github/AIWintermuteAI/aXeleRate/blob/dev/resources/aXeleRate_mark_detector.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
## M.A.R.K. Detection model Training and Inference
In this notebook we will use aXeleRate, a Keras-based framework for AI on the edge, to quickly set up model training and then, after the training session is completed, convert the model to .tflite and .kmodel formats.
First, let's take care of some administrative details.
1) Before we do anything, make sure you have chosen GPU as the runtime type (in Runtime -> Change runtime type).
2) We need to mount Google Drive for saving our model checkpoints and final converted model(s). Press the Mount Google Drive button in the Files tab on your left.
In the next cell we clone the aXeleRate GitHub repository and import it.
**It is possible to use pip install or python setup.py install, but in that case you will need to restart the environment.** Since I'm trying to make the process as streamlined as possible, I'm using sys.path.append for the import.
```
%load_ext tensorboard
#we need imgaug 0.4 for image augmentations to work properly, see https://stackoverflow.com/questions/62580797/in-colab-doing-image-data-augmentation-with-imgaug-is-not-working-as-intended
!pip uninstall -y imgaug && pip uninstall -y albumentations && pip install imgaug==0.4
!git clone https://github.com/AIWintermuteAI/aXeleRate.git
import sys
sys.path.append('/content/aXeleRate')
from axelerate import setup_training, setup_inference
```
At this step you typically need to get the dataset. You can use the !wget command to download it from somewhere on the Internet, or !cp to copy it from My Drive, as in this example:
```
!cp -r /content/drive/'My Drive'/pascal_20_segmentation.zip .
!unzip --qq pascal_20_segmentation.zip
```
Dataset preparation and postprocessing are discussed in the article here:
The annotation tool I use is LabelImg
https://github.com/tzutalin/labelImg
Let's visualize our detection model's test dataset. There are images in the validation folder with corresponding PASCAL-VOC annotations in the validation annotations folder.
```
%matplotlib inline
!gdown https://drive.google.com/uc?id=1s2h6DI_1tHpLoUWRc_SavvMF9jYG8XSi #dataset
!gdown https://drive.google.com/uc?id=1-bDRZ9Z2T81SfwhHEfZIMFG7FtMQ5ZiZ #pre-trained model
!unzip --qq mark_dataset.zip
from axelerate.networks.common_utils.augment import visualize_detection_dataset
visualize_detection_dataset(img_folder='mark_detection/imgs_validation', ann_folder='mark_detection/ann_validation', num_imgs=10, img_size=224, augment=True)
```
The next step is defining a config dictionary. Most entries are self-explanatory.
Type is model frontend - Classifier, Detector or Segnet
Architecture is model backend (feature extractor)
- Full Yolo
- Tiny Yolo
- MobileNet1_0
- MobileNet7_5
- MobileNet5_0
- MobileNet2_5
- SqueezeNet
- NASNetMobile
- DenseNet121
- ResNet50
For more information on anchors, please read here
https://github.com/pjreddie/darknet/issues/568
Labels are labels present in your dataset.
IMPORTANT: Please, list all the labels present in the dataset.
object_scale determines how much to penalize wrong prediction of confidence of object predictors
no_object_scale determines how much to penalize wrong prediction of confidence of non-object predictors
coord_scale determines how much to penalize wrong position and size predictions (x, y, w, h)
class_scale determines how much to penalize wrong class prediction
For converter type you can choose the following:
'k210', 'tflite_fullint', 'tflite_dynamic', 'edgetpu', 'openvino', 'onnx'
## Parameters for Person Detection
K210, which is where we will run the network, has constrained memory (about 5.5 MB of RAM available), so with the MicroPython firmware the largest model you can run is about 2 MB. This limits our architecture choice to Tiny Yolo, MobileNet (up to 0.75 alpha) and SqueezeNet. Of these three architectures, only one comes with a pre-trained model: MobileNet. So, to save training time, we will use MobileNet with alpha 0.75, which has ... parameters. For objects that do not have that much variety, you can use MobileNet with a lower alpha, down to 0.25.
```
config = {
"model":{
"type": "Detector",
"architecture": "MobileNet5_0",
"input_size": 224,
"anchors": [0.57273, 0.677385, 1.87446, 2.06253, 3.33843, 5.47434, 7.88282, 3.52778, 9.77052, 9.16828],
"labels": ["mark"],
"coord_scale" : 1.0,
"class_scale" : 1.0,
"object_scale" : 5.0,
"no_object_scale" : 1.0
},
"weights" : {
"full": "",
"backend": "imagenet"
},
"train" : {
"actual_epoch": 50,
"train_image_folder": "mark_detection/imgs",
"train_annot_folder": "mark_detection/ann",
"train_times": 1,
"valid_image_folder": "mark_detection/imgs_validation",
"valid_annot_folder": "mark_detection/ann_validation",
"valid_times": 1,
"valid_metric": "mAP",
"batch_size": 32,
"learning_rate": 1e-3,
"saved_folder": F"/content/drive/MyDrive/mark_detector",
"first_trainable_layer": "",
"augumentation": True,
"is_only_detect" : False
},
"converter" : {
"type": ["k210","tflite"]
}
}
```
Let's check what GPU we have been assigned in this Colab session, if any.
```
from tensorflow.python.client import device_lib
device_lib.list_local_devices()
```
Also, let's open TensorBoard, where we will be able to watch the model training progress in real time. Training and validation logs will also be saved in the project folder.
Since there are no logs before we start the training, TensorBoard will be empty. Refresh it after the first epoch.
```
%tensorboard --logdir logs
```
Finally, we start the training by passing the config dictionary we defined earlier to the setup_training function. The function will start the training with Checkpoint, Reduce Learning Rate on Plateau and Early Stopping callbacks. After the training has stopped, it will convert the best model into the format(s) you have specified in the config and save it to the project folder.
```
from keras import backend as K
K.clear_session()
model_path = setup_training(config_dict=config)
```
After training, it is good to check the actual performance of your model by running inference on your validation dataset and visualizing the results. This is exactly what the next block does. Obviously, since our model has only been trained on a few images, the results are far from stellar; with a good dataset you will get better results.
```
from keras import backend as K
K.clear_session()
setup_inference(config, model_path)
```
My end results are:
{'fscore': 0.942528735632184, 'precision': 0.9318181818181818, 'recall': 0.9534883720930233}
**You can obtain these results by loading a pre-trained model.**
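A minimal sketch of doing that, reusing the setup_inference call from above (the filename below is hypothetical - point it at whatever file the gdown cell actually downloaded):
```
from keras import backend as K
K.clear_session()
# hypothetical path to the pre-trained weights downloaded earlier with gdown
pretrained_model_path = '/content/mark_detector_pretrained.h5'
setup_inference(config, pretrained_model_path)
```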
Good luck and happy training! Have a look at these articles, which will help you get the most out of Google Colab or connect to a local runtime if no GPUs are available:
https://medium.com/@oribarel/getting-the-most-out-of-your-google-colab-2b0585f82403
https://research.google.com/colaboratory/local-runtimes.html
# Example of how to use the CGMFtk package
# Table of Contents
1. [Import Modules](#import)
2. [Read the History File](#read)
3. [Summary Table](#table)
4. [Histogram Fission Fragment Properties](#ffHistograms)
5. [Correlated Observables](#correlations)
6. [Neutron Properties](#neutrons)
7. [Gamma Properties](#gammas)
8. [Gamma-ray Timing Information](#timing)
9. [Angular Correlations](#angles)
### 1. Import modules for the notebook
<a id='import'></a>
```
import numpy as np
import os
import matplotlib.pyplot as plt
from CGMFtk import histories as fh
# also define some plotting features
import matplotlib as mpl
mpl.rcParams['font.size'] = 12
mpl.rcParams['font.family'] = 'Helvetica','serif'
mpl.rcParams['font.weight'] = 'normal'
mpl.rcParams['axes.labelsize'] = 18.
mpl.rcParams['xtick.labelsize'] = 18.
mpl.rcParams['ytick.labelsize'] = 18.
mpl.rcParams['lines.linewidth'] = 2.
mpl.rcParams['xtick.major.pad'] = '10'
mpl.rcParams['ytick.major.pad'] = '10'
mpl.rcParams['image.cmap'] = 'BuPu'
# define a working directory where the history files are stored
workdir = './'
histFile = 'histories.out'
timeFile = 'histories.out'
yieldFile = 'yields.cgmf.0'
nevents = int(1E6)
```
### 2. Read CGMF history file
<a id='read'></a>
```
# run Cf-252 sf with a single OM param file (#42)
directory = "/home/beykyle/db/projects/OM/KDOMPuq/KDUQSamples"
for filename in os.scandir(directory):
if filename.is_file() and "42" in filename.path:
#if filename.is_file():
print(filename.path)
os.system("mpirun -np 8 --use-hwthread-cpus cgmf.mpi.x -t -1 -i 98252 -e 0.0 -n 100 -o" + filename.path)
os.system("cat histories.cgmf.* > histories.out")
os.system("rm histories.cgmf.*")
print("Analyzing histories")
hist = fh.Histories(workdir + histFile, nevents=nevents)
# print the number of events in the file
print ('This file contains ',str(hist.getNumberEvents()),' events and ',str(hist.getNumberFragments()),' fission fragments')
```
With the option 'nevents', the number of fission events that are read can be specified:
`hist = fh.Histories('92235_1MeV.cgmf', nevents=5000)`
### 3. Summary Table
<a id='table'></a>
```
# provide a summary table of the fission events
hist.summaryTable()
```
### 4. Fission Fragment Properties
<a id='ffHistograms'></a>
With the histogram function from matplotlib, we can easily plot distributions of the fission fragment characteristics.
```
# plot the distributions of the fission fragments
A = hist.getA() # get A of all fragments
AL = hist.getALF() # get A of light fragments
AH = hist.getAHF() # get A of heavy fragments
fig = plt.figure(figsize=(8,6))
bins = np.arange(min(A),max(A))
h,b = np.histogram(A,bins=bins,density=True)
plt.plot(b[:-1],h,'-o')
plt.xlabel('Mass (A)')
plt.ylabel('Frequency')
plt.show()
Z = hist.getZ()
ZL = hist.getZLF()
ZH = hist.getZHF()
fig = plt.figure(figsize=(8,6))
bins = np.arange(min(Z),max(Z))
h,b = np.histogram(Z,bins=bins,density=True)
plt.plot(b[:-1],h,'-o')
plt.xlabel('Charge (Z)')
plt.ylabel('Frequency')
plt.show()
fig = plt.figure(figsize=(8,6))
TKEpre = hist.getTKEpre() # TKE before neutron emission
TKEpost = hist.getTKEpost() # TKE after neutron emission
bins = np.arange(min(TKEpre),max(TKEpre))
h,b = np.histogram(TKEpre,bins=bins,density=True)
plt.plot(0.5*(b[:-1]+b[1:]),h,'-o',label='Before neutron emission')
bins = np.arange(min(TKEpost),max(TKEpost))
h,b = np.histogram(TKEpost,bins=bins,density=True)
plt.plot(0.5*(b[:-1]+b[1:]),h,'-o',label='After neutron emission')
plt.legend()
plt.xlabel('Total Kinetic Energy (MeV)')
plt.ylabel('Frequency')
plt.show()
```
With the 2D histogram feature, we can see correlations between the calculated features from CGMF.
```
TKEpre = hist.getTKEpre()
TXE = hist.getTXE()
bx = np.arange(min(TKEpre),max(TKEpre))
by = np.arange(min(TXE),max(TXE))
fig = plt.figure(figsize=(8,6))
plt.hist2d(TKEpre,TXE,bins=(bx,by),density=True)
plt.xlabel('Total Kinetic Energy (MeV)')
plt.ylabel('Total Excitation Energy (MeV)')
plt.colorbar()
plt.show()
```
### 5. Correlated Observables
<a id='correlations'></a>
Many observables within fission are correlated with one another. Sometimes, these are best visualized as two-dimensional histograms as in the TKE-TXE plot directly above. Other times, it is helpful to plot certain observables as a function of mass or TKE. There are routines within CGMFtk that easily construct those, as demonstrated here:
```
# nubar as a function of mass
## nubarg, excitation energy, kinetic energy (pre), and spin are available as a function of mass
nubarA = hist.nubarA()
TKEA = hist.TKEA()
fig = plt.figure(figsize=(16,6))
plt.subplot(121)
plt.plot(nubarA[0],nubarA[1],'ko')
plt.xlabel('Mass (u)')
plt.ylabel(r'$\overline{\nu}$')
plt.subplot(122)
plt.plot(TKEA[0],TKEA[1],'ko')
plt.xlabel('Mass (u)')
plt.ylabel(r'Total Kinetic Energy (MeV)')
plt.tight_layout()
plt.show()
```
### 6. Neutron properties
<a id='neutrons'></a>
```
# construct and plot the neutron multiplicity distribution
nu,pnu = hist.Pnu()
fig = plt.figure(figsize=(8,6))
plt.plot(nu,pnu,'k*--',markersize=10)
plt.xlabel(r'$\nu$')
plt.ylabel(r'P($\nu$)')
plt.show()
# construct and plot the prompt neutron spectrum
fig = plt.figure(figsize=(16,6))
plt.subplot(121)
ebins,pfns = hist.pfns()
plt.step(ebins,pfns,where='mid')
plt.xlim(0,20)
plt.xlabel('Outgoing neutron energy (MeV)')
plt.ylabel('PFNS')
plt.subplot(122)
plt.step(ebins,pfns,where='mid')
plt.xlim(0.01,20)
plt.xscale('log')
plt.yscale('log')
plt.xlabel('Outgoing neutron energy (MeV)')
plt.ylabel('PFNS')
plt.tight_layout()
plt.show()
# average number of prompt neutrons
print ('nubar (per fission event) = ',hist.nubartot())
print ('average number of neutrons per fragment = ',hist.nubar())
# average neutron energies
print ('Neutron energies in the lab:')
print ('Average energy of all neutrons = ',hist.meanNeutronElab())
print ('Average energy of neutrons from fragments = ',hist.meanNeutronElabFragments())
print ('Average energy of neutrons from light fragment = ',hist.meanNeutronElabLF())
print ('Average energy of neutrons from heavy fragment = ',hist.meanNeutronElabHF())
print (' ')
print ('Neutron energies in the center of mass:')
print ('Average energy of neutrons from fragments = ',hist.meanNeutronEcmFragments())
print ('Average energy of neutrons from light fragment = ',hist.meanNeutronEcmLF())
print ('Average energy of neutrons from heavy fragment = ',hist.meanNeutronEcmHF())
```
Note that the energies are not recorded for the pre-fission neutrons in the center-of-mass frame.
### 7. Gamma properties
<a id='gammas'></a>
```
# construct and plot the gamma multiplicity distribution
nug,pnug = hist.Pnug()
fig = plt.figure(figsize=(8,6))
plt.plot(nug,pnug,'k*--',markersize=10)
plt.xlabel(r'$N_\gamma$')
plt.ylabel(r'P($N_\gamma$)')
plt.show()
# construct and plot the prompt fission gamma spectrum
fig = plt.figure(figsize=(16,6))
plt.subplot(121)
ebins,pfgs = hist.pfgs()
plt.step(ebins,pfgs,where='mid')
plt.xlim(0,5)
plt.xlabel(r'Outgoing $\gamma$ energy (MeV)')
plt.ylabel('PFGS')
plt.subplot(122)
plt.step(ebins,pfgs,where='mid')
plt.xlim(0.1,5)
plt.xscale('log')
plt.yscale('log')
plt.xlabel(r'Outgoing $\gamma$ energy (MeV)')
plt.ylabel('PFGS')
plt.ylim(1e-2,30)
plt.tight_layout()
plt.show()
# average number of prompt gammas
print ('nugbar (per fission event) = ',hist.nubargtot())
print ('average number of gammas per fragment = ',hist.nubarg())
# perform gamma-ray spectroscopy
gE = 0.2125 # gamma ray at 212.5 keV
dE = 0.01 # 1% energy resolution
gspec1 = hist.gammaSpec(gE,dE*gE,post=True)
# calculate the percentage of events for each A/Z
As1 = np.unique(gspec1[:,1])
totEvents = len(gspec1)
fracA1 = []
for A in As1:
mask = gspec1[:,1]==A
fracA1.append(len(gspec1[mask])/totEvents)
Zs1 = np.unique(gspec1[:,0])
fracZ1 = []
for Z in Zs1:
mask = gspec1[:,0]==Z
fracZ1.append(len(gspec1[mask])/totEvents)
fig = plt.figure(figsize=(8,6))
plt.plot(As1,fracA1,'--')
plt.xlabel('Fission Fragment Mass (A)')
plt.ylabel('Fraction of Events')
plt.text(135,0.170,r'$\epsilon_\gamma$=212.5 keV',fontsize=18)
plt.show()
fig = plt.figure(figsize=(8,6))
plt.plot(Zs1,fracZ1,'--')
plt.xlabel('Fission Fragment Charge (Z)')
plt.ylabel('Fraction of Events')
plt.show()
# average gamma energies
print ('Gamma energies in the lab:')
print ('Average energy of all gammas = ',hist.meanGammaElab())
print ('Average energy of gammas from light fragment = ',hist.meanGammaElabLF())
print ('Average energy of gammas from heavy fragment = ',hist.meanGammaElabHF())
```
Note that in the current version of CGMF, only the Doppler-shifted lab energies are recorded in the history file.
### 8. Gamma-ray Timing Information
<a id='timing'></a>
We can also calculate quantities that are related to the time at which the 'late' prompt gamma rays are emitted. When the option -t -1 is included in the run time options for CGMF, these gamma-ray times are printed out in the CGMF history file. The Histories class can read these times based on the header of the CGMF history file.
```
histTime = fh.Histories(workdir + timeFile, nevents=nevents*2)
# gamma-ray times can be retrieved through
gammaAges = histTime.getGammaAges()
```
The nubargtot function can also be used to construct the average gamma-ray multiplicity per fission event as a function of time. In the call to nubarg() or nubargtot(), timeWindow=True should be included, which uses the default timings provided in the function (otherwise, passing a numpy array or list of times to timeWindow will use those times). Optionally, a minimum gamma-ray energy cut-off, Eth, can also be included.
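For instance, a custom time grid can be passed instead of the defaults (a sketch based on the list-of-times form described above; the grid itself is arbitrary):
```
# arbitrary time grid from 1 ns to 1 us (in seconds)
custom_times = np.logspace(-9, -6, 25)
times, nubargTime = histTime.nubarg(timeWindow=custom_times, Eth=0.1)
```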
```
times,nubargTime = histTime.nubarg(timeWindow=True) # include timeWindow as a boolean or list of times (in seconds) to activate this feature
fig = plt.figure(figsize=(8,6))
plt.plot(times,nubargTime,'o',label='Eth=0. MeV')
times,nubargTime = histTime.nubarg(timeWindow=True,Eth=0.1)
plt.plot(times,nubargTime,'o',label='Eth=0.1 MeV')
plt.xlabel('Time since fission (s)')
plt.ylabel(r'Average $\gamma$-ray multiplicity')
plt.xscale('log')
plt.legend()
plt.show()
```
The prompt fission gamma-ray spectrum function, pfgs(), can also be used to calculate this quantity within a certain time window since the fission event. The time window is defined using minTime and maxTime to set the lower and upper boundaries.
```
fig = plt.figure(figsize=(8,6))
bE,pfgsTest = histTime.pfgs(minTime=5e-8,maxTime=500e-8)
plt.step(bE,pfgsTest,label='Time window')
bE,pfgsTest = histTime.pfgs()
plt.step(bE,pfgsTest,label='All events')
plt.yscale('log')
plt.xlim(0,2)
plt.ylim(0.1,100)
plt.xlabel('Gamma-ray Energy (MeV)')
plt.ylabel('Prompt Fission Gamma Spectrum')
plt.legend()
plt.show()
# calculate the gamma-ray multiplicity as a function of time since fission for a specific fission fragment
times,gMultiplicity = histTime.gammaMultiplicity(minTime=1e-8,maxTime=1e-6,Afragment=134,Zfragment=52)
# also compare to an exponential decay with the half life of the state
f = np.exp(-times*np.log(2)/1.641e-7) # the half life of 134Te is 164.1 ns
norm = gMultiplicity[0]/f[0]
fig = plt.figure(figsize=(8,6))
plt.plot(times*1e9,gMultiplicity/norm,'k-',label='CGMF')
plt.plot(times*1e9,f,'r--',label=r'exp($-t\cdot$log(2)/$\tau_{1/2}$)')
plt.legend()
plt.yscale('log')
plt.xlabel('Time since fission (ns)')
plt.ylabel(r'N$_\gamma$(t) (arb. units)')
plt.show()
# calculate the isomeric ratios for specific states in nuclei
# e.g. isomeric ratio for the 1/2- state in 99Nb, ground state is 9/2+, lifetime is 150 s
r = histTime.isomericRatio(thresholdTime=1,A=99,Z=41,Jm=0.5,Jgs=4.5)
print ('99Nb:',round(r,2))
# e.g. isomeric ratio for the 11/2- state in 133Te, ground state is 3/2+, lifetime is 917.4 s
r = histTime.isomericRatio(thresholdTime=1,A=133,Z=52,Jm=5.5,Jgs=1.5)
print ('133Te:',round(r,2))
```
### 9. Angular Correlations
<a id='angles'></a>
For the fission fragment angular distribution with respect to the beam axis/z-axis, there is one option: afterEmission=True/False. afterEmission=True uses these angles after neutron emission and afterEmission=False uses these angles before neutron emission. The default is True.
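A short sketch of the two settings (assuming the keyword is passed exactly as described above):
```
# fragment cos(theta) after neutron emission (the default) and before neutron emission
cos_post = hist.FFangles(afterEmission=True)
cos_pre = hist.FFangles(afterEmission=False)
```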
```
# calculate cos(theta) between the fragments and z-axis/beam axis
FFangles = hist.FFangles()
bins = np.linspace(-1,1,30)
h,b = np.histogram(FFangles,bins=bins,density=True)
# only light fragments
hLight,b = np.histogram(FFangles[::2],bins=bins,density=True)
# only heavy fragments
hHeavy,b = np.histogram(FFangles[1::2],bins=bins,density=True)
x = 0.5*(b[:-1]+b[1:])
fig = plt.figure(figsize=(8,6))
plt.plot(x,h,'k*',label='All Fragments')
plt.plot(x,hLight,'ro',label='Light Fragments')
plt.plot(x,hHeavy,'b^',label='Heavy Fragments')
plt.xlabel(r'cos($\theta$)')
plt.ylabel('Frequency')
plt.ylim(0.45,0.55)
plt.title('Fission fragment angles with respect to beam axis')
plt.legend()
plt.show()
```
There are several options when calculating the angles of the neutrons with respect to the beam axis/z-axis. The first is including a neutron threshold energy with the keyword Eth (given in MeV). We can also calculate these angles in the lab frame (lab=True, default) or in the center of mass frame of the compound system (lab=False). Finally, we can include pre-fission neutrons (includePrefission=True, default) or exclude them (includePrefission=False). However, the pre-fission neutrons can only be included in the lab frame.
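As a sketch of combining these options (keyword names taken from the description above):
```
# lab-frame neutron angles, counting only neutrons above 0.5 MeV and excluding pre-fission neutrons
nAll_th, nL_th, nH_th = hist.nangles(Eth=0.5, lab=True, includePrefission=False)
```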
```
# calculate the angles between the neutrons and the z-axis/beam axis
nAllLab,nLLab,nHLab = hist.nangles(lab=True) # all neutrons, from the light fragment, from the heavy fragment
nAllCM,nLCM,nHCM = hist.nangles(lab=False) # center of mass frame of the compound
bins = np.linspace(-1,1,30)
hAllLab,b = np.histogram(nAllLab,bins=bins,density=True)
hLightLab,b = np.histogram(nLLab,bins=bins,density=True)
hHeavyLab,b = np.histogram(nHLab,bins=bins,density=True)
hAllcm,b = np.histogram(nAllCM,bins=bins,density=True)
hLightcm,b = np.histogram(nLCM,bins=bins,density=True)
hHeavycm,b = np.histogram(nHCM,bins=bins,density=True)
x = 0.5*(b[:-1]+b[1:])
fig = plt.figure(figsize=(8,6))
plt.plot(x,hAllLab,'k*',label='All Fragments')
plt.plot(x,hLightLab,'ro',label='Light Fragments')
plt.plot(x,hHeavyLab,'b^',label='Heavy Fragments')
plt.xlabel(r'cos($\theta$)')
plt.ylabel('Frequency')
plt.ylim(0.45,0.55)
plt.title('Neutron Angles with respect to beam axis in the Lab Frame')
plt.legend()
plt.show()
fig = plt.figure(figsize=(8,6))
plt.plot(x,hAllLab,'k*',label='Lab Frame')
plt.plot(x,hAllcm,'ro',label='CoM Frame')
plt.xlabel(r'cos($\theta$)')
plt.ylabel('Frequency')
plt.ylim(0.45,0.55)
plt.legend()
plt.show()
```
There are again several options that we can use when calculating the angles between all pairs of neutrons (from all fragments) and the light fragments, all of which have been seen in the last two examples. These include Eth (neutron threshold energy), afterEmission (fission fragment angles post or pre neutron emission), and includePrefission (whether or not to include pre-fission neutrons).
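For example, varying only the threshold option (a sketch; the cut-off value is arbitrary):
```
# neutron-fragment angles, counting only neutrons above 0.5 MeV
nFall_th, nFLight_th, nFHeavy_th = hist.nFangles(Eth=0.5)
```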
```
# calculate the angles between the neutrons and the light fragments
nFall,nFLight,nFHeavy = hist.nFangles()
bins = np.linspace(-1,1,30)
hall,b = np.histogram(nFall,bins=bins,density=True)
hlight,b = np.histogram(nFLight,bins=bins,density=True)
hheavy,b = np.histogram(nFHeavy,bins=bins,density=True)
x = 0.5*(b[:-1]+b[1:])
fig = plt.figure(figsize=(8,6))
plt.plot(x,hall,'k*',label='All Fragments')
plt.plot(x,hlight,'ro',label='Light Fragments')
plt.plot(x,hheavy,'b^',label='Heavy Fragments')
plt.xlabel(r'cos($\theta$)')
plt.ylabel('Frequency')
plt.legend()
plt.show()
```
# 0. required packages for h5py
```
%run "..\..\Startup_py3.py"
sys.path.append(r"..\..\..\..\Documents")
import ImageAnalysis3 as ia
%matplotlib notebook
from ImageAnalysis3 import *
print(os.getpid())
import h5py
from ImageAnalysis3.classes import _allowed_kwds
import ast
```
# 1. Create field-of-view class
```
reload(ia)
reload(classes)
reload(classes.batch_functions)
reload(classes.field_of_view)
reload(io_tools.load)
reload(visual_tools)
reload(ia.correction_tools)
reload(ia.correction_tools.alignment)
reload(ia.spot_tools.matching)
reload(ia.segmentation_tools.chromosome)
reload(ia.spot_tools.fitting)
fov_param = {'data_folder':r'\\10.245.74.158\Chromatin_NAS_1\20210320-proB_Dox_IAA_STI_CTP-08_2color',
'save_folder':r'\\10.245.74.212\Chromatin_NAS_2\IgH_analyzed_results\20210320_IgH_proB_iaa_dox+',
#'save_folder':r'D:\Pu_Temp\202009_IgH_proB_DMSO_2color',
'experiment_type': 'DNA',
'num_threads': 24,
'correction_folder':r'\\10.245.74.158\Chromatin_NAS_0\Corrections\20201012-Corrections_2color',
'shared_parameters':{
'single_im_size':[35,2048,2048],
'corr_channels':['750','647'],
'num_empty_frames': 0,
'corr_hot_pixel':True,
'corr_Z_shift':False,
'min_num_seeds':500,
'max_num_seeds': 2500,
'spot_seeding_th':125,
'normalize_intensity_local':False,
'normalize_intensity_background':False,
},
}
fov = classes.field_of_view.Field_of_View(fov_param, _fov_id=30,
_color_info_kwargs={
'_color_filename':'Color_Usage_clean',
},
_prioritize_saved_attrs=False,
)
```
### 2. Process image into candidate spots
```
reload(io_tools.load)
reload(spot_tools.fitting)
reload(correction_tools.chromatic)
reload(classes.batch_functions)
# process image into spots
id_list, spot_list = fov._process_image_to_spots('unique',
#_sel_ids=np.arange(41,47),
_load_common_reference=True,
_load_with_multiple=False,
_save_images=True,
_warp_images=False,
_overwrite_drift=False,
_overwrite_image=False,
_overwrite_spot=False,
_verbose=True)
```
# 3. Find chromosomes
## 3.1 Load chromosome image
```
overwrite_chrom = False
chrom_im = fov._load_chromosome_image(_type='reverse',
_overwrite=overwrite_chrom)
```
## 3.2 Find candidate chromosomes
```
chrom_coords = fov._find_candidate_chromosomes_by_segmentation(_filt_size=4,
_binary_per_th=99.75,
_morphology_size=2,
_overwrite=overwrite_chrom)
```
## 3.3 Select among candidate chromosomes
```
chrom_coords = fov._select_chromosome_by_candidate_spots(_good_chr_loss_th=0.3,
_cand_spot_intensity_th=200,
_save=True,
_overwrite=overwrite_chrom)
```
### Visualize chromosome selections
```
%matplotlib notebook
%matplotlib notebook
## visualize
coord_dict = {'coords':[np.flipud(_coord) for _coord in fov.chrom_coords],
'class_ids':list(np.zeros(len(fov.chrom_coords),dtype=np.int)),
}
visual_tools.imshow_mark_3d_v2([fov.chrom_im],
given_dic=coord_dict,
save_file=None,
)
```
## Select spots based on chromosomes
```
fov._load_from_file('unique')
intensity_th = 200
from ImageAnalysis3.spot_tools.picking import assign_spots_to_chromosomes
kept_spots_list = []
for _spots in fov.unique_spots_list:
kept_spots_list.append(_spots[_spots[:,0] > intensity_th])
# finalize candidate spots
cand_chr_spots_list = [[] for _ct in fov.chrom_coords]
for _spots in kept_spots_list:
_cands_list = assign_spots_to_chromosomes(_spots, fov.chrom_coords)
for _i, _cands in enumerate(_cands_list):
cand_chr_spots_list[_i].append(_cands)
print(f"kept chromosomes: {len(fov.chrom_coords)}")
reload(spot_tools.picking)
from ImageAnalysis3.spot_tools.picking import convert_spots_to_hzxys
dna_cand_hzxys_list = [convert_spots_to_hzxys(_spots, fov.shared_parameters['distance_zxy'])
for _spots in cand_chr_spots_list]
dna_reg_ids = fov.unique_ids
dna_reg_channels = fov.unique_channels
chrom_coords = fov.chrom_coords
# select_hzxys close to the chromosome center
dist_th = 3000 # upper limit is 3000nm
good_chr_th = 0.8 # 80% of regions should have candidate spots
sel_dna_cand_hzxys_list = []
sel_chrom_coords = []
chr_cand_pers = []
sel_chr_cand_pers = []
for _cand_hzxys, _chrom_coord in zip(dna_cand_hzxys_list, chrom_coords):
_chr_cand_per = 0
_sel_cands_list = []
for _cands in _cand_hzxys:
if len(_cands) == 0:
_sel_cands_list.append([])
else:
_dists = np.linalg.norm(_cands[:,1:4] - _chrom_coord*np.array([200,108,108]), axis=1)
_sel_cands_list.append(_cands[(_dists < dist_th)])
_chr_cand_per += 1
_chr_cand_per *= 1/len(_cand_hzxys)
# append
if _chr_cand_per >= good_chr_th:
sel_dna_cand_hzxys_list.append(_sel_cands_list)
sel_chrom_coords.append(_chrom_coord)
sel_chr_cand_pers.append(_chr_cand_per)
chr_cand_pers.append(_chr_cand_per)
print(f"kept chromosomes: {len(sel_chrom_coords)}")
```
### EM pick spots
```
%matplotlib inline
reload(spot_tools.picking)
from ImageAnalysis3.spot_tools.picking import _maximize_score_spot_picking_of_chr, pick_spots_by_intensities,pick_spots_by_scores, generate_reference_from_population, evaluate_differences
niter= 10
num_threads = 24
ref_chr_cts = None
# initialize
init_dna_hzxys = pick_spots_by_intensities(sel_dna_cand_hzxys_list)
# set save list
sel_dna_hzxys_list, sel_dna_scores_list, all_dna_scores_list = [init_dna_hzxys], [], []
for _iter in range(niter):
print(f"+ iter:{_iter}")
# E: generate reference
ref_ct_dists, ref_local_dists, ref_ints = generate_reference_from_population(
sel_dna_hzxys_list[-1], dna_reg_ids,
sel_dna_hzxys_list[-1], dna_reg_ids,
ref_channels=dna_reg_channels,
ref_chr_cts=ref_chr_cts,
num_threads=num_threads,
collapse_regions=True,
split_channels=True,
verbose=True,
)
plt.figure(figsize=(4,2), dpi=100)
for _k, _v in ref_ct_dists.items():
plt.hist(np.array(_v), bins=np.arange(0,2500,50), alpha=0.5, label=_k)
plt.legend(fontsize=8)
plt.title('center dist', fontsize=8)
plt.show()
plt.figure(figsize=(4,2), dpi=100)
for _k, _v in ref_local_dists.items():
plt.hist(np.array(_v), bins=np.arange(0,2500,50), alpha=0.5, label=_k)
plt.legend(fontsize=8)
plt.title('local dist', fontsize=8)
plt.show()
plt.figure(figsize=(4,2), dpi=100)
for _k, _v in ref_ints.items():
plt.hist(np.array(_v), bins=np.arange(0,5000,100), alpha=0.5, label=_k)
plt.legend(fontsize=8)
plt.title('intensity', fontsize=8)
plt.show()
# M: pick based on scores
sel_hzxys_list, sel_scores_list, all_scores_list, other_scores_list = \
pick_spots_by_scores(
sel_dna_cand_hzxys_list, dna_reg_ids,
ref_channels=dna_reg_channels,
ref_hzxys_list=sel_dna_hzxys_list[-1], ref_ids=dna_reg_ids,
ref_ct_dists=ref_ct_dists, ref_local_dists=ref_local_dists, ref_ints=ref_ints,
ref_chr_cts=ref_chr_cts,
num_threads=num_threads,
collapse_regions=True,
split_intensity_channels=True,
split_distance_channels=False,
return_other_scores=True,
verbose=True,
)
# check updating rate
update_rate = evaluate_differences(sel_hzxys_list, sel_dna_hzxys_list[-1])
print(f"-- region kept: {update_rate:.4f}")
# append
sel_dna_hzxys_list.append(sel_hzxys_list)
sel_dna_scores_list.append(sel_scores_list)
all_dna_scores_list.append(all_scores_list)
plt.figure(figsize=(4,2), dpi=100)
plt.hist(np.concatenate([np.concatenate(_scores)
for _scores in other_scores_list]),
bins=np.arange(-15, 0, 0.5), alpha=0.5, label='unselected')
plt.hist(np.ravel([np.array(_sel_scores)
for _sel_scores in sel_dna_scores_list[-1]]),
bins=np.arange(-15, 0, 0.5), alpha=0.5, label='selected')
plt.legend(fontsize=8)
plt.show()
if update_rate > 0.998:
break
%%timeit
spot_tools.picking.chromosome_center_dists(sel_dna_hzxys_list[0][0], ref_channels=dna_reg_channels, split_channels=False)
%%timeit
spot_tools.picking.chromosome_center_dists(sel_dna_hzxys_list[0][0], ref_channels=dna_reg_channels, split_channels=True)
from scipy.spatial.distance import pdist, squareform
sel_iter = -1
final_dna_hzxys_list = []
kept_chr_ids = []
distmap_list = []
score_th = -5
int_th = 200
bad_spot_percentage = 1.0 #0.5
for _hzxys, _scores in zip(sel_dna_hzxys_list[sel_iter], sel_dna_scores_list[sel_iter]):
_kept_hzxys = np.array(_hzxys).copy()
# remove spots by intensity
_bad_inds = _kept_hzxys[:,0] < int_th
# remove spots by scores
_bad_inds += _scores < score_th
#print(np.mean(_bad_inds))
_kept_hzxys[_bad_inds] = np.nan
if np.mean(np.isnan(_kept_hzxys).sum(1)>0)<bad_spot_percentage:
kept_chr_ids.append(True)
final_dna_hzxys_list.append(_kept_hzxys)
distmap_list.append(squareform(pdist(_kept_hzxys[:,1:4])))
else:
kept_chr_ids.append(False)
kept_chr_ids = np.array(kept_chr_ids, dtype=np.bool)
#kept_chrom_coords = np.array(sel_chrom_coords)[kept_chr_ids]
distmap_list = np.array(distmap_list)
median_distmap = np.nanmedian(distmap_list, axis=0)
loss_rates = np.mean(np.sum(np.isnan(final_dna_hzxys_list), axis=2)>0, axis=0)
print(np.mean(loss_rates))
fig, ax = plt.subplots(figsize=(4,2),dpi=200)
ax.plot(loss_rates, '.-')
ax.set_ylim([0,1])
ax.set_xticks(np.arange(0,len(dna_reg_ids),int(len(dna_reg_ids)/5)))
plt.show()
imaging_order = []
for _fd, _infos in fov.color_dic.items():
for _info in _infos:
if len(_info) > 0 and _info[0] == 'u':
if int(_info[1:]) in dna_reg_ids:
imaging_order.append(list(dna_reg_ids).index(int(_info[1:])))
imaging_order = np.array(imaging_order, dtype=np.int)
#kept_inds = imaging_order # plot imaging ordered regions
#kept_inds = np.where(loss_rates<0.5)[0] # plot good regions only
kept_inds = np.arange(len(fov.unique_ids)) # plot all
%matplotlib inline
fig, ax = plt.subplots(figsize=(4,3),dpi=200)
ax = ia.figure_tools.distmap.plot_distance_map(median_distmap[kept_inds][:,kept_inds],
color_limits=[0,600],
ax=ax,
ticks=np.arange(0,150,20),
figure_dpi=500)
ax.set_title(f"v-Abl ProB iaa_dox_STI+, n={len(distmap_list)}", fontsize=7.5)
_ticks = np.arange(0, len(kept_inds), 20)
ax.set_xticks(_ticks)
ax.set_xticklabels(dna_reg_ids[kept_inds][_ticks])
ax.set_xlabel(f"5kb region id", fontsize=7, labelpad=2)
ax.set_yticks(_ticks)
ax.set_yticklabels(dna_reg_ids[kept_inds][_ticks])
ax.set_ylabel(f"5kb region id", fontsize=7, labelpad=2)
ax.axvline(x=np.where(fov.unique_ids[kept_inds]>300)[0][0], color=[1,1,0])
ax.axhline(y=np.where(fov.unique_ids[kept_inds]>300)[0][0], color=[1,1,0])
plt.gcf().subplots_adjust(bottom=0.1)
plt.show()
```
## Visualize a single example
```
%matplotlib inline
reload(figure_tools.image)
chrom_id = 3
import matplotlib
import copy
sc_cmap = copy.copy(matplotlib.cm.get_cmap('seismic_r'))
sc_cmap.set_bad(color=[0.5,0.5,0.5,1])
#valid_inds = np.where(np.isnan(final_dna_hzxys_list[chrom_id]).sum(1) == 0)[0]
valid_inds = np.ones(len(final_dna_hzxys_list[chrom_id]), dtype=np.bool) # all spots
fig, ax = plt.subplots(figsize=(4,3),dpi=200)
ax = ia.figure_tools.distmap.plot_distance_map(
distmap_list[chrom_id][valid_inds][:,valid_inds],
color_limits=[0,600],
ax=ax,
cmap=sc_cmap,
ticks=np.arange(0,150,20),
figure_dpi=200)
ax.set_title(f"proB DMSO chrom: {chrom_id}", fontsize=7.5)
plt.gcf().subplots_adjust(bottom=0.1)
plt.show()
ax3d = figure_tools.image.chromosome_structure_3d_rendering(
final_dna_hzxys_list[chrom_id][valid_inds, 1:],
marker_edge_line_width=0,
reference_bar_length=200, image_radius=300,
line_width=0.5, figure_dpi=300, depthshade=False)
plt.show()
?figure_tools.image.chromosome_structure_3d_rendering
```
## Visualize all fitted spots
```
with h5py.File(fov.save_filename, "r", libver='latest') as _f:
_grp = _f['unique']
raw_spots_list = [_spots[_spots[:,0] > 0] for _spots in _grp['raw_spots'][:]]
spots_list = [_spots[_spots[:,0] > 0] for _spots in _grp['spots'][:]]
from scipy.spatial.distance import cdist
picked_spot_inds_list = []
for _i, _id in enumerate(dna_reg_ids):
_cand_hzxys = spots_list[_i][:,1:4] * fov.shared_parameters['distance_zxy']
_dists = cdist(np.array(final_dna_hzxys_list)[:,_i,1:], _cand_hzxys)#, axis=1
_matched_spot_inds = []
for _ds in _dists:
if np.sum(np.isnan(_ds)) < len(_ds) and np.nanmin(_ds) < 0.01:
_matched_spot_inds.append(np.argmin(_ds))
else:
_matched_spot_inds.append(np.nan)
# append
picked_spot_inds_list.append(np.array(_matched_spot_inds))
#vis_inds = [0,1,2,3,4,5]
vis_inds = np.where(loss_rates > 0.8)[0]
vis_ims, vis_ids, vis_spot_list, vis_raw_spot_list = [], [], [], []
with h5py.File(fov.save_filename, "r", libver='latest') as _f:
_grp = _f['unique']
for _ind in vis_inds:
vis_ims.append(_grp['ims'][_ind])
vis_ids.append(_grp['ids'][_ind])
_picked_inds = picked_spot_inds_list[_ind]
_picked_inds = np.array(_picked_inds[np.isnan(_picked_inds)==False], dtype=np.int)
vis_spot_list.append(raw_spots_list[_ind][_picked_inds])
dna_reg_ids[59]
fov.color_dic
# visualize_all_chromosomes
%matplotlib notebook
%matplotlib notebook
## visualize
coord_dict = {'coords':[],
'class_ids':[],
}
for _i, _spots in enumerate(vis_spot_list):
coord_dict['coords'] += list(np.flipud(_spot[1:4]) for _spot in _spots)
coord_dict['class_ids'] += list(_i * np.ones(len(_spots),dtype=np.int))
fig=plt.figure(figsize=(4,6), dpi=150)
visual_tools.imshow_mark_3d_v2(vis_ims,
fig=fig,
given_dic=coord_dict,
save_file=None,
)
```
CIFAR-10 is another dataset. Like MNIST, it has ten classes (airplane, automobile, bird, cat, deer, dog, frog, horse, ship, truck).
https://www.cs.toronto.edu/~kriz/cifar.html
```
import keras
from keras.models import Sequential
from PIL import Image
import numpy as np
import tarfile
# load the dataset
# there are only train and test splits, no validation set
import pickle
train_X=[]
train_y=[]
tar_gz = "../Week06/cifar-10-python.tar.gz"
with tarfile.open(tar_gz) as tarf:
for i in range(1, 6):
dataset = "cifar-10-batches-py/data_batch_%d"%i
print("load",dataset)
with tarf.extractfile(dataset) as f:
result = pickle.load(f, encoding='latin1')
train_X.extend(result['data']/255)
train_y.extend(result['labels'])
train_X=np.float32(train_X)
train_y=np.int32(train_y)
dataset = "cifar-10-batches-py/test_batch"
print("load",dataset)
with tarf.extractfile(dataset) as f:
result = pickle.load(f, encoding='latin1')
test_X=np.float32(result['data']/255)
test_y=np.int32(result['labels'])
train_Y = np.eye(10)[train_y]
test_Y = np.eye(10)[test_y]
validation_data = (test_X[:1000], test_Y[:1000])
test_data = (test_X[1000:], test_Y[1000:])
from IPython.display import display
def showX(X):
int_X = (X*255).clip(0,255).astype('uint8')
# N*3072 -> N*3*32*32 -> 32 * 32N * 3
int_X_reshape = np.moveaxis(int_X.reshape(-1,3,32,32), 1, 3)
int_X_reshape = int_X_reshape.swapaxes(0,1).reshape(32,-1, 3)
display(Image.fromarray(int_X_reshape))
# training data: show the first 20 samples of X
showX(train_X[:20])
print(train_y[:20])
name_array = np.array("飛機、汽車、鳥、貓、鹿、狗、青蛙、馬、船、卡車".split('、'))
print(name_array[train_y[:20]])
```
Let's apply the CNN model from before and see how it does.
```
# %load ../Week06/q_cifar10_cnn.py
import keras
from keras.layers import Dense, Activation, Conv2D, MaxPool2D, Reshape
model = Sequential()
model.add(Reshape((3, 32, 32), input_shape=(3*32*32,) ))
model.add(Conv2D(filters=32, kernel_size=(3,3), padding='same', activation="relu", data_format='channels_first'))
model.add(MaxPool2D())
model.add(Conv2D(filters=64, kernel_size=(3,3), padding='same', activation="relu", data_format='channels_first'))
model.add(MaxPool2D())
model.add(Reshape((-1,)))
model.add(Dense(units=1024, activation="relu"))
model.add(Dense(units=10, activation="softmax"))
model.compile(loss='categorical_crossentropy',
optimizer='adam',
metrics=['accuracy'])
model.fit(train_X, train_Y, validation_data=validation_data, batch_size=100, epochs=10)
rtn = model.evaluate(*test_data)
print("\ntest accuracy=", rtn[1])
showX(test_X[:15])
predict_y = model.predict_classes(test_X[:15], verbose=False)
print(name_array[predict_y])
print(name_array[test_y[:15]])
import keras
from keras.layers import Dense, Activation, Conv2D, MaxPool2D, Reshape, Dropout
model = Sequential()
model.add(Reshape((3, 32, 32), input_shape=(3*32*32,) ))
model.add(Conv2D(32, 3, padding='same', activation="relu", data_format='channels_first'))
model.add(MaxPool2D())
model.add(Conv2D(64, 3, padding='same', activation="relu", data_format='channels_first'))
model.add(MaxPool2D())
model.add(Reshape((-1,)))
model.add(Dense(units=1024, activation="relu"))
model.add(Dropout(rate=0.4))
model.add(Dense(units=10, activation="softmax"))
model.compile(loss='categorical_crossentropy',
optimizer='adam',
metrics=['accuracy'])
model.fit(train_X, train_Y, validation_data=validation_data, batch_size=100, epochs=10)
rtn = model.evaluate(*test_data)
print("\ntest accuracy=", rtn[1])
model.fit(train_X, train_Y, validation_data=validation_data, batch_size=100, epochs=10)
rtn = model.evaluate(*test_data)
print("\ntest accuracy=", rtn[1])
showX(test_X[:15])
predict_y = model.predict_classes(test_X[:15], verbose=False)
print(name_array[predict_y])
print(name_array[test_y[:15]])
```
Different activation functions:
https://keras.io/activations/
```
# first define a small helper
def add_layers(model, *layers):
for l in layers:
model.add(l)
import keras
from keras.engine.topology import Layer
from keras.layers import Dense, Activation, Conv2D, MaxPool2D, Reshape, Dropout
def MyConv2D(filters, kernel_size, **kwargs):
return (Conv2D(filters=filters, kernel_size=kernel_size,
padding='same', data_format='channels_first', **kwargs),
Activation("elu"))
model = Sequential()
add_layers( model,
Reshape((3, 32, 32), input_shape=(3*32*32,)),
*MyConv2D(32, 3),
MaxPool2D(),
*MyConv2D(64, 3),
MaxPool2D(),
Reshape((-1,)),
Dense(units=1024, activation="elu"),
Dropout(rate=0.4),
Dense(units=10, activation="softmax")
)
model.compile(loss='categorical_crossentropy',
optimizer='adam',
metrics=['accuracy'])
model.fit(train_X, train_Y, validation_data=validation_data, batch_size=100, epochs=10)
rtn = model.evaluate(*test_data)
print("\ntest accuracy=", rtn[1])
import keras
from keras.layers import Dense, Activation, Conv2D, MaxPool2D, Reshape, Dropout, BatchNormalization
# preprocess the data first
# GCN
train_X_mean = np.mean(train_X, axis=0, keepdims=True)
train_X_std = np.std(train_X, axis=0, keepdims=True)
preprocessed_train_X = (train_X-train_X_mean)/train_X_std
preprocessed_test_X = (test_X-train_X_mean)/train_X_std
preprocessed_validation_data = (preprocessed_test_X[:1000], test_Y[:1000])
preprocessed_test_data = (preprocessed_test_X[1000:], test_Y[1000:])
def MyConv2D(filters, kernel_size, **kwargs):
return (Conv2D(filters=filters, kernel_size=kernel_size,
padding='same', data_format='channels_first', **kwargs),
Activation("relu"))
def add_layers(model, *layers):
for l in layers:
model.add(l)
model = Sequential()
add_layers( model,
Reshape((3, 32, 32), input_shape=(3*32*32,)),
*MyConv2D(32, 3),
MaxPool2D(),
*MyConv2D(64, 3),
MaxPool2D(),
Reshape((-1,)),
Dense(units=1024, activation="relu"),
Dropout(rate=0.4),
Dense(units=10, activation="softmax")
)
model.compile(loss='categorical_crossentropy',
optimizer='adam',
metrics=['accuracy'])
model.fit(preprocessed_train_X, train_Y, validation_data=preprocessed_validation_data,
batch_size=100, epochs=10)
rtn = model.evaluate(*preprocessed_test_data)
print("\ntest accuracy=", rtn[1])
import keras
from keras.layers import Dense, Activation, Conv2D, MaxPool2D, Reshape, Dropout, BatchNormalization
# preprocess the data first
# GCN
train_X_mean = np.mean(train_X, axis=0, keepdims=True)
train_X_std = np.std(train_X, axis=0, keepdims=True)
preprocessed_train_X = (train_X-train_X_mean)/train_X_std
preprocessed_test_X = (test_X-train_X_mean)/train_X_std
preprocessed_validation_data = (preprocessed_test_X[:1000], test_Y[:1000])
preprocessed_test_data = (preprocessed_test_X[1000:], test_Y[1000:])
def MyConv2D(filters, kernel_size, **kwargs):
return (Conv2D(filters=filters, kernel_size=kernel_size,
padding='same', data_format='channels_first', **kwargs),
BatchNormalization(axis=1),
Activation("relu"))
def add_layers(model, *layers):
for l in layers:
model.add(l)
model = Sequential()
add_layers( model,
Reshape((3, 32, 32), input_shape=(3*32*32,)),
*MyConv2D(32, 3),
MaxPool2D(),
*MyConv2D(64, 3),
MaxPool2D(),
Reshape((-1,)),
Dense(units=1024, activation="relu"),
Dropout(0.4),
Dense(units=10, activation="softmax")
)
model.compile(loss='categorical_crossentropy',
optimizer='adam',
metrics=['accuracy'])
model.fit(preprocessed_train_X, train_Y, validation_data=preprocessed_validation_data,
batch_size=100, epochs=10)
rtn = model.evaluate(*preprocessed_test_data)
print("\ntest accuracy=", rtn[1])
model.fit(preprocessed_train_X, train_Y, validation_data=preprocessed_validation_data,
batch_size=100, epochs=10)
rtn = model.evaluate(*preprocessed_test_data)
print("\ntest accuracy=", rtn[1])
def zca_whitening_matrix(X):
"""
Function to compute ZCA whitening matrix (aka Mahalanobis whitening).
INPUT: X: [M x N] matrix.
Rows: Variables
Columns: Observations
OUTPUT: ZCAMatrix: [M x M] matrix
"""
X = X.T
# Covariance matrix [column-wise variables]: Sigma = (X-mu)' * (X-mu) / N
sigma = np.cov(X, rowvar=True) # [M x M]
# Singular Value Decomposition. X = U * np.diag(S) * V
U,S,V = np.linalg.svd(sigma)
# U: [M x M] eigenvectors of sigma.
# S: [M x 1] eigenvalues of sigma.
# V: [M x M] transpose of U
# Whitening constant: prevents division by zero
epsilon = 1e-5
# ZCA Whitening matrix: U * Lambda * U'
ZCAMatrix = np.dot(U, np.dot(np.diag(1.0/np.sqrt(S + epsilon)), U.T)) # [M x M]
return ZCAMatrix.T
# ZCAMatrix = zca_whitening_matrix(X0)
# new_train_X= ((train_X-train_X_mean)/train_X_std) @ ZCAMatrix
# see https://keras.io/preprocessing/image/
# reshape the input into a 4D tensor
train_X = train_X.reshape(-1, 3, 32, 32)
test_X = test_X.reshape(-1, 3, 32, 32)
def MyConv2D(filters, kernel_size, **kwargs):
return (Conv2D(filters=filters, kernel_size=kernel_size,
padding='same', data_format='channels_first', **kwargs),
BatchNormalization(axis=1),
Activation("relu"))
model = Sequential()
add_layers( model,
*MyConv2D(32, 3, input_shape=(3,32,32)),
MaxPool2D(),
*MyConv2D(64, 3),
MaxPool2D(),
Reshape((-1,)),
Dense(units=1024, activation="relu"),
Dropout(0.4),
Dense(units=10, activation="softmax")
)
model.compile(loss='categorical_crossentropy',
optimizer='adam',
metrics=['accuracy'])
# use Keras's built-in preprocessing
from keras.preprocessing.image import ImageDataGenerator
datagen = ImageDataGenerator(
featurewise_center=True,
featurewise_std_normalization=True,
zca_whitening=True,
data_format="channels_first")
# compute quantities required for featurewise normalization
# (std, mean, and principal components if ZCA whitening is applied)
datagen.fit(train_X)
p_train_X, p_train_Y = datagen.flow(train_X, train_Y, batch_size=len(train_X), shuffle=False).next()
# the order is unchanged
assert (p_train_Y == train_Y).all()
p_test_X, p_test_Y = datagen.flow(test_X, test_Y, batch_size=len(test_X), shuffle=False).next()
# the order is unchanged
assert (p_test_Y == test_Y).all()
# these two are not needed
del p_train_Y, p_test_Y
p_validation_data = (p_test_X[:1000], test_Y[:1000])
p_test_data = (p_test_X[1000:], test_Y[1000:])
model.fit(p_train_X, train_Y, validation_data=p_validation_data,
batch_size=100, epochs=10)
rtn = model.evaluate(*p_test_data)
print("\ntest accuracy=", rtn[1])
```
Using on-the-fly (real-time) data augmentation:
```python
# fits the model on batches with real-time data augmentation:
train_generator = datagen.flow(train_X, train_Y, batch_size=100, shuffle=False)
test_generator = datagen.flow(*test_data, batch_size=100, shuffle=False)
model.fit_generator(train_generator,
steps_per_epoch=len(train_X),
#validation_data=datagen.flow(*validation_data, batch_size=100),
#validation_steps=1000,
epochs=10)
rtn = model.evaluate_generator(test_generator, steps=9000)
```
```
import keras
from keras.layers import Dense, Activation, Conv2D, MaxPool2D, Reshape, Dropout, BatchNormalization
# reshape the input into a 4D tensor
train_X = train_X.reshape(-1, 3, 32, 32)
test_X = test_X.reshape(-1, 3, 32, 32)
def MyConv2D(filters, kernel_size, **kwargs):
return (Conv2D(filters=filters, kernel_size=kernel_size,
padding='same', data_format='channels_first', **kwargs),
BatchNormalization(axis=1),
Activation("elu"))
model = Sequential()
add_layers( model,
*MyConv2D(64, 3, input_shape=(3,32,32)),
*MyConv2D(64, 3),
MaxPool2D(),
*MyConv2D(128, 3),
*MyConv2D(128, 3),
MaxPool2D(),
*MyConv2D(256, 3),
*MyConv2D(256, 3),
Reshape((-1,)),
Dense(units=1024),
BatchNormalization(),
Activation("elu"),
Dropout(0.4),
Dense(units=10, activation="softmax")
)
model.compile(loss='categorical_crossentropy',
optimizer='adam',
metrics=['accuracy'])
# use Keras's built-in preprocessing
from keras.preprocessing.image import ImageDataGenerator
datagen = ImageDataGenerator(
featurewise_center=True,
featurewise_std_normalization=True,
zca_whitening=True,
data_format="channels_first")
# compute quantities required for featurewise normalization
# (std, mean, and principal components if ZCA whitening is applied)
datagen.fit(train_X)
p_train_X, p_train_Y = datagen.flow(train_X, train_Y, batch_size=len(train_X), shuffle=False).next()
# the order is unchanged
assert (p_train_Y == train_Y).all()
p_test_X, p_test_Y = datagen.flow(test_X, test_Y, batch_size=len(test_X), shuffle=False).next()
# the order is unchanged
assert (p_test_Y == test_Y).all()
# these two are not needed
del p_train_Y, p_test_Y
p_validation_data = (p_test_X[:1000], test_Y[:1000])
p_test_data = (p_test_X[1000:], test_Y[1000:])
model.fit(p_train_X, train_Y, validation_data=p_validation_data,
batch_size=100, epochs=10)
rtn = model.evaluate(*p_test_data)
print("\ntest accuracy=", rtn[1])
model.fit(p_train_X, train_Y, validation_data=p_validation_data,
batch_size=100, epochs=10)
rtn = model.evaluate(*p_test_data)
```
The equivalent model in Lasagne:
```python
_ = InputLayer(shape=(None, 3*32*32), input_var=input_var)
_ = DropoutLayer(_, 0.2)
_ = ReshapeLayer(_, ([0], 3, 32, 32))
_ = conv(_, 96, 3)
_ = conv(_, 96, 3)
_ = MaxPool2DDNNLayer(_, 3, 2)
_ = DropoutLayer(_, 0.5)
_ = conv(_, 192, 3)
_ = conv(_, 192, 3)
_ = MaxPool2DDNNLayer(_, 3, 2)
_ = DropoutLayer(_, 0.5)
_ = conv(_, 192, 3)
_ = conv(_, 192, 1)
_ = conv(_, 10, 1)
_ = Pool2DDNNLayer(_, 7, mode='average_exc_pad')
_ = FlattenLayer(_)
l_out = NonlinearityLayer(_, nonlinearity=lasagne.nonlinearities.softmax)
```
```
import keras
from keras.layers import Dense, Activation, Conv2D, MaxPool2D, Reshape, Dropout, BatchNormalization, GlobalAveragePooling2D
# reshape the input into a 4D tensor
train_X = train_X.reshape(-1, 3, 32, 32)
test_X = test_X.reshape(-1, 3, 32, 32)
def MyConv2D(filters, kernel_size, **kwargs):
return (Conv2D(filters=filters, kernel_size=kernel_size,
padding='same', data_format='channels_first', **kwargs),
BatchNormalization(axis=1, momentum=0.9),
Activation("relu"))
model = Sequential()
add_layers( model,
Dropout(0.2, input_shape=(3,32,32)),
*MyConv2D(96, 3),
*MyConv2D(96, 3),
MaxPool2D(3, 2),
*MyConv2D(192, 3),
*MyConv2D(192, 3),
MaxPool2D(3, 2),
Dropout(0.5),
*MyConv2D(192, 3),
*MyConv2D(192, 1),
*MyConv2D(10, 1),
GlobalAveragePooling2D(data_format='channels_first'),
Activation("softmax")
)
model.compile(loss='categorical_crossentropy',
optimizer='adam',
metrics=['accuracy'])
model.fit(p_train_X, train_Y, validation_data=p_validation_data,
batch_size=100, epochs=50)
rtn = model.evaluate(*p_test_data)
print("\ntest accuracy=", rtn[1])
import keras
from keras.layers import Dense, Activation, Conv2D, MaxPool2D, Reshape, Dropout, BatchNormalization, GlobalAveragePooling2D
# reshape the input into a 4D tensor
train_X = train_X.reshape(-1, 3, 32, 32)
test_X = test_X.reshape(-1, 3, 32, 32)
def MyConv2D(filters, kernel_size, **kwargs):
return (Conv2D(filters=filters, kernel_size=kernel_size,
padding='same', data_format='channels_first', **kwargs),
BatchNormalization(axis=1, momentum=0.9),
Activation("relu"))
model = Sequential()
add_layers( model,
Dropout(0.2, input_shape=(3,32,32)),
*MyConv2D(96, 3),
*MyConv2D(96, 3),
MaxPool2D(3, 2),
*MyConv2D(192, 3),
*MyConv2D(192, 3),
MaxPool2D(3, 2),
Dropout(0.5),
*MyConv2D(192, 3),
*MyConv2D(192, 1),
*MyConv2D(10, 1),
GlobalAveragePooling2D(data_format='channels_first'),
Activation("softmax")
)
model.compile(loss='categorical_crossentropy',
optimizer='adam',
metrics=['accuracy'])
# use Keras's built-in preprocessing
from keras.preprocessing.image import ImageDataGenerator
datagen = ImageDataGenerator(
featurewise_center=True,
featurewise_std_normalization=True,
zca_whitening=True,
data_format="channels_first")
# compute quantities required for featurewise normalization
# (std, mean, and principal components if ZCA whitening is applied)
datagen.fit(train_X)
p_train_X, p_train_Y = datagen.flow(train_X, train_Y, batch_size=len(train_X), shuffle=False).next()
# the order is unchanged
assert (p_train_Y == train_Y).all()
p_test_X, p_test_Y = datagen.flow(test_X, test_Y, batch_size=len(test_X), shuffle=False).next()
# the order is unchanged
assert (p_test_Y == test_Y).all()
# these two are not needed
del p_train_Y, p_test_Y
p_validation_data = (p_test_X[:1000], test_Y[:1000])
p_test_data = (p_test_X[1000:], test_Y[1000:])
model.fit(p_train_X, train_Y, validation_data=p_validation_data,
batch_size=100, epochs=10)
rtn = model.evaluate(*p_test_data)
print("\ntest accuracy=", rtn[1])
```
```
import sys
sys.path.insert(1,"/home1/07064/tg863631/anaconda3/envs/CbrainCustomLayer/lib/python3.6/site-packages") #work around for h5py
from cbrain.imports import *
from cbrain.cam_constants import *
from cbrain.utils import *
from cbrain.layers import *
from cbrain.data_generator import DataGenerator
import tensorflow as tf
from tensorflow import math as tfm
from tensorflow.keras.layers import *
from tensorflow.keras.models import *
# import tensorflow_probability as tfp
import xarray as xr
import numpy as np
from cbrain.model_diagnostics import ModelDiagnostics
import matplotlib as mpl
import matplotlib.pyplot as plt
import matplotlib.image as imag
import scipy.integrate as sin
import matplotlib.ticker as mticker
import pickle
from tensorflow.keras import layers
from tensorflow.keras.losses import *
from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping
import datetime
from cbrain.climate_invariant import *
import yaml
from cbrain.imports import *
from cbrain.utils import *
from cbrain.normalization import *
import h5py
from sklearn.preprocessing import OneHotEncoder
```
## Preprocess the data and dump to a new file
```
DATA_PATH = '/fast/ankitesh/data/'
TRAINFILE = 'CI_SP_M4K_train_shuffle.nc'
VALIDFILE = 'CI_SP_M4K_valid.nc'
NORMFILE = 'CI_SP_M4K_NORM_norm.nc'
percentile_path='/export/nfs0home/ankitesg/data/percentile_data.pkl'
data_name='M4K'
bin_size = 1000
scale_dict = load_pickle('/export/nfs0home/ankitesg/CBrain_project/CBRAIN-CAM/nn_config/scale_dicts/009_Wm2_scaling.pkl')
percentile_bins = load_pickle(percentile_path)['Percentile'][data_name]
enc = OneHotEncoder(sparse=False)
classes = np.arange(bin_size+2)
enc.fit(classes.reshape(-1,1))
data_ds = xr.open_dataset(f"{DATA_PATH}{TRAINFILE}")
n = data_ds['vars'].shape[0]
data_ds
coords = list(data_ds['vars'].var_names.values)
coords = coords + ['PHQ_BIN']*30+['TPHYSTND_BIN']*30+['FSNT_BIN','FSNS_BIN','FLNT_BIN','FLNS_BIN']
def _transform_to_one_hot(Y):
'''
return shape = batch_size X 64 X bin_size
'''
Y_trans = []
out_vars = ['PHQ','TPHYSTND','FSNT', 'FSNS', 'FLNT', 'FLNS']
var_dict = {}
var_dict['PHQ'] = Y[:,:30]
var_dict['TPHYSTND'] = Y[:,30:60]
var_dict['FSNT'] = Y[:,60]
var_dict['FSNS'] = Y[:,61]
var_dict['FLNT'] = Y[:,62]
var_dict['FLNS'] = Y[:,63]
perc = percentile_bins
for var in out_vars[:2]:
all_levels_one_hot = []
for ilev in range(30):
bin_index = np.digitize(var_dict[var][:,ilev],perc[var][ilev])
one_hot = enc.transform(bin_index.reshape(-1,1))
all_levels_one_hot.append(one_hot)
var_one_hot = np.stack(all_levels_one_hot,axis=1)
Y_trans.append(var_one_hot)
for var in out_vars[2:]:
bin_index = np.digitize(var_dict[var][:], perc[var])
one_hot = enc.transform(bin_index.reshape(-1,1))[:,np.newaxis,:]
Y_trans.append(one_hot)
Y_concatenated = np.concatenate(Y_trans,axis=1)
return Y_concatenated
inp_vars = ['QBP','TBP','PS', 'SOLIN', 'SHFLX', 'LHFLX']
inp_coords = coords[:64]
out_coords = coords[64:128]
bin_coords = list(range(bin_size+2))
all_data_arrays = []
batch_size = 4096
norm_ds = xr.open_dataset(f'{DATA_PATH}{NORMFILE}')
output_transform = DictNormalizer(norm_ds, ['PHQ','TPHYSTND','FSNT', 'FSNS', 'FLNT', 'FLNS'], scale_dict)
for i in range(0,n,batch_size):
all_vars = data_ds['vars'][i:i+batch_size]
inp_vals = all_vars[:,:64]
out_vals = all_vars[:,64:128]
out_vals = output_transform.transform(out_vals)
one_hot = _transform_to_one_hot(out_vals)
sample_coords = list(range(i,i+all_vars.shape[0]))
x3 = xr.Dataset(
{
"X": (("sample", "inp_coords"),inp_vals),
"Y_raw":(("sample","out_cords"),out_vals),
"Y": (("sample", "out_coords","bin_index"), one_hot),
},
coords={"sample": sample_coords, "inp_coords": inp_coords,"out_coords":out_coords,"bin_index":bin_coords},
)
all_data_arrays.append(x3)
if(int(i/batch_size+1)%100 == 0):
print("saving this batch")
final_da = xr.combine_by_coords(all_data_arrays)
final_da.to_netcdf(f'/scratch/ankitesh/data/new_data_for_v2_{int(i/batch_size+1)}.nc')
all_data_arrays = []
print(int(i/batch_size), end='\r')
final_da = xr.combine_by_coords(all_data_arrays)
final_da
data_ds = xr.open_dataset(f"{DATA_PATH}{VALIDFILE}")
n = data_ds['vars'].shape[0]
coords = list(data_ds['vars'].var_names.values)
coords = coords + ['PHQ_BIN']*30+['TPHYSTND_BIN']*30+['FSNT_BIN','FSNS_BIN','FLNT_BIN','FLNS_BIN']
all_data_arrays = []
batch_size = 4096
norm_ds = xr.open_dataset(f'{DATA_PATH}{NORMFILE}')
output_transform = DictNormalizer(norm_ds, ['PHQ','TPHYSTND','FSNT', 'FSNS', 'FLNT', 'FLNS'], scale_dict)
for i in range(0,n,batch_size):
all_vars = data_ds['vars'][i:i+batch_size]
inp_vals = all_vars[:,:64]
out_vals = all_vars[:,64:128]
out_vals = output_transform.transform(out_vals)
one_hot = _transform_to_one_hot(out_vals)
sample_coords = list(range(i,i+all_vars.shape[0]))
x3 = xr.Dataset(
{
"X": (("sample", "inp_coords"),inp_vals),
"Y_raw":(("sample","out_cords"),out_vals),
"Y": (("sample", "out_coords","bin_index"), one_hot),
},
coords={"sample": sample_coords, "inp_coords": inp_coords,"out_coords":out_coords,"bin_index":bin_coords},
)
all_data_arrays.append(x3)
if(int(i/batch_size+1)%100 == 0):
print("saving this batch")
final_da = xr.combine_by_coords(all_data_arrays)
final_da.to_netcdf(f'/scratch/ankitesh/data/new_data_valid_for_v2_{int(i/batch_size+1)}.nc')
all_data_arrays = []
print(int(i/batch_size), end='\r')
final_da = xr.combine_by_coords(all_data_arrays)
final_da.to_netcdf(f'/scratch/ankitesh/data/new_data_valid_for_v2_{int(i/batch_size+1)}.nc')
```
# Docutils
## Presentation
Click [__here__] (youtube link) for the video presentation
## Summary of Support Files
- `demo.ipynb`: the notebook containing this tutorial code
- `test.csv`: a small file data used in the tutorial code
## Installation Instructions
Use `!pip install docutils` to install the `docutils` package. Next, use `import docutils` to import the package into your notebook.
For example, to import specific modules from the `docutils` package, use the following line of code:
`from docutils import core, io`
A list of notable modules and subpackages that are part of the `docutils` package appears below.
## Guide
__docutils 0.17.1 version__
- Author: David Goodger
- Contact: [email protected]
[Docutils](https://pypi.org/project/docutils/) is an open-source, modular text processing system for processing plaintext documentation into a more useful format. Formats include HTML, man-pages, OpenDocument, LaTeX, or XML.
Docutils supports reStructuredText for input, an easy-to-read, what-you-see-is-what-you-get plaintext markup syntax.
Docutils is short for "Python Documentation Utilities".
Support for the following sources has been implemented:
- Standalone files
- PEPs (Python Enhancement Proposals)
Support for these sources is currently being developed:
- Inline documentation
- Wikis
- Email and more
Docutils Distribution Consists of:
- the `docutils` package (or library)
- front-end tools
- test suite
- documentation.
## Notable docutils Modules & Subpackages
Module | Definition
------------- | -------------
__core__ | Contains the `Publisher` class and the `publish_*()` convenience functions
__io__ | Provides a uniform API for low-level input and output
__nodes__ | Docutils document tree (doctree) node class library
Subpackages | Definition
------------- | -------------
**languages** | Language-specific mappings of terms
**parsers** | Syntax-specific input parser modules or packages
**readers** | Context-specific input handlers which understand the data source and manage a parser
Below is an overview of the `docutils` package:

## Main Use Applications of Package
The reStructuredText component of the `docutils` package makes it easy to convert between different formats, especially from plain text to a static website. It is unique because it is extensible, which makes it more capable than simpler markup languages.
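As a minimal sketch of such a conversion (this uses the standard `publish_string` convenience function from `docutils.core`; the snippet is illustrative and not part of this tutorial's support files):
```
from docutils.core import publish_string

rst_source = "Title\n=====\n\nSome *emphasized* reStructuredText."
html_bytes = publish_string(source=rst_source, writer_name="html")
print(html_bytes.decode("utf-8")[:200])  # beginning of the generated HTML document
```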
Additionally, users can pair `docutils` with `Sphinx` to convert text to HTML. The `Sphinx` package is built on top of the `docutils` package: the docutils parser creates the parse tree as an in-memory representation of the text for the Sphinx application and the .rst environment.
```
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from scipy.spatial.transform import Rotation as R
import copy
# Let's write an expanded-ensemble Sampler() class
class EESampler_RigidThreeParticle(object):
"""An expanded-ensemble Sampler class for a rigid-triangle 3-particle/3-restraint system."""
def __init__(self, k_values=[0.0, 0.5, 1.0, 2.0, 5.0, 10., 15., 20., 50., 100., 200., 400., 800.], L=8.0,
x0=np.array([[0.,0.,0.],[1.,0.,0.], [2.,1.,0.]]),
a1=np.array([[0.,0.,0.],[1.,0.,0.], [2.,1.,0.]])):
"""Initialize the class.
INPUT
k_values -- a np.array() of force constant values (kJ/mol/nm^2)
L -- the length of the cubic box (nm), extending from -L/2 to L/2.
x0 -- the initial positions of the particles (np.array of shape (3,3))
a1 -- position of the harmonic anchors (np.array of shape (3,3))
"""
self.k_values = np.array(k_values) # kJ/mol/nm^2)
self.L = L # nm
self.x0 = x0 # np.array with shape (3,3) and units nm
self.a1 = a1 # "
# Calculate distance between particles 1 and 2
self.d0 = self.distance(x0[0,:], x0[1,:]) # the initial distance between p2 - p1, as a reference
self.d = copy.copy(self.d0)
print('self.d', self.d)
# Calculate the altitude (height), c, of triangle where p1-p2 is the base
### 1. First calculate the area of the triangle as A = (1/2)||v \cross w||
p1, p2, p3 = x0[0,:], x0[1,:], x0[2,:]
v, w = p2-p1, p3-p1
area = 0.5*np.linalg.norm( np.cross(v,w) )
### 2. Then since A = 1/2 * base * height, the height is c = 2*area/base
self.c0 = 2.0 * area / np.linalg.norm(v)
self.c = copy.copy(self.c0)
print('self.c', self.c)
# calculate e, the projection of p3-p1 = w in the p2-p1 = v direction
unit_vec_along_p12 = v / self.d
self.e = np.abs(np.dot(w,unit_vec_along_p12))
print('self.e', self.e)
self.d3 = np.linalg.norm(w) # the initial distance between p3 - p1, as a reference
self.k_index = 0 # thermodynamic index
self.n_ensembles = self.k_values.shape[0]
self.x = copy.copy(self.x0)
self.U = self.energy(self.x, self.k_values[self.k_index])
### Monte Carlo and Wang-Landau (WL) settings
self.dx = 0.2 # Gaussian translation step size
self.dtheta = 0.2 # Gaussian step size for angular (in radians)
self.RT = 2.479 # in kJ/mol at 298 K
self.all_pbc_shifts = []
for i in [-L, 0, L]:
for j in [-L, 0, L]:
for k in [-L, 0, L]:
self.all_pbc_shifts.append([i,j,k])
self.all_pbc_shifts = np.array(self.all_pbc_shifts)
# print('self.all_pbc_shifts', self.all_pbc_shifts)
self.wl_increment = 5.0 # in kT
self.wl_scaling = 0.5 # (0.5 is a la R. E. Belardinelli, and V. D. Pereyra, JCP 2007), 0.8 is MRS
self.flatness = 0.8 # if all histogram values are within flatness*100
# percent of the mean counts, decrease wl_increment and reset histogram
self.wl_increment_freq = 10 # frequency to update wl sampling
self.g = np.zeros(self.n_ensembles) # bias energies
self.h = np.zeros(self.n_ensembles) # histogram counts
self.use_1_over_t = False # switch to updating increment as c_1_over_t/(nsteps) when
self.c_1_over_t = 10.0 # the wl_increment < c_1_over_t/(nsteps)
# (a la R. E. Belardinelli, and V. D. Pereyra, JCP 2007)
self.has_switched_to_1_over_t = False
self.steps_before_1_over_t = 10000 # wait at least these number of steps before switching to 1/t
self.print_every = 10000
self.traj_every = 100
def theory_dg_in_kT(self, verbose=True):
"""Returns the theoretical value for the 3-restraint potential at all k_values."""
if verbose:
print('self.d', self.d)
print('self.c', self.c)
print('self.e', self.e)
print('self.L', self.L)
kc_coeff = 1.0 + (self.e/self.d)**2.0
if verbose:
print('kc_coeff', kc_coeff)
kp1_coeff = 1.0 + (self.c**2 + self.e**2)/(self.d**2.0)
if verbose:
print('kp1_coeff', kp1_coeff)
'''
theory_dG_in_kT = -1.0*( 3.0/2.0*np.log(2.0*np.pi*ee.RT/(3.0*ee.k_values[1:])) \ # translational
+ 1.0/2.0*np.log(2.0*np.pi*ee.RT/(2.0*ee.k_values[1:])) \ # rot about d
+ 1.0/2.0*np.log(2.0*np.pi*ee.RT/(kc_coeff*ee.k_values[1:])) \ # rot about c
+ 1.0/2.0*np.log(2.0*np.pi*ee.RT/(kp1_coeff*ee.k_values[1:])) \ # rot out of page
- np.log( ee.L**3 * 8.0 * (np.pi**2) * (ee.d)**2 * ee.c ) )
'''
theory_dG_in_kT = -1.0*( 3.0/2.0*np.log(2.0*np.pi*self.RT/(3.0*self.k_values[1:])) \
+ 1.0/2.0*np.log(2.0*np.pi*self.RT/(self.k_values[1:])) \
+ 1.0/2.0*np.log(2.0*np.pi*self.RT/(kc_coeff*self.k_values[1:])) \
+ 1.0/2.0*np.log(2.0*np.pi*self.RT/(kp1_coeff*self.k_values[1:])) \
- np.log( self.L**3 * 8.0 * (np.pi**2) * (self.d)**2 * self.c ) )
return theory_dG_in_kT
def energy(self, x, k):
"""Returns the energy of the harmonic potential in units kJ/mol, for position x."""
return 0.5*k* np.sum((x-self.a1)*(x-self.a1))
def distance(self, p1, p2):
"""Return the distance between two particles (np.array of shape (3,))"""
return np.sqrt( np.dot(p2-p1,p2-p1) )
def sample(self, nsteps, verbose=True):
"""Perform WL expanded ensemble sampling with MC position moves."""
# store transition counts
T_counts = np.zeros( (self.n_ensembles, self.n_ensembles) )
accepted = 0
data = []
for step in range(nsteps):
########################
### MC moves
# Before we do any moves, we need to reassemble the rod if it has
# disjointed from any periodic box separations.
p1, p2, p3 = self.x[0,:], self.x[1,:], self.x[2,:]
tol = 1e-3
# print('p1, p2', p1, p2, 'self.distance(p1, p2)', self.distance(p1, p2), 'self.d', self.d)
p2_distance_deviation = np.abs(self.distance(p1, p2) - self.d0)
p3_distance_deviation = np.abs(self.distance(p1, p3) - self.d3)
if p2_distance_deviation + p3_distance_deviation > tol:
# print(step, 'points out of the box. Reassembling...')
## Find all images of the second and third particles
all_images_p2 = np.tile(p2, 27).reshape(27, 3) + self.all_pbc_shifts
all_images_p3 = np.tile(p3, 27).reshape(27, 3) + self.all_pbc_shifts
### select the image that is the closest to particle 1 (note: this only works if self.d < L/2)
p2_displacements = all_images_p2 - np.tile(p1, 27).reshape(27, 3)
p3_displacements = all_images_p3 - np.tile(p1, 27).reshape(27, 3)
# print('displacements', displacements)
p2_sq_distances = np.sum(p2_displacements * p2_displacements, axis=1)
p3_sq_distances = np.sum(p3_displacements * p3_displacements, axis=1)
# print('sq_distances', sq_distances)
p2_closest_index = np.argmin(p2_sq_distances)
p2 = all_images_p2[p2_closest_index,:]
p3_closest_index = np.argmin(p3_sq_distances)
p3 = all_images_p3[p3_closest_index,:]
x_assembled = np.array([list(p1),list(p2),list(p3)])
# print('step', step, 'x_assembled', x_assembled)
# For debugging, store the distance between the particles for the reassembled rod
self.d = self.distance(x_assembled[0,:], x_assembled[1,:])
# For debugging, store the triangle altitude c
v, w = p2-p1, p3-p1
self.c = np.linalg.norm( np.cross(v,w) ) / np.linalg.norm(v)
# propose a MC translation move
nudge = np.random.randn(3)
x_new = x_assembled + self.dx*np.tile(nudge, 3).reshape(3,3)
# propose a rotation move
## get a random vector on the unit sphere
vec = 2.0*np.random.random(3) - 1.0
while np.dot(vec,vec) > 1.0:
vec = 2.0*np.random.random(3) - 1.0
vec = vec/np.sqrt( np.dot(vec,vec) )
## Find the rotation transformation about this axis
rotation_radians = self.dtheta*np.random.randn()
rotation_vector = rotation_radians * vec
rotation = R.from_rotvec(rotation_vector)
## Translate the rigid rod to the origin
midpoint = (x_new[0,:] + x_new[1,:] + x_new[2,:])/3.0
x_new -= np.tile(midpoint, 3).reshape(3,3)
## Rotate it
x_new_rotated = rotation.apply(x_new)
# print(x_new_rotated)
## Translate it back
x_new_rotated += np.tile(midpoint, 3).reshape(3,3)
# if any coordinate of x_new goes outside the box, wrap it back in:
x_new = ((x_new_rotated + np.array([self.L/2., self.L/2., self.L/2.])) % self.L) - np.array([self.L/2., self.L/2., self.L/2.])
U_new = self.energy(x_new, self.k_values[self.k_index])
# print('step', step, 'self.x', 'x_new', self.x, x_new)
# print('\tself.U', 'U_new', self.U, U_new)
# accept MC move according to the metropolis criterion
accept = False
P_accept = min(1., np.exp(-(U_new - self.U)/self.RT))
if np.random.rand() < P_accept:
accept = True
if accept:
accepted += 1
self.x = x_new
self.U = U_new
########################
### WL moves
### next, update the WL histogams and propose a WL move
if (step%self.wl_increment_freq == 1):
# WL histograms
self.g[self.k_index] -= self.wl_increment
self.h[self.k_index] += 1.0
# reset the bias to i=0 reference
self.g -= self.g[0]
# attempt a move to a neighboring ensemble
wl_accept = False
if np.random.rand() < 0.5:
k_index_new = self.k_index + 1
else:
k_index_new = self.k_index - 1
if (k_index_new >= 0) and (k_index_new < self.n_ensembles):
energies = self.energy(self.x, self.k_values)
P_wl_accept = min(1., np.exp( -(energies[k_index_new]/self.RT - self.g[k_index_new] - energies[self.k_index]/self.RT + self.g[self.k_index] )))
if np.random.rand() < P_wl_accept:
wl_accept = True
if wl_accept:
T_counts[self.k_index, k_index_new] += 1.0
self.k_index = k_index_new
else:
T_counts[self.k_index,self.k_index] += 1.0
# check if the histogram is flat enough
mean_counts = self.h.mean()
which_are_flat_enough = (self.h > self.flatness*mean_counts)*(self.h < (2.0-self.flatness)*mean_counts)
if step%self.print_every == 0:
print('h', self.h, 'mean_counts', mean_counts)
print('which_are_flat_enough', which_are_flat_enough)
if step >= self.steps_before_1_over_t:
if (self.has_switched_to_1_over_t == False) and (self.wl_increment < self.c_1_over_t/float(step)):
print('#### Switching to 1/t method ( wl_increment =', self.wl_increment, ') ####')
self.has_switched_to_1_over_t = True
if np.sum( which_are_flat_enough.astype(int) ) == self.n_ensembles:
if self.has_switched_to_1_over_t:
self.wl_increment = self.c_1_over_t/float(step)
else:
self.wl_increment *= self.wl_scaling
self.h = np.zeros(self.n_ensembles)
# print a status report
if step%self.print_every == 0:
print('step', step, '| λ index', self.k_index, '| wl_increment =', self.wl_increment, 'kT')
if not verbose: # just post first and last ensemble info
print('%8d\t%8d\t%3.4f'%(0, self.h[0], self.g[0]))
print('%8d\t%8d\t%3.4f'%(self.n_ensembles, self.h[-1], self.g[-1]))
if verbose:
print('# ensemble\tk_value\thistogram\tg (kT)')
for k in range(self.n_ensembles):
outstr = '%8d\t%4.1f\t%8d\t%3.4f'%(k, self.k_values[k], self.h[k], self.g[k])
if k == self.k_index:
outstr += ' <<'
print(outstr)
print()
# store sample in trajectory
if step%self.traj_every == 0:
dhdl = self.energy(self.x, self.k_values) - self.energy(self.x, self.k_values[self.k_index])  # energies relative to the current ensemble
data.append([step, self.k_index,
self.x[0,0], self.x[0,1], self.x[0,2],
self.x[1,0], self.x[1,1], self.x[1,2],
self.x[2,0], self.x[2,1], self.x[2,2],
self.d, self.c,
self.wl_increment, self.g[-1], dhdl, T_counts])
trajectory = pd.DataFrame(data, columns=['step', 'lambda',
'x1', 'y1', 'z1', 'x2', 'y2', 'z2', 'x3', 'y3', 'z3', 'distance', 'height',
'increment','free_energy','dhdl', 'transition_matrix'])
print(accepted)
return trajectory
ee = EESampler_RigidThreeParticle()
nsteps = 2000000
traj = ee.sample(nsteps)
# print(traj['dhdl'])
step = traj.loc[:,'step'].values
p1 = np.zeros((step.shape[0], 3))
p1[:,0] = traj.loc[:,'x1'].values
p1[:,1] = traj.loc[:,'y1'].values
p1[:,2] = traj.loc[:,'z1'].values
p2 = np.zeros((step.shape[0], 3))
p2[:,0] = traj.loc[:,'x2'].values
p2[:,1] = traj.loc[:,'y2'].values
p2[:,2] = traj.loc[:,'z2'].values
d = traj.loc[:,'distance'].values
c = traj.loc[:,'height'].values
import matplotlib
from matplotlib import pyplot as plt
plt.figure(figsize=(10,4))
plt.subplot(1,3,1)
plt.plot(step, p1)
plt.plot(step, p2)
plt.subplot(1,3,2)
plt.plot(step, d)
plt.subplot(1,3,3)
plt.plot(step, c)
### compare result with experiment
ee_temp = EESampler_RigidThreeParticle()
theory_dG_in_kT = ee_temp.theory_dg_in_kT()
print('theory_dG_in_kT', theory_dG_in_kT)
print('ee.g',ee.g)
plt.figure(figsize=(10,4))
# Plot the final free energy estimates as a function of k
plt.subplot(1,2,1)
plt.plot(ee.k_values, ee.g, 'o-', label='EE result')
plt.plot(ee.k_values[1:], theory_dG_in_kT, 'o-', label='theory')
plt.xlabel('$k$ (kJ/nm$^2$)')
plt.ylabel('$\Delta G_{rest}$ (kT)')
#lt.xscale('log')
plt.legend(loc='best')
# Plot the convergence of the free energy
step = traj.loc[:,'step'].values
free_energy = traj.loc[:,'free_energy'].values
plt.subplot(1,2,2)
plt.plot(step, free_energy, 'r-')
plt.xlabel('step')
plt.ylabel('$\Delta G_{rest}$ (kT)')
# plt.legend(loc='best')
```
### Let's try parameters from an actual simulation: REF_RL/RUN0
```
"""
[tud67309@login2 RUN0]$ cat LIG_res.itp
;LIG_res.itp
[ position_restraints ]
;i funct fcx fcy fcz
6 1 800 800 800
35 1 800 800 800
23 1 800 800 800
"""
# From LIG_h.pdb:
"""
ATOM 6 C28 LIG A 1 11.765 -16.536 1.909 1.00 0.00 C
...
ATOM 23 C23 LIG A 1 12.358 -10.647 7.766 1.00 0.00 C
...
ATOM 35 C17 LIG A 1 20.883 -7.674 2.314 1.00 0.00 C
"""
x0 = np.zeros((3,3))
x0[0,:] = np.array([1.1765, -1.6536, 0.1909]) # converted to nm
x0[1,:] = np.array([1.2358, -1.0647, 0.7766]) # converted to nm
x0[2,:] = np.array([2.0883, -0.7674, 0.2314]) # converted to nm
V0 = 1660.0 # in Å^3 is the standard volume
L0 = ((1660.0)**(1/3)) * 0.1 # converted to nm
print('L0', L0, 'nm')
ee_REF0 = EESampler_RigidThreeParticle(L=L0, x0=x0, a1=x0)
theory_dG_in_kT = ee_REF0.theory_dg_in_kT()
print('theory_dG_in_kT', theory_dG_in_kT)
print('ee_REF0.g', ee_REF0.g)
plt.figure(figsize=(10,4))
# Plot the final free energy estimates as a function of k
plt.subplot(1,2,1)
#plt.plot(ee.k_values, ee.g, 'o-', label='EE result')
plt.plot(ee_REF0.k_values[1:], theory_dG_in_kT, 'o-', label='theory')
plt.xlabel('$k$ (kJ/nm$^2$)')
plt.ylabel('$\Delta G_{rest}$ (kT)')
plt.legend(loc='best')
```
### REF_RL/RUN1
```
"""
$ cat LIG_res.itp
;LIG_res.itp
[ position_restraints ]
;i funct fcx fcy fcz
11 1 800 800 800
25 1 800 800 800
2 1 800 800 800
"""
# From LIG_h.pdb:
"""
ATOM 2 C12 LIG A 1 6.050 0.774 17.871 1.00 0.00 C
ATOM 11 C15 LIG A 1 2.770 2.355 21.054 1.00 0.00 C
ATOM 25 C4 LIG A 1 13.466 -1.210 22.191 1.00 0.00 C
"""
x0 = np.zeros((3,3))
x0[0,:] = np.array([0.6050, 0.0774, 1.7871]) # converted to nm
x0[1,:] = np.array([0.2770, 0.2355, 2.1054]) # converted to nm
x0[2,:] = np.array([1.3466, -0.1210, 2.2191]) # converted to nm
V0 = 1660.0 # in Å^3 is the standard volume
L0 = ((1660.0)**(1/3)) * 0.1 # converted to nm
print('L0', L0, 'nm')
ee_REF1 = EESampler_RigidThreeParticle(L=L0, x0=x0, a1=x0)
# The *theory* for the triple-restraint rigid rotor says that
# dG/kT = -ln ( [(2*\pi)/(\beta *3k )]^{3/2} / L^3
# [(2*\pi)/(\beta*2k))]^{1/2} * [(2*\pi)/(\beta *(2+c^2/(d/2)^2)k )]^{1/2} / 4 \pi (d/2)^2
# [(2*\pi)/(\beta *k )]^{1/2} / 2 \pi c
print('ee_REF1.d', ee_REF1.d)
print('ee_REF1.c', ee_REF1.c)
print('ee_REF1.L', ee_REF1.L)
k_prime_coeff = 2.0 + (ee_REF1.c/(ee_REF1.d/2.))**2.0
print('k_prime_coeff', k_prime_coeff)
theory_dG_in_kT = -1.0*( 3.0/2.0*np.log(2.0*np.pi*ee_REF1.RT/(3.0*ee_REF1.k_values[1:])) \
+ 1.0/2.0*np.log(2.0*np.pi*ee_REF1.RT/(2.0*ee_REF1.k_values[1:])) \
+ 1.0/2.0*np.log(2.0*np.pi*ee_REF1.RT/(k_prime_coeff*ee_REF1.k_values[1:])) \
+ 1.0/2.0*np.log(2.0*np.pi*ee_REF1.RT/ee_REF1.k_values[1:]) \
- np.log( ee_REF1.L**3 * 8.0 * (np.pi**2) * (ee_REF1.d/2.0)**2 * ee_REF1.c ) )
print('theory_dG_in_kT', theory_dG_in_kT)
print('ee_REF1.g', ee_REF1.g)
plt.figure(figsize=(10,4))
# Plot the final free energy estimates as a function of k
plt.subplot(1,2,1)
#plt.plot(ee.k_values, ee.g, 'o-', label='EE result')
plt.plot(ee_REF1.k_values[1:], theory_dG_in_kT, 'o-', label='theory')
plt.xlabel('$k$ (kJ/nm$^2$)')
plt.ylabel('$\Delta G_{rest}$ (kT)')
plt.legend(loc='best')
```
### REF_RL/RUN2
```
""";LIG_res.itp
[ position_restraints ]
;i funct fcx fcy fcz
13 1 800 800 800
19 1 800 800 800
9 1 800 800 800
"""
# From LIG_h.pdb:
"""
ATOM 9 C2 LIG A 1 12.189 0.731 23.852 1.00 0.00 C
ATOM 13 CL LIG A 1 14.006 -1.527 21.119 1.00 0.00 Cl
ATOM 19 C13 LIG A 1 3.244 2.176 20.610 1.00 0.00 C
"""
x0 = np.zeros((3,3))
x0[0,:] = 0.1*np.array([12.189, 0.731, 23.852]) # converted to nm
x0[1,:] = 0.1*np.array([14.006, -1.527, 21.119]) # converted to nm
x0[2,:] = 0.1*np.array([ 3.244, 2.176, 20.610]) # converted to nm
V0 = 1660.0 # in Å^3 is the standard volume
L0 = ((1660.0)**(1/3)) * 0.1 # converted to nm
print('L0', L0, 'nm')
ee_REF2 = EESampler_RigidThreeParticle(L=L0, x0=x0, a1=x0)
# The *theory* for the triple-restraint rigid rotor says that
# dG/kT = -ln ( [(2*\pi)/(\beta *3k )]^{3/2} / L^3
# [(2*\pi)/(\beta*2k))]^{1/2} * [(2*\pi)/(\beta *(2+c^2/(d/2)^2)k )]^{1/2} / 4 \pi (d/2)^2
# [(2*\pi)/(\beta *k )]^{1/2} / 2 \pi c
print('ee_REF2.d', ee_REF2.d)
print('ee_REF2.c', ee_REF2.c)
print('ee_REF2.L', ee_REF2.L)
k_prime_coeff = 2.0 + (ee_REF2.c/(ee_REF2.d/2.))**2.0
print('k_prime_coeff', k_prime_coeff)
theory_dG_in_kT = -1.0*( 3.0/2.0*np.log(2.0*np.pi*ee_REF2.RT/(3.0*ee_REF2.k_values[1:])) \
+ 1.0/2.0*np.log(2.0*np.pi*ee_REF2.RT/(2.0*ee_REF2.k_values[1:])) \
+ 1.0/2.0*np.log(2.0*np.pi*ee_REF2.RT/(k_prime_coeff*ee_REF2.k_values[1:])) \
+ 1.0/2.0*np.log(2.0*np.pi*ee_REF2.RT/ee_REF2.k_values[1:]) \
- np.log( ee_REF2.L**3 * 8.0 * (np.pi**2) * (ee_REF2.d/2.0)**2 * ee_REF2.c ) )
print('theory_dG_in_kT', theory_dG_in_kT)
print('ee_REF2.g', ee_REF2.g)
plt.figure(figsize=(10,4))
# Plot the final free energy estimates as a function of k
plt.subplot(1,2,1)
#plt.plot(ee.k_values, ee.g, 'o-', label='EE result')
plt.plot(ee_REF2.k_values[1:], theory_dG_in_kT, 'o-', label='theory')
plt.xlabel('$k$ (kJ/nm$^2$)')
plt.ylabel('$\Delta G_{rest}$ (kT)')
plt.legend(loc='best')
nsteps = 100000
traj = ee.sample(nsteps)
# print(traj['dhdl'])
step = traj.loc[:,'step'].values
p1 = np.zeros((step.shape[0], 3))
p1[:,0] = traj.loc[:,'x1'].values
p1[:,1] = traj.loc[:,'y1'].values
p1[:,2] = traj.loc[:,'z1'].values
p2 = np.zeros((step.shape[0], 3))
p2[:,0] = traj.loc[:,'x2'].values
p2[:,1] = traj.loc[:,'y2'].values
p2[:,2] = traj.loc[:,'z2'].values
d = traj.loc[:,'distance'].values
c = traj.loc[:,'height'].values
import matplotlib
from matplotlib import pyplot as plt
plt.figure(figsize=(10,4))
plt.subplot(1,3,1)
plt.plot(step, p1)
plt.plot(step, p2)
plt.subplot(1,3,2)
plt.plot(step, d)
plt.subplot(1,3,3)
plt.plot(step, c)
# The *theory* for the triple-restraint rigid rotor says that
# dG/kT = -ln ( [(2*\pi)/(\beta *3k )]^{3/2} / L^3
# [(2*\pi)/(\beta*2k))]^{1/2} * [(2*\pi)/(\beta *(2+c^2/(d/2)^2)k )]^{1/2} / 4 \pi (d/2)^2
# [(2*\pi)/(\beta *k )]^{1/2} / 2 \pi c
print('ee.d', ee.d)
print('ee.c', ee.c)
print('ee.L', ee.L)
k_prime_coeff = 2.0 + (ee.c/(ee.d/2.))**2.0
print('k_prime_coeff', k_prime_coeff)
theory_dG_in_kT = -1.0*( 3.0/2.0*np.log(2.0*np.pi*ee.RT/(3.0*ee.k_values[1:])) \
+ 1.0/2.0*np.log(2.0*np.pi*ee.RT/(2.0*ee.k_values[1:])) \
+ 1.0/2.0*np.log(2.0*np.pi*ee.RT/(k_prime_coeff*ee.k_values[1:])) \
+ 1.0/2.0*np.log(2.0*np.pi*ee.RT/ee.k_values[1:]) \
- np.log( ee.L**3 * 8.0 * (np.pi**2) * (ee.d/2.0)**2 * ee.c ) )
print('theory_dG_in_kT', theory_dG_in_kT)
print('ee.g',ee.g)
plt.figure(figsize=(10,4))
# Plot the final free energy estimates as a function of k
plt.subplot(1,2,1)
#plt.plot(ee.k_values, ee.g, 'o-', label='EE result')
plt.plot(ee.k_values[1:], theory_dG_in_kT, 'o-', label='theory')
plt.xlabel('$k$ (kJ/nm$^2$)')
plt.ylabel('$\Delta G_{rest}$ (kT)')
plt.legend(loc='best')
# Plot the convergence of the free energy
step = traj.loc[:,'step'].values
free_energy = traj.loc[:,'free_energy'].values
plt.subplot(1,2,2)
plt.plot(step, free_energy, 'r-')
plt.xlabel('step')
plt.ylabel('$\Delta G_{rest}$ (kT)')
# plt.legend(loc='best')
```
| github_jupyter |
# Creating a Sentiment Analysis Web App
## Using PyTorch and SageMaker
_Deep Learning Nanodegree Program | Deployment_
---
Now that we have a basic understanding of how SageMaker works we will try to use it to construct a complete project from end to end. Our goal will be to have a simple web page which a user can use to enter a movie review. The web page will then send the review off to our deployed model which will predict the sentiment of the entered review.
## Instructions
Some template code has already been provided for you, and you will need to implement additional functionality to successfully complete this notebook. You will not need to modify the included code beyond what is requested. Sections that begin with '**TODO**' in the header indicate that you need to complete or implement some portion within them. Instructions will be provided for each section and the specifics of the implementation are marked in the code block with a `# TODO: ...` comment. Please be sure to read the instructions carefully!
In addition to implementing code, there will be questions for you to answer which relate to the task and your implementation. Each section where you will answer a question is preceded by a '**Question:**' header. Carefully read each question and provide your answer below the '**Answer:**' header by editing the Markdown cell.
> **Note**: Code and Markdown cells can be executed using the **Shift+Enter** keyboard shortcut. In addition, a cell can be edited by clicking it (double-click for Markdown cells) or by pressing **Enter** while it is highlighted.
## General Outline
Recall the general outline for SageMaker projects using a notebook instance.
1. Download or otherwise retrieve the data.
2. Process / Prepare the data.
3. Upload the processed data to S3.
4. Train a chosen model.
5. Test the trained model (typically using a batch transform job).
6. Deploy the trained model.
7. Use the deployed model.
For this project, you will be following the steps in the general outline with some modifications.
First, you will not be testing the model in its own step. You will still be testing the model, however, you will do it by deploying your model and then using the deployed model by sending the test data to it. One of the reasons for doing this is so that you can make sure that your deployed model is working correctly before moving forward.
In addition, you will deploy and use your trained model a second time. In the second iteration you will customize the way that your trained model is deployed by including some of your own code. In addition, your newly deployed model will be used in the sentiment analysis web app.
## Step 1: Downloading the data
As in the XGBoost in SageMaker notebook, we will be using the [IMDb dataset](http://ai.stanford.edu/~amaas/data/sentiment/)
> Maas, Andrew L., et al. [Learning Word Vectors for Sentiment Analysis](http://ai.stanford.edu/~amaas/data/sentiment/). In _Proceedings of the 49th Annual Meeting of the Association for Computational Linguistics: Human Language Technologies_. Association for Computational Linguistics, 2011.
```
%mkdir ../data
!wget -O ../data/aclImdb_v1.tar.gz http://ai.stanford.edu/~amaas/data/sentiment/aclImdb_v1.tar.gz
!tar -zxf ../data/aclImdb_v1.tar.gz -C ../data
```
## Step 2: Preparing and Processing the data
Also, as in the XGBoost notebook, we will be doing some initial data processing. The first few steps are the same as in the XGBoost example. To begin with, we will read in each of the reviews and combine them into a single input structure. Then, we will split the dataset into a training set and a testing set.
```
import os
import glob
def read_imdb_data(data_dir='../data/aclImdb'):
data = {}
labels = {}
for data_type in ['train', 'test']:
data[data_type] = {}
labels[data_type] = {}
for sentiment in ['pos', 'neg']:
data[data_type][sentiment] = []
labels[data_type][sentiment] = []
path = os.path.join(data_dir, data_type, sentiment, '*.txt')
files = glob.glob(path)
for f in files:
with open(f) as review:
data[data_type][sentiment].append(review.read())
# Here we represent a positive review by '1' and a negative review by '0'
labels[data_type][sentiment].append(1 if sentiment == 'pos' else 0)
assert len(data[data_type][sentiment]) == len(labels[data_type][sentiment]), \
"{}/{} data size does not match labels size".format(data_type, sentiment)
return data, labels
data, labels = read_imdb_data()
print("IMDB reviews: train = {} pos / {} neg, test = {} pos / {} neg".format(
len(data['train']['pos']), len(data['train']['neg']),
len(data['test']['pos']), len(data['test']['neg'])))
```
Now that we've read the raw training and testing data from the downloaded dataset, we will combine the positive and negative reviews and shuffle the resulting records.
```
from sklearn.utils import shuffle
def prepare_imdb_data(data, labels):
"""Prepare training and test sets from IMDb movie reviews."""
#Combine positive and negative reviews and labels
data_train = data['train']['pos'] + data['train']['neg']
data_test = data['test']['pos'] + data['test']['neg']
labels_train = labels['train']['pos'] + labels['train']['neg']
labels_test = labels['test']['pos'] + labels['test']['neg']
#Shuffle reviews and corresponding labels within training and test sets
data_train, labels_train = shuffle(data_train, labels_train)
data_test, labels_test = shuffle(data_test, labels_test)
# Return the unified training data, test data, training labels, test labels
return data_train, data_test, labels_train, labels_test
train_X, test_X, train_y, test_y = prepare_imdb_data(data, labels)
print("IMDb reviews (combined): train = {}, test = {}".format(len(train_X), len(test_X)))
```
Now that we have our training and testing sets unified and prepared, we should do a quick check and see an example of the data our model will be trained on. This is generally a good idea as it allows you to see how each of the further processing steps affects the reviews and it also ensures that the data has been loaded correctly.
```
print(train_X[100])
print(train_y[100])
```
The first step in processing the reviews is to make sure that any html tags that appear are removed. In addition, we wish to tokenize our input so that words such as *entertained* and *entertaining* are considered the same with regard to sentiment analysis.
```
import nltk
from nltk.corpus import stopwords
from nltk.stem.porter import *
import re
from bs4 import BeautifulSoup
def review_to_words(review):
nltk.download("stopwords", quiet=True)
stemmer = PorterStemmer()
text = BeautifulSoup(review, "html.parser").get_text() # Remove HTML tags
text = re.sub(r"[^a-zA-Z0-9]", " ", text.lower()) # Convert to lower case
words = text.split() # Split string into words
words = [w for w in words if w not in stopwords.words("english")] # Remove stopwords
words = [stemmer.stem(w) for w in words] # stem each word, reusing the stemmer created above
return words
# re is already imported above; use raw strings for the regex patterns
REPLACE_NO_SPACE = re.compile(r"(\.)|(\;)|(\:)|(\!)|(\')|(\?)|(\,)|(\")|(\()|(\))|(\[)|(\])")
REPLACE_WITH_SPACE = re.compile(r"(<br\s*/><br\s*/>)|(\-)|(\/)")
def review_to_words_2(review):
words = REPLACE_NO_SPACE.sub("", review.lower())
words = REPLACE_WITH_SPACE.sub(" ", words)
return words
```
The `review_to_words` method defined above uses `BeautifulSoup` to remove any html tags that appear and uses the `nltk` package to tokenize the reviews. As a check to ensure we know how everything is working, try applying `review_to_words` to one of the reviews in the training set.
```
# TODO: Apply review_to_words to a review (train_X[100] or any other review)
review_to_words(train_X[100])
```
**Question:** Above we mentioned that `review_to_words` method removes html formatting and allows us to tokenize the words found in a review, for example, converting *entertained* and *entertaining* into *entertain* so that they are treated as though they are the same word. What else, if anything, does this method do to the input?
**Answer:** 1) It removes any non-alphanumeric characters (such as punctuation), which are not useful for sentiment and would hurt accuracy; 2) it removes the predefined English stopwords (*is*, *a*, *of*, *the*, *to*, *his*, ...), which are neutral words; 3) it converts everything to lower case; and 4) it keeps only the stem of each word.
The method below applies the `review_to_words` method to each of the reviews in the training and testing datasets. In addition it caches the results. This is because performing this processing step can take a long time. This way if you are unable to complete the notebook in the current session, you can come back without needing to process the data a second time.
```
import pickle
cache_dir = os.path.join("../cache", "sentiment_analysis") # where to store cache files
os.makedirs(cache_dir, exist_ok=True) # ensure cache directory exists
def preprocess_data(data_train, data_test, labels_train, labels_test,
cache_dir=cache_dir, cache_file="preprocessed_data.pkl"):
"""Convert each review to words; read from cache if available."""
# If cache_file is not None, try to read from it first
cache_data = None
if cache_file is not None:
try:
with open(os.path.join(cache_dir, cache_file), "rb") as f:
cache_data = pickle.load(f)
print("Read preprocessed data from cache file:", cache_file)
except:
pass # unable to read from cache, but that's okay
# If cache is missing, then do the heavy lifting
if cache_data is None:
# Preprocess training and test data to obtain words for each review
#words_train = list(map(review_to_words, data_train))
#words_test = list(map(review_to_words, data_test))
words_train = [review_to_words(review) for review in data_train]
words_test = [review_to_words(review) for review in data_test]
# Write to cache file for future runs
if cache_file is not None:
cache_data = dict(words_train=words_train, words_test=words_test,
labels_train=labels_train, labels_test=labels_test)
with open(os.path.join(cache_dir, cache_file), "wb") as f:
pickle.dump(cache_data, f)
print("Wrote preprocessed data to cache file:", cache_file)
else:
# Unpack data loaded from cache file
words_train, words_test, labels_train, labels_test = (cache_data['words_train'],
cache_data['words_test'], cache_data['labels_train'], cache_data['labels_test'])
return words_train, words_test, labels_train, labels_test
# Preprocess data
train_X, test_X, train_y, test_y = preprocess_data(train_X, test_X, train_y, test_y)
```
## Transform the data
In the XGBoost notebook we transformed the data from its word representation to a bag-of-words feature representation. For the model we are going to construct in this notebook we will construct a feature representation which is very similar. To start, we will represent each word as an integer. Of course, some of the words that appear in the reviews occur very infrequently and so likely don't contain much information for the purposes of sentiment analysis. The way we will deal with this problem is that we will fix the size of our working vocabulary and we will only include the words that appear most frequently. We will then combine all of the infrequent words into a single category and, in our case, we will label it as `1`.
Since we will be using a recurrent neural network, it will be convenient if the length of each review is the same. To do this, we will fix a size for our reviews and then pad short reviews with the category 'no word' (which we will label `0`) and truncate long reviews.
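As a toy illustration of this scheme (the tiny vocabulary below is made up purely for demonstration and is not the `word_dict` we will build next), a review is mapped word-by-word to integers, unknown words become the 'infrequent' label `1`, and short reviews are padded with the 'no word' label `0`:
```
# Hypothetical 3-word vocabulary; labels 0 and 1 are reserved for 'no word' and 'infrequent'
toy_word_dict = {'movi': 2, 'great': 3, 'act': 4}

review = ['great', 'movi', 'terribl', 'act']   # 'terribl' is not in the toy vocabulary
pad = 8

encoded = [toy_word_dict.get(word, 1) for word in review[:pad]]  # truncate long reviews
encoded = encoded + [0] * (pad - len(encoded))                   # pad short reviews
print(encoded)   # [3, 2, 1, 4, 0, 0, 0, 0]
```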
### (TODO) Create a word dictionary
To begin with, we need to construct a way to map words that appear in the reviews to integers. Here we fix the size of our vocabulary (including the 'no word' and 'infrequent' categories) to be `5000` but you may wish to change this to see how it affects the model.
> **TODO:** Complete the implementation for the `build_dict()` method below. Note that even though the vocab_size is set to `5000`, we only want to construct a mapping for the most frequently appearing `4998` words. This is because we want to reserve the special labels `0` for 'no word' and `1` for 'infrequent word'.
```
import numpy as np
from collections import Counter
def build_dict(data, vocab_size = 5000):
"""Construct and return a dictionary mapping each of the most frequently appearing words to a unique integer."""
# TODO: Determine how often each word appears in `data`. Note that `data` is a list of sentences and that a
# sentence is a list of words.
word_count ={} # A dict storing the words that appear in the reviews along with how often they occur
####Begin: Yanfei First Try#############
#word_count =Counter(x for xs in data for x in set(xs)) # it is a sorted dictionary already, with the most frequently appearing word in word_count[0], the last frequently appearing word in word_count[-1]
# TODO: Sort the words found in `data` so that sorted_words[0] is the most frequently appearing word and
# sorted_words[-1] is the least frequently appearing word.
#words_sorted=sorted(words.items(), key=lambda x: x[1], reverse=True) # it seems not nesseary, since it is a sorted dictionary already, with the most frequently appearing word first, the last frequently appearing word last
# sorted_words = None
# sorted_words = list(word_count.keys())
####End: Yanfei First Try#############
####Begin: Yanfei Second Try############
review_words = []
for review_id in range(len(data)):
for review_word in data[review_id]:
review_words.append(review_word)
word_count = Counter(review_words)
sorted_words = sorted(word_count, key=word_count.get, reverse=True) # TODO: Sort the words found in `data` so that sorted_words[0] is the most frequently appearing word and
# sorted_words[-1] is the least frequently appearing word.
###End: Yanfei Second Try###############
####Begin: Yanfei Third Try############
# word_count =Counter(x for xs in data for x in set(xs))
# sorted_words = sorted(word_count, key=word_count.get, reverse=True) # TODO: Sort the words found in `data` so that sorted_words[0] is the most frequently appearing word and
# sorted_words[-1] is the least frequently appearing word.
###End: Yanfei Third Try###############
word_dict = {} # This is what we are building, a dictionary that translates words into integers
for idx, word in enumerate(sorted_words[:vocab_size - 2]): # The -2 is so that we save room for the 'no word'
word_dict[word] = idx + 2 # 'infrequent' labels
return word_dict
word_dict = build_dict(train_X)
```
**Question:** What are the five most frequently appearing (tokenized) words in the training set? Does it makes sense that these words appear frequently in the training set?
**Answer:** ['one', 'come', 'cartoon', 'long', 'minut']; yes, it makes sense for these words to appear frequently in movie reviews.
```
# TODO: Use this space to determine the five most frequently appearing words in the training set.
list(word_dict.keys())[0:5]
```
### Save `word_dict`
Later on when we construct an endpoint which processes a submitted review we will need to make use of the `word_dict` which we have created. As such, we will save it to a file now for future use.
```
data_dir = '../data/pytorch' # The folder we will use for storing data
if not os.path.exists(data_dir): # Make sure that the folder exists
os.makedirs(data_dir)
with open(os.path.join(data_dir, 'word_dict.pkl'), "wb") as f:
pickle.dump(word_dict, f)
```
### Transform the reviews
Now that we have our word dictionary which allows us to transform the words appearing in the reviews into integers, it is time to make use of it and convert our reviews to their integer sequence representation, making sure to pad or truncate to a fixed length, which in our case is `500`.
```
def convert_and_pad(word_dict, sentence, pad=500):
NOWORD = 0 # We will use 0 to represent the 'no word' category
INFREQ = 1 # and we use 1 to represent the infrequent words, i.e., words not appearing in word_dict
working_sentence = [NOWORD] * pad
for word_index, word in enumerate(sentence[:pad]):
if word in word_dict:
working_sentence[word_index] = word_dict[word]
else:
working_sentence[word_index] = INFREQ
return working_sentence, min(len(sentence), pad)
def convert_and_pad_data(word_dict, data, pad=500):
result = []
lengths = []
for sentence in data:
converted, leng = convert_and_pad(word_dict, sentence, pad)
result.append(converted)
lengths.append(leng)
return np.array(result), np.array(lengths)
train_X, train_X_len = convert_and_pad_data(word_dict, train_X)
test_X, test_X_len = convert_and_pad_data(word_dict, test_X)
```
As a quick check to make sure that things are working as intended, check to see what one of the reviews in the training set looks like after having been processed. Does this look reasonable? What is the length of a review in the training set?
```
# Use this cell to examine one of the processed reviews to make sure everything is working as intended.
print(len(train_X))
print(len(train_X_len))
print(len(test_X))
print(len(test_X_len))
print(len(train_X[100]))
print(train_X[100])
```
**Question:** In the cells above we use the `preprocess_data` and `convert_and_pad_data` methods to process both the training and testing set. Why or why not might this be a problem?
**Answer:** It can be a problem. Strictly, we should only have access to the training set, so any representation (such as the word dictionary) should be built from the training data alone. There is also a practical issue: producing the pickled cache takes a long time, and `preprocess_data` runs into problems if the cache file is not ready yet.
`convert_and_pad_data` can also be a problem when a review is longer than the fixed length of 500 words: the remainder is truncated and its information is lost, so if the critical comments come at the end of a long review we may even extract the opposite of its intended meaning.
## Step 3: Upload the data to S3
As in the XGBoost notebook, we will need to upload the training dataset to S3 in order for our training code to access it. For now we will save it locally and we will upload to S3 later on.
### Save the processed training dataset locally
It is important to note the format of the data that we are saving as we will need to know it when we write the training code. In our case, each row of the dataset has the form `label`, `length`, `review[500]` where `review[500]` is a sequence of `500` integers representing the words in the review.
```
import pandas as pd
pd.concat([pd.DataFrame(train_y), pd.DataFrame(train_X_len), pd.DataFrame(train_X)], axis=1) \
.to_csv(os.path.join(data_dir, 'train.csv'), header=False, index=False)
```
### Uploading the training data
Next, we need to upload the training data to the SageMaker default S3 bucket so that we can provide access to it while training our model.
```
import sagemaker
sagemaker_session = sagemaker.Session()
bucket = sagemaker_session.default_bucket()
prefix = 'sagemaker/sentiment_rnn'
role = sagemaker.get_execution_role()
input_data = sagemaker_session.upload_data(path=data_dir, bucket=bucket, key_prefix=prefix)
```
**NOTE:** The cell above uploads the entire contents of our data directory. This includes the `word_dict.pkl` file. This is fortunate as we will need this later on when we create an endpoint that accepts an arbitrary review. For now, we will just take note of the fact that it resides in the data directory (and so also in the S3 training bucket) and that we will need to make sure it gets saved in the model directory.
## Step 4: Build and Train the PyTorch Model
In the XGBoost notebook we discussed what a model is in the SageMaker framework. In particular, a model comprises three objects
- Model Artifacts,
- Training Code, and
- Inference Code,
each of which interact with one another. In the XGBoost example we used training and inference code that was provided by Amazon. Here we will still be using containers provided by Amazon with the added benefit of being able to include our own custom code.
We will start by implementing our own neural network in PyTorch along with a training script. For the purposes of this project we have provided the necessary model object in the `model.py` file, inside of the `train` folder. You can see the provided implementation by running the cell below.
```
!pygmentize train/model.py
```
The important takeaway from the implementation provided is that there are three parameters that we may wish to tweak to improve the performance of our model. These are the embedding dimension, the hidden dimension and the size of the vocabulary. We will likely want to make these parameters configurable in the training script so that if we wish to modify them we do not need to modify the script itself. We will see how to do this later on. To start we will write some of the training code in the notebook so that we can more easily diagnose any issues that arise.
First we will load a small portion of the training data set to use as a sample. It would be very time consuming to try and train the model completely in the notebook as we do not have access to a gpu and the compute instance that we are using is not particularly powerful. However, we can work on a small bit of the data to get a feel for how our training script is behaving.
```
import torch
import torch.utils.data
# Read in only the first 250 rows
train_sample = pd.read_csv(os.path.join(data_dir, 'train.csv'), header=None, names=None, nrows=250)
# Turn the input pandas dataframe into tensors
train_sample_y = torch.from_numpy(train_sample[[0]].values).float().squeeze()
train_sample_X = torch.from_numpy(train_sample.drop([0], axis=1).values).long()
# Build the dataset
train_sample_ds = torch.utils.data.TensorDataset(train_sample_X, train_sample_y)
# Build the dataloader
train_sample_dl = torch.utils.data.DataLoader(train_sample_ds, batch_size=50)
```
### (TODO) Writing the training method
Next we need to write the training code itself. This should be very similar to training methods that you have written before to train PyTorch models. We will leave any difficult aspects such as model saving / loading and parameter loading until a little later.
```
def train(model, train_loader, epochs, optimizer, loss_fn, device):
for epoch in range(1, epochs + 1):
model.train()
total_loss = 0
for batch in train_loader:
batch_X, batch_y = batch
batch_X = batch_X.to(device)
batch_y = batch_y.to(device)
# TODO: Complete this train method to train the model provided.
optimizer.zero_grad()
out = model(batch_X)
#model.zero_grad()
#out = model.forward(batch_X)
loss = loss_fn(out, batch_y)
loss.backward()
optimizer.step()
total_loss += loss.data.item()
print("Epoch: {}, BCELoss: {}".format(epoch, total_loss / len(train_loader)))
```
Supposing we have the training method above, we will test that it is working by writing a bit of code in the notebook that executes our training method on the small sample training set that we loaded earlier. The reason for doing this in the notebook is so that we have an opportunity to fix any errors that arise early when they are easier to diagnose.
```
import torch.optim as optim
from train.model import LSTMClassifier
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = LSTMClassifier(32, 100, 5000).to(device)
optimizer = optim.Adam(model.parameters())
loss_fn = torch.nn.BCELoss()
train(model, train_sample_dl, 5, optimizer, loss_fn, device)
```
In order to construct a PyTorch model using SageMaker we must provide SageMaker with a training script. We may optionally include a directory which will be copied to the container and from which our training code will be run. When the training container is executed it will check the uploaded directory (if there is one) for a `requirements.txt` file and install any required Python libraries, after which the training script will be run.
### (TODO) Training the model
When a PyTorch model is constructed in SageMaker, an entry point must be specified. This is the Python file which will be executed when the model is trained. Inside of the `train` directory is a file called `train.py` which has been provided and which contains most of the necessary code to train our model. The only thing that is missing is the implementation of the `train()` method which you wrote earlier in this notebook.
**TODO**: Copy the `train()` method written above and paste it into the `train/train.py` file where required.
The way that SageMaker passes hyperparameters to the training script is by way of arguments. These arguments can then be parsed and used in the training script. To see how this is done take a look at the provided `train/train.py` file.
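As a rough sketch of what that looks like inside a training script (the argument names below mirror the hyperparameters passed in the next cell, but the exact set in the provided `train/train.py` may differ), SageMaker supplies hyperparameters as command-line arguments and standard paths as environment variables:
```
import argparse
import os

parser = argparse.ArgumentParser()

# Hyperparameters arrive as command-line arguments
parser.add_argument('--epochs', type=int, default=10)
parser.add_argument('--hidden_dim', type=int, default=100)

# SageMaker exposes standard locations through environment variables
parser.add_argument('--model-dir', type=str, default=os.environ.get('SM_MODEL_DIR', '.'))
parser.add_argument('--data-dir', type=str, default=os.environ.get('SM_CHANNEL_TRAINING', '.'))

args, _ = parser.parse_known_args()
print(args.epochs, args.hidden_dim, args.model_dir)
```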
```
from sagemaker.pytorch import PyTorch
estimator = PyTorch(entry_point="train.py",
source_dir="train",
role=role,
framework_version='0.4.0',
train_instance_count=1,
train_instance_type='ml.p2.xlarge', # original: train_instance_type='ml.p2.xlarge'; the support center has not yet processed my limit-increase ticket
hyperparameters={
'epochs': 10,
'hidden_dim': 200,
})
estimator.fit({'training': input_data})
```
## Step 5: Testing the model
As mentioned at the top of this notebook, we will be testing this model by first deploying it and then sending the testing data to the deployed endpoint. We will do this so that we can make sure that the deployed model is working correctly.
## Step 6: Deploy the model for testing
Now that we have trained our model, we would like to test it to see how it performs. Currently our model takes input of the form `review_length, review[500]` where `review[500]` is a sequence of `500` integers which describe the words present in the review, encoded using `word_dict`. Fortunately for us, SageMaker provides built-in inference code for models with simple inputs such as this.
There is one thing that we need to provide, however, and that is a function which loads the saved model. This function must be called `model_fn()` and takes as its only parameter a path to the directory where the model artifacts are stored. This function must also be present in the python file which we specified as the entry point. In our case the model loading function has been provided and so no changes need to be made.
**NOTE**: When the built-in inference code is run it must import the `model_fn()` method from the `train.py` file. This is why the training code is wrapped in a main guard ( ie, `if __name__ == '__main__':` )
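For reference, a model-loading function of this kind looks roughly like the sketch below. This is an illustrative outline only, not the exact code shipped in `train.py`; the constructor arguments and the file names `model.pth` and `word_dict.pkl` are assumptions made for the example:
```
import os
import pickle
import torch
from train.model import LSTMClassifier

def model_fn(model_dir):
    """Load the PyTorch model from the directory where SageMaker unpacked the model artifacts."""
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    # Rebuild the network with the same dimensions used during training (illustrative values)
    model = LSTMClassifier(32, 100, 5000)
    with open(os.path.join(model_dir, 'model.pth'), 'rb') as f:
        model.load_state_dict(torch.load(f, map_location=device))

    # The saved word_dict is needed later by the inference code
    with open(os.path.join(model_dir, 'word_dict.pkl'), 'rb') as f:
        model.word_dict = pickle.load(f)

    return model.to(device).eval()
```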
Since we don't need to change anything in the code that was uploaded during training, we can simply deploy the current model as-is.
**NOTE:** When deploying a model you are asking SageMaker to launch a compute instance that will wait for data to be sent to it. As a result, this compute instance will continue to run until *you* shut it down. This is important to know since the cost of a deployed endpoint depends on how long it has been running for.
In other words **If you are no longer using a deployed endpoint, shut it down!**
**TODO:** Deploy the trained model.
```
estimator.model_data
training_job_name = estimator.latest_training_job.name
# TODO: Deploy the trained model
predictor = estimator.deploy(initial_instance_count=1, instance_type='ml.m4.xlarge')
# original: instance_type='ml.p2.xlarge'; using ml.m4.xlarge because the GPU limit-increase ticket is still pending
# Alternative: wrap the training artifacts in a PyTorchModel and deploy that instead
from sagemaker.pytorch import PyTorchModel
model_predict = PyTorchModel(model_data=estimator.model_data,
                             role=role,
                             framework_version='0.4.0',
                             entry_point="train.py",
                             source_dir="train")
predictor_predict = model_predict.deploy(initial_instance_count=1, instance_type='ml.m4.xlarge')
predictor_predict.endpoint
# predictor = estimator.deploy(initial_instance_count=1, instance_type='ml.p2.xlarge')  # GPU variant, once the limit increase is approved
```
## Step 7 - Use the model for testing
Once deployed, we can read in the test data and send it off to our deployed model to get some results. Once we collect all of the results we can determine how accurate our model is.
```
test_X = pd.concat([pd.DataFrame(test_X_len), pd.DataFrame(test_X)], axis=1)
# We split the data into chunks and send each chunk separately, accumulating the results.
def predict(data, rows=512):
split_array = np.array_split(data, int(data.shape[0] / float(rows) + 1))
predictions = np.array([])
for array in split_array:
predictions = np.append(predictions, predictor.predict(array))
return predictions
predictions = predict(test_X.values)
predictions = [round(num) for num in predictions]
from sklearn.metrics import accuracy_score
accuracy_score(test_y, predictions)
```
**Question:** How does this model compare to the XGBoost model you created earlier? Why might these two models perform differently on this dataset? Which do *you* think is better for sentiment analysis?
**Answer:** XGBoost has a better accuracy score than this model and runs much faster: the XGBoost training job takes only about 3 minutes, while training this model takes about 2 hours. That said, the two accuracy scores are close to each other, and this model could probably be improved, but overall I think XGBoost is better for sentiment analysis here.
### (TODO) More testing
We now have a trained model which has been deployed and which we can send processed reviews to and which returns the predicted sentiment. However, ultimately we would like to be able to send our model an unprocessed review. That is, we would like to send the review itself as a string. For example, suppose we wish to send the following review to our model.
```
test_review = 'The simplest pleasures in life are the best, and this film is one of them. Combining a rather basic storyline of love and adventure this movie transcends the usual weekend fair with wit and unmitigated charm.'
```
The question we now need to answer is, how do we send this review to our model?
Recall in the first section of this notebook we did a bunch of data processing to the IMDb dataset. In particular, we did two specific things to the provided reviews.
- Removed any html tags and stemmed the input
- Encoded the review as a sequence of integers using `word_dict`
In order to process the review, we will need to repeat these two steps.
**TODO**: Using the `review_to_words` and `convert_and_pad` methods from section one, convert `test_review` into a numpy array `test_data` suitable to send to our model. Remember that our model expects input of the form `review_length, review[500]`.
```
# TODO: Convert test_review into a form usable by the model and save the results in test_data
test_data = None
test_words = review_to_words(test_review)
test_data, test_data_len = convert_and_pad(word_dict, test_words)
```
Now that we have processed the review, we can send the resulting array to our model to predict the sentiment of the review.
```
predictor.predict(test_data)
```
Since the return value of our model is close to `1`, we can be certain that the review we submitted is positive.
### Delete the endpoint
Of course, just like in the XGBoost notebook, once we've deployed an endpoint it continues to run until we tell it to shut down. Since we are done using our endpoint for now, we can delete it.
```
estimator.delete_endpoint()
```
## Step 6 (again) - Deploy the model for the web app
Now that we know that our model is working, it's time to create some custom inference code so that we can send the model a review which has not been processed and have it determine the sentiment of the review.
As we saw above, by default the estimator which we created, when deployed, will use the entry script and directory which we provided when creating the model. However, since we now wish to accept a string as input and our model expects a processed review, we need to write some custom inference code.
We will store the code that we write in the `serve` directory. Provided in this directory is the `model.py` file that we used to construct our model, a `utils.py` file which contains the `review_to_words` and `convert_and_pad` pre-processing functions which we used during the initial data processing, and `predict.py`, the file which will contain our custom inference code. Note also that `requirements.txt` is present which will tell SageMaker what Python libraries are required by our custom inference code.
When deploying a PyTorch model in SageMaker, you are expected to provide four functions which the SageMaker inference container will use.
- `model_fn`: This function is the same function that we used in the training script and it tells SageMaker how to load our model.
- `input_fn`: This function receives the raw serialized input that has been sent to the model's endpoint and its job is to de-serialize and make the input available for the inference code.
- `output_fn`: This function takes the output of the inference code and its job is to serialize this output and return it to the caller of the model's endpoint.
- `predict_fn`: The heart of the inference script, this is where the actual prediction is done and is the function which you will need to complete.
For the simple website that we are constructing during this project, the `input_fn` and `output_fn` methods are relatively straightforward. We only require being able to accept a string as input and we expect to return a single value as output. You might imagine though that in a more complex application the input or output may be image data or some other binary data which would require some effort to serialize.
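To make that division of labour concrete, here is a deliberately simplified sketch of the three serialization-related functions plus a `predict_fn`. The real `serve/predict.py` differs in its details; the helpers `review_to_words` and `convert_and_pad` are assumed to be importable from the `utils.py` file mentioned above:
```
import numpy as np
import torch
from utils import review_to_words, convert_and_pad  # provided in the serve directory

def input_fn(serialized_input_data, content_type):
    """De-serialize the request body; we only accept plain-text reviews."""
    if content_type != 'text/plain':
        raise ValueError('Unsupported content type: ' + content_type)
    return serialized_input_data.decode('utf-8')

def output_fn(prediction_output, accept):
    """Serialize the single-number prediction back to the caller as a string."""
    return str(prediction_output)

def predict_fn(input_data, model):
    """Pre-process the raw review exactly as during training, then run inference."""
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    words = review_to_words(input_data)
    data_X, data_len = convert_and_pad(model.word_dict, words)

    # The model expects rows of the form: review_length, review[500]
    data = np.hstack(([data_len], data_X)).reshape(1, -1)
    data = torch.from_numpy(data).long().to(device)

    model.eval()
    with torch.no_grad():
        result = model(data)
    return result.item()
```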
### (TODO) Writing inference code
Before writing our custom inference code, we will begin by taking a look at the code which has been provided.
```
!pygmentize serve/predict.py
```
As mentioned earlier, the `model_fn` method is the same as the one provided in the training code and the `input_fn` and `output_fn` methods are very simple and your task will be to complete the `predict_fn` method. Make sure that you save the completed file as `predict.py` in the `serve` directory.
**TODO**: Complete the `predict_fn()` method in the `serve/predict.py` file.
### Deploying the model
Now that the custom inference code has been written, we will create and deploy our model. To begin with, we need to construct a new PyTorchModel object which points to the model artifacts created during training and also points to the inference code that we wish to use. Then we can call the deploy method to launch the deployment container.
**NOTE**: The default behaviour for a deployed PyTorch model is to assume that any input passed to the predictor is a `numpy` array. In our case we want to send a string so we need to construct a simple wrapper around the `RealTimePredictor` class to accommodate simple strings. In a more complicated situation you may want to provide a serialization object, for example if you wanted to send image data.
```
from sagemaker.predictor import RealTimePredictor
from sagemaker.pytorch import PyTorchModel
class StringPredictor(RealTimePredictor):
def __init__(self, endpoint_name, sagemaker_session):
super(StringPredictor, self).__init__(endpoint_name, sagemaker_session, content_type='text/plain')
model = PyTorchModel(model_data=estimator.model_data,
role = role,
framework_version='0.4.0',
entry_point='predict.py',
source_dir='serve',
predictor_cls=StringPredictor)
predictor = model.deploy(initial_instance_count=1, instance_type='ml.m4.xlarge')
```
### Testing the model
Now that we have deployed our model with the custom inference code, we should test to see if everything is working. Here we test our model by loading the first `250` positive and negative reviews and send them to the endpoint, then collect the results. The reason for only sending some of the data is that the amount of time it takes for our model to process the input and then perform inference is quite long and so testing the entire data set would be prohibitive.
```
import glob
def test_reviews(data_dir='../data/aclImdb', stop=250):
results = []
ground = []
# We make sure to test both positive and negative reviews
for sentiment in ['pos', 'neg']:
path = os.path.join(data_dir, 'test', sentiment, '*.txt')
files = glob.glob(path)
files_read = 0
print('Starting ', sentiment, ' files')
# Iterate through the files and send them to the predictor
for f in files:
with open(f) as review:
# First, we store the ground truth (was the review positive or negative)
if sentiment == 'pos':
ground.append(1)
else:
ground.append(0)
# Read in the review and convert to 'utf-8' for transmission via HTTP
review_input = review.read().encode('utf-8')
# Send the review to the predictor and store the results
results.append(int(predictor.predict(review_input)))
# Sending reviews to our endpoint one at a time takes a while so we
# only send a small number of reviews
files_read += 1
if files_read == stop:
break
return ground, results
ground, results = test_reviews()
from sklearn.metrics import accuracy_score
accuracy_score(ground, results)
```
As an additional test, we can try sending the `test_review` that we looked at earlier.
```
predictor.predict(test_review)
```
Now that we know our endpoint is working as expected, we can set up the web page that will interact with it. If you don't have time to finish the project now, make sure to skip down to the end of this notebook and shut down your endpoint. You can deploy it again when you come back.
## Step 7 (again): Use the model for the web app
> **TODO:** This entire section and the next contain tasks for you to complete, mostly using the AWS console.
So far we have been accessing our model endpoint by constructing a predictor object which uses the endpoint and then just using the predictor object to perform inference. What if we wanted to create a web app which accessed our model? The way things are set up currently makes that not possible since in order to access a SageMaker endpoint the app would first have to authenticate with AWS using an IAM role which included access to SageMaker endpoints. However, there is an easier way! We just need to use some additional AWS services.
<img src="Web App Diagram.svg">
The diagram above gives an overview of how the various services will work together. On the far right is the model which we trained above and which is deployed using SageMaker. On the far left is our web app that collects a user's movie review, sends it off and expects a positive or negative sentiment in return.
In the middle is where some of the magic happens. We will construct a Lambda function, which you can think of as a straightforward Python function that can be executed whenever a specified event occurs. We will give this function permission to send and receive data from a SageMaker endpoint.
Lastly, the method we will use to execute the Lambda function is a new endpoint that we will create using API Gateway. This endpoint will be a url that listens for data to be sent to it. Once it gets some data it will pass that data on to the Lambda function and then return whatever the Lambda function returns. Essentially it will act as an interface that lets our web app communicate with the Lambda function.
### Setting up a Lambda function
The first thing we are going to do is set up a Lambda function. This Lambda function will be executed whenever our public API has data sent to it. When it is executed it will receive the data, perform any sort of processing that is required, send the data (the review) to the SageMaker endpoint we've created and then return the result.
#### Part A: Create an IAM Role for the Lambda function
Since we want the Lambda function to call a SageMaker endpoint, we need to make sure that it has permission to do so. To do this, we will construct a role that we can later give the Lambda function.
Using the AWS Console, navigate to the **IAM** page and click on **Roles**. Then, click on **Create role**. Make sure that the **AWS service** is the type of trusted entity selected and choose **Lambda** as the service that will use this role, then click **Next: Permissions**.
In the search box type `sagemaker` and select the check box next to the **AmazonSageMakerFullAccess** policy. Then, click on **Next: Review**.
Lastly, give this role a name. Make sure you use a name that you will remember later on, for example `LambdaSageMakerRole`. Then, click on **Create role**.
#### Part B: Create a Lambda function
Now it is time to actually create the Lambda function.
Using the AWS Console, navigate to the AWS Lambda page and click on **Create a function**. When you get to the next page, make sure that **Author from scratch** is selected. Now, name your Lambda function, using a name that you will remember later on, for example `sentiment_analysis_func`. Make sure that the **Python 3.6** runtime is selected and then choose the role that you created in the previous part. Then, click on **Create Function**.
On the next page you will see some information about the Lambda function you've just created. If you scroll down you should see an editor in which you can write the code that will be executed when your Lambda function is triggered. In our example, we will use the code below.
```python
# We need to use the low-level library to interact with SageMaker since the SageMaker API
# is not available natively through Lambda.
import boto3
def lambda_handler(event, context):
# The SageMaker runtime is what allows us to invoke the endpoint that we've created.
runtime = boto3.Session().client('sagemaker-runtime')
# Now we use the SageMaker runtime to invoke our endpoint, sending the review we were given
response = runtime.invoke_endpoint(EndpointName = '**ENDPOINT NAME HERE**', # The name of the endpoint we created
ContentType = 'text/plain', # The data format that is expected
Body = event['body']) # The actual review
# The response is an HTTP response whose body contains the result of our inference
result = response['Body'].read().decode('utf-8')
return {
'statusCode' : 200,
'headers' : { 'Content-Type' : 'text/plain', 'Access-Control-Allow-Origin' : '*' },
'body' : result
}
```
Once you have copied and pasted the code above into the Lambda code editor, replace the `**ENDPOINT NAME HERE**` portion with the name of the endpoint that we deployed earlier. You can determine the name of the endpoint using the code cell below.
```
predictor.endpoint
```
Once you have added the endpoint name to the Lambda function, click on **Save**. Your Lambda function is now up and running. Next we need to create a way for our web app to execute the Lambda function.
### Setting up API Gateway
Now that our Lambda function is set up, it is time to create a new API using API Gateway that will trigger the Lambda function we have just created.
Using the AWS Console, navigate to **Amazon API Gateway** and then click on **Get started**.
On the next page, make sure that **New API** is selected and give the new API a name, for example, `sentiment_analysis_api`. Then, click on **Create API**.
Now we have created an API; however, it doesn't currently do anything. What we want it to do is trigger the Lambda function that we created earlier.
Select the **Actions** dropdown menu and click **Create Method**. A new blank method will be created; select its dropdown menu, choose **POST**, and then click on the check mark beside it.
For the integration point, make sure that **Lambda Function** is selected and check the **Use Lambda Proxy integration** option. This option makes sure that the data sent to the API is passed directly to the Lambda function with no processing. It also means that the return value must be a proper response object, as it will not be processed by API Gateway either.
Type the name of the Lambda function you created earlier into the **Lambda Function** text entry box and then click on **Save**. Click on **OK** in the pop-up box that then appears, giving permission to API Gateway to invoke the Lambda function you created.
The last step in creating the API Gateway is to select the **Actions** dropdown and click on **Deploy API**. You will need to create a new Deployment stage and name it anything you like, for example `prod`.
You have now successfully set up a public API to access your SageMaker model. Make sure to copy or write down the URL provided to invoke your newly created public API as this will be needed in the next step. This URL can be found at the top of the page, highlighted in blue next to the text **Invoke URL**.
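Before building the web page, you can optionally sanity-check the public API straight from this notebook. The snippet below is a minimal sketch using the `requests` library; the `api_url` value is a hypothetical placeholder that you would replace with your own invoke URL, and it assumes the API accepts a plain-text `POST` body as configured above.
```python
import requests

# Hypothetical placeholder: paste your own invoke URL from API Gateway here
api_url = "https://YOUR-API-ID.execute-api.YOUR-REGION.amazonaws.com/prod"

# Send a plain-text review to the public API and print the returned sentiment
response = requests.post(api_url, data="This movie was absolutely wonderful!")
print(response.text)
```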
## Step 4: Deploying our web app
Now that we have a publicly available API, we can start using it in a web app. For our purposes, we have provided a simple static html file which can make use of the public API you created earlier.
In the `website` folder there should be a file called `index.html`. Download the file to your computer and open that file up in a text editor of your choice. There should be a line which contains **\*\*REPLACE WITH PUBLIC API URL\*\***. Replace this string with the URL that you wrote down in the last step and then save the file.
Now, if you open `index.html` on your local computer, your browser will behave as a local web server and you can use the provided site to interact with your SageMaker model.
If you'd like to go further, you can host this html file anywhere you'd like, for example using github or hosting a static site on Amazon's S3. Once you have done this you can share the link with anyone you'd like and have them play with it too!
> **Important Note** In order for the web app to communicate with the SageMaker endpoint, the endpoint has to actually be deployed and running. This means that you are paying for it. Make sure that the endpoint is running when you want to use the web app but that you shut it down when you don't need it, otherwise you will end up with a surprisingly large AWS bill.
**TODO:** Make sure that you include the edited `index.html` file in your project submission.
Now that your web app is working, try playing around with it and see how well it works.
**Question**: Give an example of a review that you entered into your web app. What was the predicted sentiment of your example review?
**Answer:**
### Delete the endpoint
Remember to always shut down your endpoint if you are no longer using it. You are charged for the length of time that the endpoint is running so if you forget and leave it on you could end up with an unexpectedly large bill.
```
predictor.delete_endpoint()
```
| github_jupyter |
## Statistics
### Questions
```{admonition} Problem: JOIN Dataframes
:class: dropdown, tip
Can you tell me the ways in which 2 pandas data frames can be joined?
```
```{admonition} Solution:
:class: dropdown
At a very high level:
- `merge()` is used to combine two (or more) dataframes on the basis of values of common columns (indices can also be used; set `left_index=True` and/or `right_index=True`).
- `concat()` is used to append one (or more) dataframes one below the other (or sideways, depending on whether the `axis` option is set to 0 or 1).
- `join()` is used to merge two dataframes on the basis of the index; instead of using `merge()` with the option `left_index=True`, we can use `join()`.

```
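As a quick, self-contained illustration of the three approaches, the sketch below builds two small dataframes (the column names and values are made up for the example) and combines them with `merge()`, `concat()`, and `join()`.
```python
import pandas as pd

left = pd.DataFrame({"key": ["a", "b", "c"], "x": [1, 2, 3]})
right = pd.DataFrame({"key": ["b", "c", "d"], "y": [20, 30, 40]})

# merge(): combine on the values of a common column
merged = pd.merge(left, right, on="key", how="inner")

# concat(): stack dataframes one below the other (axis=0) or side by side (axis=1)
stacked = pd.concat([left, right], axis=0, ignore_index=True)

# join(): combine on the index
joined = left.set_index("key").join(right.set_index("key"), how="left")

print(merged, stacked, joined, sep="\n\n")
```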
```{admonition} Problem: [GOOGLE] Normal Distribution
:class: dropdown, tip
Write a function to generate N samples from a normal distribution and plot the histogram.
```
```
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
def normal_sample_generator(N):
# can be done using np.random.randn or stats.norm.rvs
#x = np.random.randn(N)
x = stats.norm.rvs(size=N)
num_bins = 20
plt.hist(x, bins=num_bins, facecolor='blue', alpha=0.5)
y = np.linspace(-4, 4, N)
bin_width = (x.max() - x.min()) / num_bins
plt.plot(y, stats.norm.pdf(y) * N * bin_width)
plt.show()
normal_sample_generator(10000)
```
```{admonition} Problem: [UBER] Bernoulli trial generator
:class: dropdown, tip
Given a random Bernoulli trial generator, write a function to return a value sampled from a normal distribution.
```
```{admonition} Solution:
:class: dropdown
Solution pending. Reference question: *Given a random Bernoulli trial generator, how do you return a value sampled from a normal distribution?*
```
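Until the full solution is added, here is a hedged sketch of one standard approach: by the central limit theorem, the standardized sum of many Bernoulli trials is approximately standard normal. The `bernoulli_trial` function below is only a stand-in for the generator the question assumes.
```python
import random
import math

def bernoulli_trial(p=0.5):
    # Stand-in for the given Bernoulli trial generator
    return 1 if random.random() < p else 0

def normal_from_bernoulli(n=10000, p=0.5):
    # Sum of n Bernoulli(p) trials is Binomial(n, p), which is approximately
    # Normal(n*p, n*p*(1-p)) for large n; standardizing gives ~N(0, 1)
    s = sum(bernoulli_trial(p) for _ in range(n))
    return (s - n * p) / math.sqrt(n * p * (1 - p))

print(normal_from_bernoulli())
```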
```{admonition} Problem: [PINTEREST] Interquartile Distance
:class: dropdown, tip
Given an array of unsorted random numbers (decimals) find the interquartile distance.
```
```
# Interquartile distance (IQR) is the difference between the third and first quartiles
# first let's generate a list of random numbers
import random
import numpy as np
li = [round(random.uniform(33.33, 66.66), 2) for i in range(50)]
print(li)
qtl_1 = np.quantile(li,.25)
qtl_3 = np.quantile(li,.75)
print("Interquartile distance: ", qtl_1 - qtl_3)
```
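Alternatively, assuming SciPy is available, `scipy.stats.iqr` computes the same quantity in a single call:
```python
import random
from scipy import stats

li = [round(random.uniform(33.33, 66.66), 2) for i in range(50)]
print("Interquartile distance: ", stats.iqr(li))
```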
````{admonition} Problem: [GENENTECH] Imputing the median
:class: dropdown, tip
Write a function cheese_median to impute the median price of the selected California cheeses in place of the missing values. You may assume at least one cheese is not missing its price.
Input:
```python
import pandas as pd
cheeses = {"Name": ["Bohemian Goat", "Central Coast Bleu", "Cowgirl Mozzarella", "Cypress Grove Cheddar", "Oakdale Colby"], "Price" : [15.00, None, 30.00, None, 45.00]}
df_cheeses = pd.DataFrame(cheeses)
```
| Name | Price |
|:---------------------:|:-----:|
| Bohemian Goat | 15.00 |
| Central Coast Bleu | 30.00 |
| Cowgirl Mozzarella | 30.00 |
| Cypress Grove Cheddar | 30.00 |
| Oakdale Colby | 45.00 |
````
```
import pandas as pd
cheeses = {"Name": ["Bohemian Goat", "Central Coast Bleu", "Cowgirl Mozzarella", "Cypress Grove Cheddar", "Oakdale Colby"], "Price" : [15.00, None, 30.00, None, 45.00]}
df_cheeses = pd.DataFrame(cheeses)
df_cheeses['Price'] = df_cheeses['Price'].fillna(df_cheeses['Price'].median())
df_cheeses.head()
```
| github_jupyter |
# Real Estate Price Prediction
```
import pandas as pd
df = pd.read_csv("data.csv")
df.head()
df['CHAS'].value_counts()
df.info()
df.describe()
%matplotlib inline
import matplotlib.pyplot as plt
df.hist(bins=50, figsize=(20,15))
```
## train_test_split
```
import numpy as np
def split_train_test(data, test_ratio):
np.random.seed(42)
shuffled = np.random.permutation(len(data))
test_set_size = int(len(data) * test_ratio)
test_indices = shuffled[:test_set_size]
train_indices = shuffled[test_set_size:]
return data.iloc[train_indices], data.iloc[test_indices]
train_set, test_set = split_train_test(df, 0.2)
print(f"The length of train dataset is: {len(train_set)}")
print(f"The length of train dataset is: {len(test_set)}")
def data_percent_allocation(train_set, test_set):
total = len(df)
train_percent = round((len(train_set)/total) * 100)
test_percent = round((len(test_set)/total) * 100)
return train_percent, test_percent
data_percent_allocation(train_set, test_set)
```
## train_test_split from sklearn
```
from sklearn.model_selection import train_test_split
train_set, test_set = train_test_split(df, test_size = 0.2, random_state = 42)
print(f"The length of train dataset is: {len(train_set)}")
print(f"The length of train dataset is: {len(test_set)}")
from sklearn.model_selection import StratifiedShuffleSplit
split = StratifiedShuffleSplit(n_splits = 1, test_size = 0.2, random_state = 42)
for train_index, test_index in split.split(df, df['CHAS']):
strat_train_set = df.loc[train_index]
strat_test_set = df.loc[test_index]
strat_test_set['CHAS'].value_counts()
test_set['CHAS'].value_counts()
strat_train_set['CHAS'].value_counts()
train_set['CHAS'].value_counts()
```
### Stratified sampling: equal splitting of zeros and ones
```
# Ratio of CHAS = 0 to CHAS = 1 in the stratified test set (95 zeros, 7 ones)
95/7
# Ratio of CHAS = 0 to CHAS = 1 in the stratified train set (376 zeros, 28 ones)
376/28
df = strat_train_set.copy()
```
## Correlations
```
from pandas.plotting import scatter_matrix
attributes = ["MEDV", "RM", "ZN" , "LSTAT"]
scatter_matrix(df[attributes], figsize = (12,8))
df.plot(kind="scatter", x="RM", y="MEDV", alpha=1)
```
### Trying out attribute combinations
```
df["TAXRM"] = df["TAX"]/df["RM"]
df.head()
corr_matrix = df.corr()
corr_matrix['MEDV'].sort_values(ascending=False)
# 1 means strong positive corr and -1 means strong negative corr.
# EX: if RM increases, our predicted target (MEDV) will also tend to increase.
df.plot(kind="scatter", x="TAXRM", y="MEDV", alpha=1)
df = strat_train_set.drop("MEDV", axis=1)
df_labels = strat_train_set["MEDV"].copy()
```
## Pipeline
```
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.impute import SimpleImputer
my_pipeline = Pipeline([
('imputer', SimpleImputer(strategy="median")),
('std_scaler', StandardScaler()),
])
df_numpy = my_pipeline.fit_transform(df)
df_numpy
#Numpy array of df as models will take numpy array as input.
df_numpy.shape
```
## Model Selection
```
from sklearn.linear_model import LinearRegression
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import RandomForestRegressor
# model = LinearRegression()
# model = DecisionTreeRegressor()
model = RandomForestRegressor()
model.fit(df_numpy, df_labels)
some_data = df.iloc[:5]
some_labels = df_labels.iloc[:5]
prepared_data = my_pipeline.transform(some_data)
model.predict(prepared_data)
list(some_labels)
```
## Evaluating the model
```
from sklearn.metrics import mean_squared_error
df_predictions = model.predict(df_numpy)
mse = mean_squared_error(df_labels, df_predictions)
rmse = np.sqrt(mse)
rmse
# accuracy_score is a classification metric, so it is not applicable to this regression model:
# from sklearn.metrics import accuracy_score
# accuracy_score(some_data, some_labels, normalize=False)
```
## Cross Validation
```
from sklearn.model_selection import cross_val_score
scores = cross_val_score(model, df_numpy, df_labels, scoring="neg_mean_squared_error", cv=10)
rmse_scores = np.sqrt(-scores)
rmse_scores
def print_scores(scores):
print("Scores:", scores)
print("\nMean:", scores.mean())
print("\nStandard deviation:", scores.std())
print_scores(rmse_scores)
```
### Saving Model
```
from joblib import dump, load
dump(model, 'final_model.joblib')
dump(model, 'final_model.sav')
```
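For completeness, a minimal sketch of loading the persisted model back (for example, in a separate prediction script) might look like this; it assumes the `final_model.joblib` file and the fitted `my_pipeline` from above are available.
```python
from joblib import load

# Reload the persisted RandomForestRegressor
loaded_model = load('final_model.joblib')

# New data must go through the same preprocessing pipeline before prediction, e.g.:
# predictions = loaded_model.predict(my_pipeline.transform(new_data))
```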
## Testing model on test data
```
X_test = strat_test_set.drop("MEDV", axis=1)
Y_test = strat_test_set["MEDV"].copy()
X_test_prepared = my_pipeline.transform(X_test)
final_predictions = model.predict(X_test_prepared)
final_mse = mean_squared_error(Y_test, final_predictions)
final_rmse = np.sqrt(final_mse)
final_rmse
```
| github_jupyter |
# In-Place Waveform Library Updates
This example notebook shows how one can update pulses data in-place without recompiling.
© Raytheon BBN Technologies 2020
Set the `SAVE_WF_OFFSETS` flag so that QGL will output a map of the waveform data within the compiled binary waveform library.
```
from QGL import *
import QGL
import os.path
import pickle
QGL.drivers.APS2Pattern.SAVE_WF_OFFSETS = True
```
Create the usual channel library with a couple of AWGs.
```
cl = ChannelLibrary(":memory:")
q1 = cl.new_qubit("q1")
aps2_1 = cl.new_APS2("BBNAPS1", address="192.168.5.101")
aps2_2 = cl.new_APS2("BBNAPS2", address="192.168.5.102")
dig_1 = cl.new_X6("X6_1", address=0)
h1 = cl.new_source("Holz1", "HolzworthHS9000", "HS9004A-009-1", power=-30)
h2 = cl.new_source("Holz2", "HolzworthHS9000", "HS9004A-009-2", power=-30)
cl.set_control(q1, aps2_1, generator=h1)
cl.set_measure(q1, aps2_2, dig_1.ch(1), generator=h2)
cl.set_master(aps2_1, aps2_1.ch("m2"))
cl["q1"].measure_chan.frequency = 0e6
cl.commit()
```
Compile a simple sequence.
```
mf = RabiAmp(cl["q1"], np.linspace(-1, 1, 11))
plot_pulse_files(mf, time=True)
```
Open the offsets file (located in the same directory as the `.aps2` files; there is one offsets file per AWG slice).
```
offset_f = os.path.join(os.path.dirname(mf), "Rabi-BBNAPS1.offsets")
with open(offset_f, "rb") as FID:
offsets = pickle.load(FID)
offsets
```
Let's replace every single pulse with a fixed amplitude `Utheta`
```
pulses = {l: Utheta(q1, amp=0.1, phase=0) for l in offsets}
wfm_f = os.path.join(os.path.dirname(mf), "Rabi-BBNAPS1.aps2")
QGL.drivers.APS2Pattern.update_wf_library(wfm_f, pulses, offsets)
```
We see that the data in the file has been updated.
```
plot_pulse_files(mf, time=True)
```
## Profiling
How long does this take?
```
%timeit mf = RabiAmp(cl["q1"], np.linspace(-1, 1, 100))
```
Getting the offsets is fast, and only needs to be done once
```
def get_offsets():
offset_f = os.path.join(os.path.dirname(mf), "Rabi-BBNAPS1.offsets")
with open(offset_f, "rb") as FID:
offsets = pickle.load(FID)
return offsets
%timeit offsets = get_offsets()
%timeit pulses = {l: Utheta(q1, amp=0.1, phase=0) for l in offsets}
wfm_f = os.path.join(os.path.dirname(mf), "Rabi-BBNAPS1.aps2")
%timeit QGL.drivers.APS2Pattern.update_wf_library(wfm_f, pulses, offsets)
# %timeit QGL.drivers.APS2Pattern.update_wf_library("/Users/growland/workspace/AWG/Rabi/Rabi-BBNAPS1.aps2", pulses, offsets)
```
Moral of the story: 300 ms for initial compilation, and roughly 1.3 ms for update_in_place.
| github_jupyter |
```
%load_ext Cython
import numpy as np
np.set_printoptions(precision=2,suppress=True,linewidth=250,threshold=2000)
import numpy as np
import pandas as pd
import pyBigWig
import math
import csv
import multiprocessing
bw = pyBigWig.open("/home/musab/bigwig/wgEncodeSydhTfbsHepg2Arid3anb100279IggrabSig.bigWig")
chromo = 'chr14'
total_size = bw.chroms()[chromo]
F=bw.values(chromo,0,total_size,numpy=True)
F[np.isnan(F)] = 0
%%cython -a
import numpy as np
import pandas as pd
import math
import copy
import os
import cython
@cython.boundscheck(False)
@cython.wraparound(False)
cdef V_BC ( long i, long j,double [::1] CP,double [::1] CiP):
cdef double P_SUM,iP_SUM
if i == 1:
P_SUM = CP[j-1]
iP_SUM = CiP[j-1]
else:
P_SUM = CP[j-1]-CP[i-2]
iP_SUM = CiP[j-1]-CiP[i-2]
try:
return (P_SUM*(iP_SUM/P_SUM)**2)
except:
return 0
@cython.boundscheck(False)
@cython.wraparound(False)
cdef mean( long i, long j,double [::1] CP,double [::1] Cip):
cdef double P_SUM,iP_SUM
if i == 1:
P_SUM = CP[j-1]
iP_SUM = Cip[j-1]
else:
P_SUM = CP[j-1]-CP[i-2]
iP_SUM = Cip[j-1]-Cip[i-2]
try:
return iP_SUM/P_SUM
except:
return 0
@cython.boundscheck(False)
@cython.wraparound(False)
cdef lookup(long col,long j,long row,double [:,::1] C,double [::1] CP,double [::1] CiP):
return C[col+j,j] + V_BC(col+(j+1) , row+(j+1),CP,CiP)
@cython.boundscheck(False)
@cython.wraparound(False)
cdef SMAWK(long [::1] mri,long [::1] mci, long j,double [:,::1] C,long [:,::1] D,long [::1]mposi,double [::1] CP,double [::1] CiP):
cdef long [::1] aci
cdef long [::1] bri
if mci.shape[0] != mri.shape[0]:
aci=REDUCE(mri,mci,j,C,CP,CiP)
else:
aci=mci
if (mri.shape[0]==1 and aci.shape[0]==1):
C[mri[0]+j+1,j+1]= lookup(aci[0],j,mri[0],C,CP,CiP)
mposi[mri[0]]=D[mri[0]+j,j]=aci[0]
return
bri = mri[1::2].copy()
SMAWK(bri,aci,j,C,D,mposi,CP,CiP)
MFILL(mri,aci,j,C,D,mposi,CP,CiP)
@cython.boundscheck(False)
@cython.wraparound(False)
cdef REDUCE( long [::1] rows, long [::1] cols, long j,double [:,::1] C,double [::1] CP,double [::1] CiP):
cdef long p = rows.shape[0]
cdef long ncols = cols.shape[0]
cdef long m = cols.shape[0]
predd = np.arange(-1,m+1,dtype=np.int64)
cdef long [::1] pred = predd
valuee = np.full((m+1),-1,dtype=np.double)
cdef double [::1] value = valuee
cdef long a=2
cdef long b=1
rett = np.empty(p,dtype=np.int64)
cdef long [::1] ret = rett
cdef long lc = ncols+1
value[1]=lookup(cols[0],j,rows[0],C,CP,CiP)
while m > p:
if value[pred[a]] == -1:
value[pred[a]] = (lookup(cols[pred[a]-1] ,j, rows[b-1],C,CP,CiP) if cols[pred[a]-1] <= rows[b-1] else 0)
if value[pred[a]] >= (lookup(cols[a-1] ,j, rows[b-1],C,CP,CiP) if cols[a-1] <= rows[b-1] else 0) :
if b < p :
b = b+1
value[a] = (lookup(cols[a-1] ,j, rows[b-1],C,CP,CiP) if cols[a-1] <= rows[b-1] else 0)
else:
pred[a+1] = pred[a]
m = m-1
a = a+1
else:
pred[a] = pred[pred[a]]
m = m-1
if b != 1:
b = b-1
else:
a = a+1
for i in range(p-1,-1,-1):
ret[i]=cols[pred[lc]-1]
lc=pred[lc]
return ret
@cython.boundscheck(False)
@cython.wraparound(False)
cdef MFILL( long [::1] ari, long [::1] aci, long j,double [:,::1] C,long [:,::1] D,long [::1]mposi,double [::1] CP,double [::1] CiP):
cdef long m = ari.shape[0]
cdef long n = aci.shape[0]
cdef long ii = n-1
cdef long start
if (m % 2) == 0:
start = m-2
else:
start = m-1
cdef long s,e,ar,ac,i
cdef double MAX,cc,vv,CURRENT_MAT
for i in range(start,-1,-2):
if (i==0):
s=int(aci[i])
e=int(mposi[ari[i+1]])
elif (i==m-1):
s=int(mposi[ari[i-1]])
e=int(aci[n-1])
else:
s=int(mposi[ari[i-1]])
e=int(mposi[ari[i+1]])
if(e > ari[i]):
e=int(ari[i])
MAX = 0
while True:
ac = aci[ii]
ar = ari[i]
if (ac > ar):
pass
else:
CURRENT_MAT = lookup(ac,j,ar,C,CP,CiP)
if (MAX < CURRENT_MAT):
MAX = CURRENT_MAT
mposi[ar]=ac
C[ar+j+1,j+1]=MAX
D[ar+j,j]=ac+j
if(ac<=s or ii==-1):
break
ii-=1
@cython.boundscheck(False)
@cython.wraparound(False)
cdef findBestK( long n, long k,long [:,::1] D,double [::1] P,double [::1] CP,double [::1] Cip,double [::1] CF):
cdef double E1 = 0
cdef double mean1 = mean(1,n,CP,Cip)
cdef long J,kk,j,a,K,i,t
for J in range(1,n):
E1 += (P[J])*(abs(J-mean1))
cdef double bestAlpha = 0
cdef long bestK = 0
cdef double Dk,Ek,meanK,A,alpha
cdef long [::1] T
cdef long [::1] bestT
for kk in range(k,1,-1):
T = np.zeros(kk+2,dtype=np.int64)
T[0] = 1
T[kk+1] = n
i = n
for j in range(kk,0,-1):
T[j] = D[i-1,j]
i = int(D[i-1,j])
if i < 1:
i=1
Dk = mean(T[kk],T[kk+1],CP,Cip)-mean(T[0],T[1],CP,Cip)
Ek = 1
for K in range(kk+1):
meanK = mean(T[K],T[K+1],CP,Cip)
for J in range(T[K],T[K+1]):
Ek += (P[J])*(abs(J-meanK))
A = 1
for a in range (kk+1):
t = T[a]
A += P[t]
A *= math.sqrt(kk)
alpha=((((E1/Ek)*Dk)**2)/A)
if alpha > bestAlpha :
bestK = kk
bestT = T
bestAlpha = alpha
cdef double [::1] vol= np.empty(bestK)
cdef double [::1] leng = np.empty(bestK)
if bestK != 0:
for i in range (1,bestK+1):
vol[i-1]=(CF[bestT[i]]-CF[bestT[i-1]])
leng[i-1]=(bestT[i]-bestT[i-1])
else:
bestT = np.empty(0,dtype=long)
return bestT,bestK,vol,leng
@cython.boundscheck(False)
@cython.wraparound(False)
def L(float [::1] F, long k):
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import math
import copy
import time
import os
cdef long n = len(F)
cdef double F_SUM = sum(F)
if F_SUM == 0:
return [],0,[],[]
CC = np.zeros((n+1,k+2),dtype=np.float64)
DD = np.zeros((n+1,k+2),dtype=np.int64)
cdef double [:, ::1] C = CC
cdef long[:, ::1] D = DD
cdef long sizeMAT
cdef long[::1] mposi
cdef double [::1] P = np.empty(n)
cdef double [::1] CP = np.empty(n)
cdef double [::1] CF = np.empty(n)
cdef double [::1] CiP = np.empty(n)
cdef double [::1] Cip = np.empty(n)
cdef double PP,iP,ip
cdef double firstValue = F[0]/F_SUM
CF[0]=(F[0])
P[0]=(firstValue)
CP[0]=(firstValue)
CiP[0]=(firstValue)
Cip[0]=(firstValue)
cdef long i,j,dl
for i in range(1,n):
CF[i]=(CF[i-1]+F[i])
PP = F[i]/F_SUM
iP = PP * i
ip = PP * (i+1)
P[i]=(PP)
CP[i]=(CP[i-1]+PP)
CiP[i]=(CiP[i-1]+iP)
Cip[i]=(Cip[i-1]+ip)
mminTj = np.zeros(k+2,dtype=np.int64)
mmaxTj = np.zeros(k+2,dtype=np.int64)
cdef long[::1] minTj = mminTj
cdef long[::1] maxTj = mmaxTj
for j in range(0,k+2):
if j == k+1:
minTj[j] = n
else:
minTj[j] = j
for j in range(0,k+2):
if j == 0:
maxTj[j] = 0
else:
maxTj[j] = n-k+j-1
cdef long l,tj,ex=k-2
cdef double v,f
cdef long [::1] initial_rc
for j in range (0,k+1):
if j == 0:
for tj in range(minTj[j+1],maxTj[j+1]+1+ex):
C[tj,j+1]=V_BC(1,tj,CP,CiP)
else:
if (j>=(3)):
ex-=1
sizeMAT = n-k+1+ex
if (j != k):
mposi = np.zeros(sizeMAT-1,dtype=np.int64)
initial_rc = np.arange(0,sizeMAT-1,dtype=int)
SMAWK(initial_rc,initial_rc,j,C,D,mposi,CP,CiP)
else:
dl = minTj[k]
for l in range(minTj[k],maxTj[k]+1):
f = V_BC(l+1,minTj[k+1],CP,CiP)
v = f + C[l,k]
if (C[minTj[k+1],k+1] < v ):
C[minTj[k+1],k+1] = v
dl = l
D[maxTj[k],k]=dl
D[n,k+1] = n
cdef long [::1] bestT
cdef long bestK
cdef double [::1] vol
cdef double [::1] leng
bestT,bestK,vol,leng = findBestK(n,k,D,P,CP,Cip,CF)
return (bestT,bestK,vol,leng)
def window(tup):
start = tup[0]
end = tup[1]
froM = start
winSize = 10000
to = froM+winSize
'''
fname = str(str(chromo)+"-"+str(start)+"-"+str(end)+".csv")
with open(fname, 'w', newline='') as file:
writer = csv.writer(file)
writer.writerow(["START", "END", "LENGTH", "VOLUME", "RANGE"])
'''
df = pd.DataFrame(columns=["CHR","START", "END", "LENGTH", "VOLUME", "RANGE"])
while(froM < end):
try:
bestT=[]
bestK=0
sF=F[froM:to]
if len(sF) != winSize:
sF=np.append(sF,np.zeros(winSize - len(sF),dtype=sF.dtype))
bestT,bestK,vol,leng=(L(sF,6))
'''
with open(fname, 'a', newline='') as file:
writer = csv.writer(file)
for i in range(bestK):
writer.writerow([froM+bestT[i],froM+bestT[i+1],leng[i],vol[i],str(str(froM)+':'+str(to))])
'''
if bestK != 0:
for i in range(bestK):
df = df.append({"CHR":chromo,"START":froM+bestT[i],"END":froM+bestT[i+1], "LENGTH":leng[i], "VOLUME":vol[i], "RANGE":str(str(froM)+':'+str(to))},ignore_index=True)
if bestK == 0:
froM+=winSize
else:
froM+=bestT[bestK]
to = froM+winSize
except Exception as e:
print("From:{}, To:{}, bestK={}".format(froM,to,bestK))
print('from Window, ',e)
froM+=winSize
to=froM+winSize
return df
%%time
path="/home/musab/chip-seq/GM12878-H3K27Ac_COPY/"
file = "wgEncodeBroadHistoneGm12878H3k27acStdSig.bigWig"
bw = pyBigWig.open(str(path+file))
full_result_list = []
for chromo in bw.chroms():
total_size = bw.chroms()[chromo]
F=bw.values(chromo,0,total_size,numpy=True)
F[np.isnan(F)] = 0
n = multiprocessing.cpu_count()
frag = math.ceil(total_size/n)
job = []
st=0
while (st<total_size):
en =st+frag
job.append((st,en))
st = en+1
pool = multiprocessing.Pool(processes=n)
r = pool.map(window,job)
result = pd.concat(r)
result = result.reset_index(drop=True)
full_result_list.extend(r)
result.to_csv(str(path+"results/"+chromo+'.csv'), index=False)
pool.close()
pool.join()
print("Chromo {} Done !".format(chromo))
full_result = pd.concat(full_result_list)
full_result = full_result.reset_index(drop=True)
full_result.to_csv(str(path+"results/"+'full.csv'), index=False)
import os
import glob
import pandas as pd
import math
os.chdir("/home/musab/chip-seq/GM12878-H3K27Ac/results/")
extension = 'csv'
all_filenames = [i for i in glob.glob('*.{}'.format(extension))]
#combine all files in the list
combined_csv = pd.concat([pd.read_csv(f) for f in all_filenames ])
combined_csv = combined_csv.sort_values('VOLUME', ascending=False)
#export to csv
# del(combined_csv['RANGE'])
# del(combined_csv['VOLUME'])
# del(combined_csv['LENGTH'])
#combined_csv.to_csv( "annotate.bed", header=False,index=False, sep='\t',encoding='utf-8-sig')
anno = combined_csv
del(anno['RANGE'])
del(anno['VOLUME'])
del(anno['LENGTH'])
length = anno.shape[0]
annotate = anno.head(math.ceil(40821*1.5))#math.ceil(length*0.01))
annotate.shape
annotate.to_csv( "anno_1.5.bed", header=False,index=False, sep='\t',encoding='utf-8-sig')
annotate.tail()
anno.tail()
```
| github_jupyter |
# Tutorial 09: Inflows
This tutorial walks you through the process of introducing inflows of vehicles into a network. Inflows allow us to simulate open networks where vehicles may enter (and potentially exit) the network. This exercise is organized as follows: in section 1 we prepare our inflows variables to support inflows into a merge network supplied by Flow, and in section 2 we simulate the merge network in the presence of inflows.
## 1. Adding Inflows
For this exercise, we will simulate inflows through a highway network with an on-merge. As we will see, the perturbations caused by vehicles entering through the on-merge lead to the formation of congested waves downstream in the main highway.
We begin by importing the merge scenario class provided by Flow.
```
from flow.scenarios.merge import MergeScenario
```
A schematic of the above network is available in the figure below. As we can see, the edges at the start of the main highway and the on-merge are named "inflow_highway" and "inflow_merge" respectively. These names will be important to us when we begin specifying our inflows into the network.
<img src="img/merge_scheme.png" width="750">
We will also define the types of vehicles that are placed in the network. These types of vehicles will also be of significance to us once the inflows are being defined. For this exercise, we add only one type of vehicle to the network, with the vehicle identifier "human":
```
from flow.core.vehicles import Vehicles
from flow.controllers import IDMController
# create an empty vehicles object
vehicles = Vehicles()
# add some vehicles to this object of type "human"
vehicles.add("human",
acceleration_controller=(IDMController, {}),
speed_mode="no_collide", # we use the speed mode "no_collide" for better dynamics at the merge
num_vehicles=20)
```
Next, we are ready to import and create an empty inflows object.
```
from flow.core.params import InFlows
inflow = InFlows()
```
The `InFlows` object is provided as an input during the scenario creation process via the `NetParams` parameter. Introducing these inflows into the network is handled by the backend scenario generation processes during instantiation of the scenario object.
In order to add new inflows of vehicles of pre-defined types onto specific edges and lanes in the network, we use the `InFlows` object's `add` method. This function accepts the following parameters:
* **veh_type**: type of vehicles entering the edge, must match one of the types set in the Vehicles class
* **edge**: starting edge for vehicles in this inflow, must match an edge name in the network
* **veh_per_hour**: number of vehicles entering from the edge per hour, may not be achievable due to congestion and safe driving behavior
* other parameters, including: **start**, **end**, and **probability**. See documentation for more information.
In addition to the above parameters, several optional inputs to the `add` method may be found within sumo's documentation at: http://sumo.dlr.de/wiki/Definition_of_Vehicles,_Vehicle_Types,_and_Routes. Some important features include:
* **departLane**: specifies which lane vehicles will enter from on the edge, may be specified as "all" or "random"
* **departSpeed**: speed of the vehicles once they enter the network
We begin by adding inflows of vehicles at a rate of 2000 veh/hr through *all* lanes on the main highway as follows:
```
inflow.add(veh_type="human",
edge="inflow_highway",
vehs_per_hour=2000,
departSpeed=10,
departLane="random")
```
Next, we specify a second inflow of vehicles through the on-merge lane at a rate of only 100 veh/hr.
```
inflow.add(veh_type="human",
edge="inflow_merge",
vehs_per_hour=100,
departSpeed=10,
departLane="random")
```
## 2. Running Simulations with Inflows
We are now ready to test our inflows in simulation. As mentioned in section 1, the inflows are specified in the `NetParams` object, in addition to all other network-specific parameters. For the merge network, this is done as follows:
```
from flow.scenarios.merge import ADDITIONAL_NET_PARAMS
from flow.core.params import NetParams
additional_net_params = ADDITIONAL_NET_PARAMS.copy()
# we choose to make the main highway slightly longer
additional_net_params["pre_merge_length"] = 500
net_params = NetParams(inflows=inflow, # our inflows
no_internal_links=False,
additional_params=additional_net_params)
```
Finally, we execute the simulation using the simulation creation techniques we learned in exercise 1, via the code block below. Running this simulation, we see a large number of vehicles entering from the main highway, but only a sparse stream of vehicles entering from the on-merge. Nevertheless, this volume of merging vehicles is sufficient to form congestive patterns within the main highway.
```
from flow.core.params import SumoParams, EnvParams, InitialConfig
from flow.envs.loop.loop_accel import AccelEnv, ADDITIONAL_ENV_PARAMS
from flow.core.experiment import SumoExperiment
sumo_params = SumoParams(render=True,
sim_step=0.2)
env_params = EnvParams(additional_params=ADDITIONAL_ENV_PARAMS)
initial_config = InitialConfig()
scenario = MergeScenario(name="merge-example",
vehicles=vehicles,
net_params=net_params,
initial_config=initial_config)
env = AccelEnv(env_params, sumo_params, scenario)
exp = SumoExperiment(env, scenario)
_ = exp.run(1, 1500)
```
| github_jupyter |
```
%matplotlib inline
from IPython.core.display import display, HTML
display(HTML("<style>.container { width:100% !important; }</style>"))
import numpy as np
np.set_printoptions(precision=3, suppress=True)
import library.helpers as h
import matplotlib.pyplot as plt
import numpy as np
from sklearn.metrics import auc
from scipy.integrate import simps
from scipy.interpolate import interp1d
LOG_NAME = "unix_forensic"
VIZUALIZATIONS_DIR = "visualizations"
fig = plt.figure(figsize=(10,10))
# TODO redo this one [
MODEL_NAMES = [ "lstm-ae", "triplet-la", "triplet-jd-la-all-40-70-all-all", "triplet-jd-la-non-60-65-non-non", "triplet-jd-la-00500-60-65-all-all-00001",
"triplet-jd-la-00500-60-65-all-00001-non", "triplet-jd-la-01000-60-65-all-all-00001", "triplet-jd-la-05000-60-65-all-all-00001", "jd-la-x05000-pt60-nt65-llall-lcall-ee00001-ep20", "jd-la-xnon-pt40-nt70-llall-lcall-ee00001-ep50"]
MODEL_NAMES_JD = [
"triplet-jd-la-all-40-70-all-all", # baseline, all labels
"triplet-jd-la-non-60-65-non-non","triplet-jd-la-non-40-70-non-non", # jd no labels, 10 ep
"jd-la-xnon-pt60-nt65-llnon-lcnon-eenon-ep50",
"jd-la-xnon-pt30-nt75-llnon-lcnon-eenon-ep50"
#"jd-la-x02000-pt55-nt65-llnon-lc00003-ee00002-ep10","jd-la-x02000-pt55-nt65-llnon-lc00003-ee00002-ep20",
#"triplet-jd-la-05000-60-65-all-all-00001", "jd-la-x05000-pt60-nt65-llall-lcall-ee00001-ep20",
]
MODEL_NAMES_LA = [
"triplet-jd-la-all-40-70-all-all", # baseline, all labels
"jd-la-x00500-pt30-nt70-llall-lcall-ee00002-ep30", # 500 labels
"jd-la-x01000-pt30-nt70-llall-lcall-ee00002-ep30", # 1000 labels
"jd-la-x02000-pt30-nt70-llall-lcall-ee00002-ep30", # 2000 labels
"jd-la-x02500-pt30-nt70-llall-lcall-ee00002-ep30", # 2500 labels
"jd-la-x05000-pt30-nt70-llall-lcall-ee00002-ep30", # 5000 labels
]
MODEL_NAMES_NT60 = [
"jd-la-xnon-pt10-nt60-llnon-lcnon-ee00002-ep30", #
"jd-la-xnon-pt20-nt60-llnon-lcnon-ee00002-ep30", #
"jd-la-xnon-pt30-nt60-llnon-lcnon-ee00002-ep30", #
"jd-la-xnon-pt40-nt60-llnon-lcnon-ee00002-ep30", #
"jd-la-xnon-pt50-nt60-llnon-lcnon-ee00002-ep30",
]
MODEL_NAMES_NT70 = [
"jd-la-xnon-pt10-nt70-llnon-lcnon-ee00002-ep30", #
"jd-la-xnon-pt20-nt70-llnon-lcnon-ee00002-ep30", #
"jd-la-xnon-pt30-nt70-llnon-lcnon-ee00002-ep30", #
"jd-la-xnon-pt40-nt70-llnon-lcnon-ee00002-ep30", #
"jd-la-xnon-pt50-nt70-llnon-lcnon-ee00002-ep30",
"jd-la-xnon-pt60-nt70-llnon-lcnon-ee00002-ep30"
]
MODEL_NAMES_NT80 = [
"jd-la-xnon-pt10-nt80-llnon-lcnon-ee00002-ep30", #
"jd-la-xnon-pt20-nt80-llnon-lcnon-ee00002-ep30", #
"jd-la-xnon-pt30-nt80-llnon-lcnon-ee00002-ep30", #
"jd-la-xnon-pt40-nt80-llnon-lcnon-ee00002-ep30", #
"jd-la-xnon-pt50-nt80-llnon-lcnon-ee00002-ep30",
"jd-la-xnon-pt60-nt80-llnon-lcnon-ee00002-ep30",
"jd-la-xnon-pt70-nt80-llnon-lcnon-ee00002-ep30"
]
MODEL_NAMES_10 = [
#"jd-la-xnon-pt10-nt90-llnon-lcnon-ee00002-ep20",
#"jd-la-xnon-pt20-nt90-llnon-lcnon-ee00002-ep20",
#"jd-la-xnon-pt30-nt90-llnon-lcnon-ee00002-ep20",
#"jd-la-xnon-pt40-nt90-llnon-lcnon-ee00002-ep20",
"jd-la-xnon-pt50-nt90-llnon-lcnon-ee00002-ep20",
"jd-la-xnon-pt52-nt70-llnon-lcnon-ee00002-ep30",
#"jd-la-xnon-pt60-nt90-llnon-lcnon-ee00002-ep20",
#"jd-la-xnon-pt70-nt90-llnon-lcnon-ee00002-ep20",
#"jd-la-xnon-pt80-nt90-llnon-lcnon-ee00002-ep20",
"jd-la-xnon-pt52-nt90-llnon-lcnon-ee00002-ep200",
]
MODEL_NAMES = MODEL_NAMES_LA
# "triplet-jd-la-2000-55-70-40", "lstm-ae", "triplet-la", "triplet-jd-la-3000-55-70-55-30", "triplet-jd-la-ma-1500"
# ["triplet-jd-la-2000-55-70-40","triplet-jd-la-3000-55-70-55-30","triplet-jd-la-3000-50-70-55-30"]
# [ "triplet-jd-la-1500-055-70-40", "triplet-jd-la-1500-55-75-40"]
# "triplet-jd-la-1500-060-70-40", "triplet-jd-la-1500-055-70-40", "triplet-jd-la-1500-050-70-40",
# ["triplet-jd-la-1500-050-065-25", "triplet-jd-la-1500-065-070-25", "triplet-jd-la-1500-060-060-25"]
#["triplet-jd-la-ma-500-02-03","triplet-jd-la-ma-750","triplet-jd-la-ma-1000", # "#4169e1",
# "triplet-jd-la-ma-1500",
# "triplet-jaccard","triplet-jaccard-margin",
# "triplet-label", # all labels
# "lstm-ae"]
COLORS = h.get_N_HexCol(len(MODEL_NAMES)+1)
fac_n = np.arange(0, 1.0, 0.0005)
baseline_valid_accepts = h.load_from_json("data/ji_%s_basline-jaccard_valid.json"%LOG_NAME)
baseline_false_accepts = [np.round(f,5) for f in h.load_from_json("data/ji_%s_basline-jaccard_false.json"%LOG_NAME)]
interpolated_vac = interp1d(baseline_false_accepts, baseline_valid_accepts)
vac_n = interpolated_vac(fac_n)
auc_score = auc(fac_n, vac_n)
plt.plot( fac_n, interpolated_vac(fac_n), color='r', label="%0.3f,baseline"%auc_score)
plt.xscale("log")
for i, model_name in enumerate(MODEL_NAMES):
#print(model_name)
valid_accepts = h.load_from_json("data/%s_%s_valid.json"%(model_name, LOG_NAME))
false_accepts = h.load_from_json("data/%s_%s_false.json"%(model_name, LOG_NAME))
interpolated_vac = interp1d(false_accepts, valid_accepts)
auc_score = auc(fac_n, interpolated_vac(fac_n))
plt.plot( fac_n, interpolated_vac(fac_n) , color=COLORS[i+1], label="%0.3f, %s"%(auc_score, model_name))
plt.title("VAR/FAR (%s) LA"%LOG_NAME)
plt.xlabel('FAR')
plt.ylabel('VAL')
plt.legend(loc='lower right')
plt.show()
#plt.savefig("%s/roc_%s.png"%(VIZUALIZATIONS_DIR, LOG_NAME))
```
| github_jupyter |
```
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import pystan
import pybaseball
import arviz as az
player_names = ["Peter Alonso","Keston Hiura","Fernando Tatis Jr.","Harold Ramirez","Jose Trevino","Yordan Alvarez","Vladimir Guerrero Jr.","Steve Wilkerson"]
batter_data = pybaseball.batting_stats(2019)
batter_data = batter_data.loc[batter_data['Name'].isin(player_names)].set_index("Name")
batter_data
binomial_data = {}
for player in batter_data.index:
hits = int(batter_data.loc[player]["H"])
ab_minus_hits = int(batter_data.loc[player]["AB"]) - hits
ab_outcomes = [0]*ab_minus_hits + [1]*hits
binomial_data.update({player:ab_outcomes})
at_bat_totals = {}
for player in batter_data.index:
at_bat_count = int(batter_data.loc[player]["AB"])
at_bat_totals.update({player:at_bat_count})
hit_totals = {}
for player in batter_data.index:
hit_count = int(batter_data.loc[player]["H"])
hit_totals.update({player:hit_count})
```
$BA_i \sim Beta(81,219)$
$y_i \sim Bin(AB_i,BA_i)$
$i=1,2,...,8$
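Since the Beta prior is conjugate to the binomial likelihood, each player's posterior batting average is also available in closed form, which provides a handy sanity check on the sampled `AVG` values:
$BA_i \mid y_i \sim Beta(81 + y_i,\; 219 + AB_i - y_i)$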
```
#https://mc-stan.org/users/documentation/case-studies/rstan_workflow.html
#https://people.duke.edu/~ccc14/sta-663/PyStan.html
#http://varianceexplained.org/statistics/beta_distribution_and_baseball/
model_code = '''
data {
int<lower=0> N;
int<lower=0> at_bats[N];
int<lower=0> hits[N];
real<lower=0> A;
real<lower=0> B;
}
parameters {
real<lower=0,upper=1> AVG[N];
}
model {
AVG ~ beta(A, B);
hits ~ binomial(at_bats, AVG);
}
generated quantities {
vector[N] log_lik;
vector[N] predicted_hits;
for (i in 1:N) {
log_lik[i] = binomial_lpmf(hits[i] | at_bats[i], AVG[i]);
predicted_hits[i] = binomial_rng(at_bats[i], AVG[i]);
}
}
'''
model_data = dict(N=8, hits=list(hit_totals.values()),at_bats=list(at_bat_totals.values()),A=81,B=219)
stan_model = pystan.StanModel(model_code=model_code)
fit = stan_model.sampling(data=model_data)
print(fit)
prior_model_code = '''
data {
int<lower=0> N;
real<lower=0> A;
real<lower=0> B;
}
parameters {
real<lower=0,upper=1> AVG[N];
}
model {
AVG ~ beta(A, B);
}
'''
prior_model_data = dict(N=8,A=81,B=219)
stan_model_prior = pystan.StanModel(model_code=prior_model_code)
prior_fit = stan_model_prior.sampling(data=prior_model_data)
stan_data = az.from_pystan(posterior=fit,
prior=prior_fit,
observed_data="hits",
posterior_predictive="predicted_hits",
log_likelihood="log_lik",
posterior_model=stan_model,
coords={"player":list(hit_totals.keys())},
dims={'AVG': ['player'], 'hits': ['player'], 'log_lik': ['player'], 'predicted_hits': ['player']})
density_plots = az.plot_density([stan_data.posterior,stan_data.prior],data_labels=["Posterior","Prior"])
az.plot_ppc(stan_data, data_pairs = {"hits" : "predicted_hits"},flatten=["player"])
```
| github_jupyter |
[](https://colab.research.google.com/github/ourownstory/neural_prophet/blob/master/example_notebooks/sub_daily_data_yosemite_temps.ipynb)
# Sub-daily data
NeuralProphet can make forecasts for time series with sub-daily observations by passing in a dataframe with timestamps in the ds column. The format of the timestamps should be `YYYY-MM-DD HH:MM:SS` - see the example csv [here](https://github.com/ourownstory/neural_prophet/blob/master/example_data/yosemite_temps.csv). When sub-daily data are used, daily seasonality will automatically be fit.
Here we fit NeuralProphet to data with 5-minute resolution (daily temperatures at Yosemite).
```
if 'google.colab' in str(get_ipython()):
!pip install git+https://github.com/ourownstory/neural_prophet.git # may take a while
#!pip install neuralprophet # much faster, but may not have the latest upgrades/bugfixes
data_location = "https://raw.githubusercontent.com/ourownstory/neural_prophet/master/"
else:
data_location = "../"
import pandas as pd
from neuralprophet import NeuralProphet, set_log_level
# set_log_level("ERROR")
df = pd.read_csv(data_location + "example_data/yosemite_temps.csv")
```
Now we will attempt to forecast the next 7 days. The `5min` data resolution means that we have `60/5*24=288` values per day. Thus, we want to forecast `7*288` periods ahead.
Using some common sense, we set:
* First, we disable weekly seasonality, as nature does not follow the human week's calendar.
* Second, we disable changepoints, as the dataset only contains two months of data
```
m = NeuralProphet(
n_changepoints=0,
weekly_seasonality=False,
)
metrics = m.fit(df, freq='5min')
future = m.make_future_dataframe(df, periods=7*288, n_historic_predictions=len(df))
forecast = m.predict(future)
fig = m.plot(forecast)
# fig_comp = m.plot_components(forecast)
fig_param = m.plot_parameters()
```
The daily seasonality seems to make sense once we account for the time being recorded in GMT, while Yosemite local time is GMT-8.
## Improving trend and seasonality
As we have `288` daily values recorded, we can increase the flexibility of `daily_seasonality`, without danger of overfitting.
Further, we may want to re-visit our decision to disable changepoints, as the data clearly shows changes in trend, as is typical with the weather. We make the following changes:
* increase the `changepoints_range`, as we are doing a short-term prediction
* increase the `n_changepoints` to allow the trend to fit the sudden changes
* carefully regularize the trend changepoints by setting `trend_reg` in order to avoid overfitting
```
m = NeuralProphet(
changepoints_range=0.95,
n_changepoints=50,
trend_reg=1.5,
weekly_seasonality=False,
daily_seasonality=10,
)
metrics = m.fit(df, freq='5min')
future = m.make_future_dataframe(df, periods=60//5*24*7, n_historic_predictions=len(df))
forecast = m.predict(future)
fig = m.plot(forecast)
# fig_comp = m.plot_components(forecast)
fig_param = m.plot_parameters()
```
| github_jupyter |
# Deep Deterministic Policy Gradients (DDPG)
---
In this notebook, we train DDPG with OpenAI Gym's Pendulum-v0 environment.
### 1. Import the Necessary Packages
```
import gym
import random
import torch
import numpy as np
from collections import deque
import matplotlib.pyplot as plt
%matplotlib inline
from ddpg_agent import Agent
```
### 2. Instantiate the Environment and Agent
```
env = gym.make('Pendulum-v0')
env.seed(2)
agent = Agent(state_size=3, action_size=1, random_seed=2)
```
**Observation space** (Type: Box(3))
| Num | Observation | Min | Max |
| --- | --- | --- | --- |
| 0 | cos(theta) | -1.0 | 1.0 |
| 1 | sin(theta) | -1.0 | 1.0 |
| 2 | theta dot | -8.0 | 8.0 |
**Action space** (Type: Box(1))
| Num | Action | Min | Max |
| --- | --- | --- | --- |
| 0 | Joint effort | -2.0 | 2.0 |
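These bounds can also be read directly from the environment using the standard Gym API; a quick sketch, relying on the `env` created above:
```python
# Inspect the observation and action spaces reported by the environment
print(env.observation_space)                                  # 3-dimensional Box
print(env.observation_space.low, env.observation_space.high)  # bounds from the table above
print(env.action_space)                                       # 1-dimensional Box (joint effort)
print(env.action_space.low, env.action_space.high)
```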
### 3. Train the Agent with DDPG
```
def ddpg(n_episodes=100, max_t=300, print_every=100):
scores_deque = deque(maxlen=print_every)
scores = []
for i_episode in range(1, n_episodes+1):
state = env.reset()
agent.reset()
score = 0
for t in range(max_t):
action = agent.act(state)
next_state, reward, done, _ = env.step(action)
agent.step(state, action, reward, next_state, done)
state = next_state
score += reward
if done:
break
scores_deque.append(score)
scores.append(score)
print('\rEpisode {}\tAverage Score: {:.2f}'.format(i_episode, np.mean(scores_deque)), end="")
torch.save(agent.actor_local.state_dict(), 'checkpoint_actor.pth')
torch.save(agent.critic_local.state_dict(), 'checkpoint_critic.pth')
if i_episode % print_every == 0:
print('\rEpisode {}\tAverage Score: {:.2f}'.format(i_episode, np.mean(scores_deque)))
return scores
scores = ddpg()
fig = plt.figure()
ax = fig.add_subplot(111)
plt.plot(np.arange(1, len(scores)+1), scores)
plt.ylabel('Score')
plt.xlabel('Episode #')
plt.show()
```
### 4. Watch a Smart Agent!
```
agent.actor_local.load_state_dict(torch.load('checkpoint_actor.pth'))
agent.critic_local.load_state_dict(torch.load('checkpoint_critic.pth'))
state = env.reset()
for t in range(500):
action = agent.act(state, add_noise=False)
env.render()
state, reward, done, _ = env.step(action)
if done:
break
env.close()
```
### 6. Explore
In this exercise, we have provided a sample DDPG agent and demonstrated how to use it to solve an OpenAI Gym environment. To continue your learning, you are encouraged to complete any (or all!) of the following tasks:
- Amend the various hyperparameters and network architecture to see if you can get your agent to solve the environment faster than this benchmark implementation. Once you build intuition for the hyperparameters that work well with this environment, try solving a different OpenAI Gym task!
- Write your own DDPG implementation. Use this code as reference only when needed -- try as much as you can to write your own algorithm from scratch.
- You may also like to implement prioritized experience replay, to see if it speeds learning.
- The current implementation adds Ornstein-Uhlenbeck noise to the action space. However, it has [been shown](https://blog.openai.com/better-exploration-with-parameter-noise/) that adding noise to the parameters of the neural network policy can improve performance. Make this change to the code, to verify it for yourself!
- Write a blog post explaining the intuition behind the DDPG algorithm and demonstrating how to use it to solve an RL environment of your choosing.
```
int(1e5)
```
| github_jupyter |
<center>
<img src="https://cf-courses-data.s3.us.cloud-object-storage.appdomain.cloud/IBMDeveloperSkillsNetwork-PY0101EN-SkillsNetwork/IDSNlogo.png" width="300" alt="cognitiveclass.ai logo" />
</center>
# Classes and Objects in Python
Estimated time needed: **40** minutes
## Objectives
After completing this lab you will be able to:
- Work with classes and objects
- Identify and define attributes and methods
<h2>Table of Contents</h2>
<div class="alert alert-block alert-info" style="margin-top: 20px">
<ul>
<li>
<a href="#intro">Introduction to Classes and Objects</a>
<ul>
<li><a href="create">Creating a class</a></li>
<li><a href="instance">Instances of a Class: Objects and Attributes</a></li>
<li><a href="method">Methods</a></li>
</ul>
</li>
<li><a href="creating">Creating a class</a></li>
<li><a href="circle">Creating an instance of a class Circle</a></li>
<li><a href="rect">The Rectangle Class</a></li>
</ul>
</div>
<hr>
<h2 id="intro">Introduction to Classes and Objects</h2>
<h3>Creating a Class</h3>
The first part of creating a class is giving it a name: In this notebook, we will create two classes, Circle and Rectangle. We need to determine all the data that make up that class, and we call that an attribute. Think about this step as creating a blueprint that we will use to create objects. In figure 1 we see two classes, circle and rectangle. Each has their own attributes, which are variables. The class circle has the attributes radius and color, while the rectangle has the attributes height and width. Let’s use the visual examples of these shapes before we get to the code, as this will help you get accustomed to the vocabulary.
<img src="https://cf-courses-data.s3.us.cloud-object-storage.appdomain.cloud/IBMDeveloperSkillsNetwork-PY0101EN-SkillsNetwork/labs/Module%203/images/ClassesClass.png" width="500" />
<i>Figure 1: Classes circle and rectangle, and each has their own attributes. The class circle has the attribute radius and colour, the rectangle has the attribute height and width.</i>
<h3 id="instance">Instances of a Class: Objects and Attributes</h3>
An instance of an object is the realisation of a class, and in Figure 2 we see three instances of the class circle. We give each object a name: red circle, yellow circle and green circle. Each object has different attributes, so let's focus on the attribute of colour for each object.
<img src="https://cf-courses-data.s3.us.cloud-object-storage.appdomain.cloud/IBMDeveloperSkillsNetwork-PY0101EN-SkillsNetwork/labs/Module%203/images/ClassesObj.png" width="500" />
<i>Figure 2: Three instances of the class circle or three objects of type circle.</i>
The colour attribute for the red circle is the colour red, for the green circle object the colour attribute is green, and for the yellow circle the colour attribute is yellow.
<h3 id="method">Methods</h3>
Methods give you a way to change or interact with the object; they are functions that interact with objects. For example, let’s say we would like to increase the radius by a specified amount of a circle. We can create a method called **add_radius(r)** that increases the radius by **r**. This is shown in figure 3, where after applying the method to the "orange circle object", the radius of the object increases accordingly. The “dot” notation means to apply the method to the object, which is essentially applying a function to the information in the object.
<img src="https://cf-courses-data.s3.us.cloud-object-storage.appdomain.cloud/IBMDeveloperSkillsNetwork-PY0101EN-SkillsNetwork/labs/Module%203/images/ClassesMethod.png" width="500" />
<i>Figure 3: Applying the method “add_radius” to the object orange circle object.</i>
<hr>
<h2 id="creating">Creating a Class</h2>
Now we are going to create a class circle, but first, we are going to import a library to draw the objects:
```
# Import the library
import matplotlib.pyplot as plt
%matplotlib inline
```
The first step in creating your own class is to use the <code>class</code> keyword, then the name of the class as shown in Figure 4. In this course the class parent will always be object:
<img src="https://cf-courses-data.s3.us.cloud-object-storage.appdomain.cloud/IBMDeveloperSkillsNetwork-PY0101EN-SkillsNetwork/labs/Module%203/images/ClassesDefine.png" width="400" />
<i>Figure 4: Creating a class Circle.</i>
The next step is a special method called a constructor <code>__init__</code>, which is used to initialize the object. The inputs are data attributes. The term <code>self</code> contains all the attributes in the set. For example, <code>self.color</code> gives the value of the attribute color and <code>self.radius</code> will give you the radius of the object. We also have the method <code>add_radius()</code> with the parameter <code>r</code>; the method adds the value of <code>r</code> to the attribute radius. To access the radius we use the syntax <code>self.radius</code>. The labeled syntax is summarized in Figure 5:
<img src="https://cf-courses-data.s3.us.cloud-object-storage.appdomain.cloud/IBMDeveloperSkillsNetwork-PY0101EN-SkillsNetwork/labs/Module%203/images/ClassesCircle.png" width="600" />
<i>Figure 5: Labeled syntax of the object circle.</i>
The actual object is shown below. We include the method <code>drawCircle</code> to display the image of a circle. We set the default radius to 3 and the default colour to blue:
```
# Create a class Circle
class Circle(object):
# Constructor
def __init__(self, radius=3, color='blue'):
self.radius = radius
self.color = color
# Method
def add_radius(self, r):
self.radius = self.radius + r
return(self.radius)
# Method
def drawCircle(self):
plt.gca().add_patch(plt.Circle((0, 0), radius=self.radius, fc=self.color))
plt.axis('scaled')
plt.show()
```
<hr>
<h2 id="circle">Creating an instance of a class Circle</h2>
Let’s create the object <code>RedCircle</code> of type Circle to do the following:
```
# Create an object RedCircle
RedCircle = Circle(10, 'red')
```
We can use the <code>dir</code> command to get a list of the object's methods. Many of them are default Python methods.
```
# Find out the methods can be used on the object RedCircle
dir(RedCircle)
```
We can look at the data attributes of the object:
```
# Print the object attribute radius
RedCircle.radius
# Print the object attribute color
RedCircle.color
```
We can change the object's data attributes:
```
# Set the object attribute radius
RedCircle.radius = 1
RedCircle.radius
```
We can draw the object by using the method <code>drawCircle()</code>:
```
# Call the method drawCircle
RedCircle.drawCircle()
```
We can increase the radius of the circle by applying the method <code>add_radius()</code>. Let's increase the radius by 2, then by 5, and then by 6:
```
# Use method to change the object attribute radius
print('Radius of object:',RedCircle.radius)
RedCircle.add_radius(2)
print('Radius of object of after applying the method add_radius(2):',RedCircle.radius)
RedCircle.add_radius(5)
print('Radius of object of after applying the method add_radius(5):',RedCircle.radius)
RedCircle.add_radius(6)
print('Radius of object of after applying the method add radius(6):',RedCircle.radius)
```
Let’s create a blue circle. As the default colour is blue, all we have to do is specify what the radius is:
```
# Create a blue circle with a given radius
BlueCircle = Circle(radius=100)
```
As before we can access the attributes of the instance of the class by using the dot notation:
```
# Print the object attribute radius
BlueCircle.radius
# Print the object attribute color
BlueCircle.color
```
We can draw the object by using the method <code>drawCircle()</code>:
```
# Call the method drawCircle
BlueCircle.drawCircle()
```
Compare the x and y axis of the figure to the figure for <code>RedCircle</code>; they are different.
<hr>
<h2 id="rect">The Rectangle Class</h2>
Let's create a class rectangle with the attributes of height, width and color. We will only add the method to draw the rectangle object:
```
# Create a new Rectangle class for creating a rectangle object
class Rectangle(object):
# Constructor
def __init__(self, width=2, height=3, color='r'):
self.height = height
self.width = width
self.color = color
# Method
def drawRectangle(self):
plt.gca().add_patch(plt.Rectangle((0, 0), self.width, self.height ,fc=self.color))
plt.axis('scaled')
plt.show()
```
Let’s create the object <code>SkinnyBlueRectangle</code> of type Rectangle. Its width will be 2 and height will be 3, and the color will be blue:
```
# Create a new object rectangle
SkinnyBlueRectangle = Rectangle(2, 10, 'blue')
```
As before we can access the attributes of the instance of the class by using the dot notation:
```
# Print the object attribute height
SkinnyBlueRectangle.height
# Print the object attribute width
SkinnyBlueRectangle.width
# Print the object attribute color
SkinnyBlueRectangle.color
```
We can draw the object:
```
# Use the drawRectangle method to draw the shape
SkinnyBlueRectangle.drawRectangle()
```
Let’s create the object <code>FatYellowRectangle</code> of type Rectangle :
```
# Create a new object rectangle
FatYellowRectangle = Rectangle(20, 5, 'yellow')
```
We can access the attributes of the instance of the class by using the dot notation:
```
# Print the object attribute height
FatYellowRectangle.height
# Print the object attribute width
FatYellowRectangle.width
# Print the object attribute color
FatYellowRectangle.color
```
We can draw the object:
```
# Use the drawRectangle method to draw the shape
FatYellowRectangle.drawRectangle()
```
<hr>
<h2 id="rect">Exercises</h2>
<h4> Text Analysis </h4>
You have been recruited by your friend, a linguistics enthusiast, to create a utility tool that can perform analysis on a given piece of text. Complete the class
'analysedText' with the following methods -
<ul>
<li> Constructor - Takes argument 'text', makes it lower case and removes all punctuation. Assume only the following punctuation is used - period (.), exclamation mark (!), comma (,) and question mark (?). Store the argument in "fmtText"
<li> freqAll - returns a dictionary of all unique words in the text along with the number of their occurrences.
<li> freqOf - returns the frequency of the word passed in argument.
</ul>
The skeleton code has been given to you. Docstrings can be ignored for the purpose of the exercise. <br>
<i> Hint: Some useful functions are <code>replace()</code>, <code>lower()</code>, <code>split()</code>, <code>count()</code> </i><br>
```
class analysedText(object):
def __init__ (self, text):
reArrText = text.lower()
reArrText = reArrText.replace('.','').replace('!','').replace(',','').replace('?','')
self.fmtText = reArrText
def freqAll(self):
wordList = self.fmtText.split(' ')
freqMap = {}
for word in set(wordList): # use set to remove duplicates in list
freqMap[word] = wordList.count(word)
return freqMap
def freqOf(self,word):
freqDict = self.freqAll()
if word in freqDict:
return freqDict[word]
else:
return 0
```
Execute the block below to check your progress.
```
import sys
sampleMap = {'eirmod': 1,'sed': 1, 'amet': 2, 'diam': 5, 'consetetur': 1, 'labore': 1, 'tempor': 1, 'dolor': 1, 'magna': 2, 'et': 3, 'nonumy': 1, 'ipsum': 1, 'lorem': 2}
def testMsg(passed):
if passed:
return 'Test Passed'
else :
return 'Test Failed'
print("Constructor: ")
try:
samplePassage = analysedText("Lorem ipsum dolor! diam amet, consetetur Lorem magna. sed diam nonumy eirmod tempor. diam et labore? et diam magna. et diam amet.")
print(testMsg(samplePassage.fmtText == "lorem ipsum dolor diam amet consetetur lorem magna sed diam nonumy eirmod tempor diam et labore et diam magna et diam amet"))
except:
print("Error detected. Recheck your function " )
print("freqAll: ",)
try:
wordMap = samplePassage.freqAll()
print(testMsg(wordMap==sampleMap))
except:
print("Error detected. Recheck your function " )
print("freqOf: ")
try:
passed = True
for word in sampleMap:
if samplePassage.freqOf(word) != sampleMap[word]:
passed = False
break
print(testMsg(passed))
except:
print("Error detected. Recheck your function " )
```
<details><summary>Click here for the solution</summary>
```python
class analysedText(object):
def __init__ (self, text):
# remove punctuation
formattedText = text.replace('.','').replace('!','').replace('?','').replace(',','')
# make text lowercase
formattedText = formattedText.lower()
self.fmtText = formattedText
def freqAll(self):
# split text into words
wordList = self.fmtText.split(' ')
# Create dictionary
freqMap = {}
for word in set(wordList): # use set to remove duplicates in list
freqMap[word] = wordList.count(word)
return freqMap
def freqOf(self,word):
# get frequency map
freqDict = self.freqAll()
if word in freqDict:
return freqDict[word]
else:
return 0
```
</details>
<hr>
<h2>The last exercise!</h2>
<p>Congratulations, you have completed your first lesson and hands-on lab in Python. However, there is one more thing you need to do. The Data Science community encourages sharing work. The best way to share and showcase your work is to share it on GitHub. By sharing your notebook on GitHub you are not only building your reputation with fellow data scientists, but you can also show it off when applying for a job. Even though this was your first piece of work, it is never too early to start building good habits. So, please read and follow <a href="https://cognitiveclass.ai/blog/data-scientists-stand-out-by-sharing-your-notebooks/" target="_blank">this article</a> to learn how to share your work.
<hr>
## Author
<a href="https://www.linkedin.com/in/joseph-s-50398b136/" target="_blank">Joseph Santarcangelo</a>
## Other contributors
<a href="www.linkedin.com/in/jiahui-mavis-zhou-a4537814a">Mavis Zhou</a>
## Change Log
| Date (YYYY-MM-DD) | Version | Changed By | Change Description |
| ----------------- | ------- | ---------- | ---------------------------------- |
| 2020-08-26 | 2.0 | Lavanya | Moved lab to course repo in GitLab |
<hr/>
## <h3 align="center"> © IBM Corporation 2020. All rights reserved. </h3>
| github_jupyter |
# Analyzing the Effects of Non-Academic Features on Student Performance
```
# For reading data sets
import pandas
# For lots of awesome things
import numpy as np
# Need this for LabelEncoder
from sklearn import preprocessing
# For building our net
import keras
# For plotting
import matplotlib.pyplot as plt
%matplotlib inline
```
## Read in data
### Data is separated by semicolons (delimiter=";"), with column names in the first row of the file (header=0).
```
# Read in student data
student_data = np.array(pandas.read_table("./student-por.csv",
delimiter=";", header=0))
# Display student data
student_data
```
### Determine what the column labels are...
```
# Descriptions for each feature (found in the header)
feature_descrips = np.array(pandas.read_csv("./student-por.csv",
delimiter=";", header=None, nrows=1))
# Display descriptions
print(feature_descrips)
```
### ...and give them clearer descriptions.
```
# More detailed descriptions
feature_descrips = np.array(["School", "Sex", "Age", "Urban or Rural Address", "Family Size",
"Parent's Cohabitation status", "Mother's Education", "Father's Education",
"Mother's Job", "Father's Job", "Reason for Choosing School",
"Student's Gaurdain", "Home to School Travel Time", "Weekly Study Time",
"Number of Past Class Failures", "Extra Educational Support",
"Family Educational Support", "Extra Paid Classes", "Extra Curricular Activities",
"Attended Nursery School", "Wants to Take Higher Education", "Internet Access at Home",
"In a Romantic Relationship", "Quality of Family Relationships",
"Free Time After School", "Time Spent Going out With Friends",
"Workday Alcohol Consumption", "Weekend Alcohol Consumption",
"Current Health Status", "Number of Student Absences", "First Period Grade",
"Second Period Grade", "Final Grade"])
```
# Data Cleanup
## Shuffle data
### We sampled 2 schools, and right now our data has each school's records grouped together. We need to remove this grouping before training later on.
```
# Shuffle the data!
np.random.shuffle(student_data)
student_data
```
## Alphabetically classify scores
### Because our data is sampled from Portugal, where grades run from 0 to 19, we map the scores onto letter grades to resemble the US system.
### 0 = F
### 1 = D
### 2 = C
### 3 = B
### 4 = A
```
# Array holding final scores for every student
scores = student_data[:,32]
# Iterate through list of scores, changing them from a 0-19 value
## to a 0-4 value (representing F-A)
for i in range(len(scores)):
if(scores[i] > 18):
scores[i] = 4
elif(scores[i] > 16):
scores[i] = 3
elif(scores[i] > 14):
scores[i] = 2
elif(scores[i] > 12):
scores[i] = 1
else:
scores[i] = 0
# Update the final scores in student_data to reflect these changes
for i in range(len(scores)):
student_data[i,32] = scores[i]
# Display new data. Hint: Look at the last column
student_data
```
## Encoding non-numeric data to integers
```
# One student sample
student_data[0,:]
```
### We have some qualitative data from the questionnaire that needs to be converted to numbers.
```
# Label Encoder
le = preprocessing.LabelEncoder()
# Columns that hold non-numeric data
indices = np.array([0,1,3,4,5,8,9,10,11,15,16,17,18,19,20,21,22])
# Transform the non-numeric data in these columns to integers
for i in range(len(indices)):
column = indices[i]
le.fit(student_data[:,column])
student_data[:,column] = le.transform(student_data[:,column])
student_data[0,:]
```
## Encoding 0's to -1 for binomial data.
### We want the weights to change because a 0 input still represents something! Therefore, we encode 0's as -1's so the weights will update for that input.
```
# Columns that hold binomial data
indices = np.array([0,1,3,4,5,15,16,17,18,19,20,21,22])
# Change 0's to -1's
for i in range(len(indices)):
column = indices[i]
# values of current feature
feature = student_data[:,column]
# change values to -1 if equal to 0
feature = np.where(feature==0, -1, feature)
student_data[:,column] = feature
student_data[0,:]
```
## Standardizing the nominal and numerical data.
### We need each input to matter equally (everyone is important!). We do this by standardizing our data (giving it a mean of 0 and a standard deviation of 1).
```
scaler = preprocessing.StandardScaler()
temp = student_data[:,[2,6,7,8,9,10,11,12,13,14,23,24,25,26,27,28,29,30,31]]
print(student_data[0,:])
Standardized = scaler.fit_transform(temp)
print('Mean:', round(Standardized.mean()))
print('Standard deviation:', Standardized.std())
student_data[:,[2,6,7,8,9,10,11,12,13,14,23,24,25,26,27,28,29,30,31]] = Standardized
student_data[0,:]
```
## Convert results to one-hot encoding
```
# Final grades
results = student_data[:,32]
# Take a look at first 5 final grades
print("First 5 final grades:", results[0:5])
# All unique values for final grades (0-4 representing F-A)
possible_results = np.unique(student_data[:,32]).T
print("All possible results:", possible_results)
# One-hot encode final grades (results) which will be used as our output
# The length of the "ID" should be as long as the total number of possible results so each result
## gets its own, personal one-hot encoding
y = keras.utils.to_categorical(results,len(possible_results))
# Take a look at the first 5 final grades now (no longer numbers but arrays)
y[0:5]
# our input, all features except final grades
x = student_data[:,0:32]
```
# Model Building
#### Now let's create a function that will build a model for us. This will come in handy later on. Our model will have two hidden layers: the first with 800 units and the second with 400. The optimizer we are using is adamax, which is good at ignoring noise in a dataset. The loss function we are using is categorical cross-entropy, which is useful when trying to classify or label something. In this case, we are trying to classify students by letter grade, so this loss function suits us well.
```
# Function to create network given model
def create_network(model):
# Specify input/output size
input_size = x.shape[1]
output_size = y.shape[1]
# Create the hidden layer
model.add(keras.layers.Dense(800, input_dim = input_size, activation = 'relu'))
# Additional hidden layer
model.add(keras.layers.Dense(400,activation='relu'))
# Output layer
model.add(keras.layers.Dense(output_size,activation='softmax'))
    # Compile the model (adamax, as discussed above)
model.compile(loss='categorical_crossentropy',
optimizer='adamax',
metrics=['accuracy'])
# Feed-forward model
model = keras.Sequential()
create_network(model)
```
# Initial Test of the Network
```
# Split data into training and testing data
x_train = x[0:518,:]
x_test = x[519:649,:]
y_train = y[0:518,:]
y_test = y[519:649,:]
# Train on training data!
# We're saving this information in the variable -history- so we can take a look at it later
history = model.fit(x_train, y_train,
batch_size = 32,
epochs = 7,
verbose = 0,
validation_split = 0.2)
# Validate using data the network hasn't seen before (testing data)
# Save this info in -score- so we can take a look at it
score = model.evaluate(x_test,y_test, verbose=0)
# Check its effectiveness
print('Test loss:', score[0])
print('Test accuracy:', score[1])
# Plot the data
def plot(history):
plt.figure(1)
# Summarize history for accuracy
plt.subplot(211)
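    # note: newer Keras versions store these under 'accuracy'/'val_accuracy'
    # instead of 'acc'/'val_acc'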
plt.plot(history.history['acc'])
plt.plot(history.history['val_acc'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train','test'], loc ='upper left')
# Summarize history for loss
plt.subplot(212)
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train','test'], loc ='upper left')
# Display plot
plt.tight_layout()
plt.show()
# Plot current training and validation accuracy and loss
plot(history)
```
# Training and Testing Without Individual Features
```
# Analyze the effects of removing one feature on training
def remove_and_analyze(feature):
# Told you those feature descriptions would be useful
print("Without feature", feature, ":", feature_descrips[feature])
# Create feed-forward network
model = keras.Sequential()
create_network(model)
# Remove feature from columns (axis 1)
x = np.delete(student_data, feature, axis = 1)
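    # note: student_data still contains the final-grade column (index 32), so unlike the
    # earlier x = student_data[:, 0:32], the target remains among the inputs here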
# Split data into training and testing data
x_train = x[0:518,:]
x_test = x[519:649,:]
# Train on training data!
history = model.fit(x_train, y_train,
batch_size = 32,
epochs = 7,
verbose = 0,
validation_split = 0.2)
# Validate using data the network hasn't seen before (testing data)
score = model.evaluate(x_test,y_test, verbose=0)
    # Check its effectiveness
print('Test loss:', score[0])
print('Test accuracy:', score[1])
# Plot the data
plot(history)
# Analyze the effects of removing one feature on training
# Do this for all input features
for i in range(student_data.shape[1]-1):
remove_and_analyze(i)
print("\n \n \n")
```
# Training and Testing Without Five Features
```
# Delete the five features that most negatively impact accuracy
x = np.delete(student_data, 21, axis = 1)
x = np.delete(x, 20, axis = 1)
x = np.delete(x, 9, axis = 1)
x = np.delete(x, 8, axis = 1)
x = np.delete(x, 7, axis = 1)
# Create feed-forward network
model = keras.Sequential()
create_network(model)
# Split data into training and testing data
x_train = x[0:518,:]
x_test = x[519:649,:]
# Train on training data!
history = model.fit(x_train, y_train,
batch_size = 32,
epochs = 7,
verbose = 0,
validation_split = 0.2)
# Validate using data the network hasn't seen before (testing data)
score = model.evaluate(x_test,y_test, verbose=0)
# Check its effectiveness
print('Test loss:', score[0])
print('Test accuracy:', score[1])
# Plot the data
plot(history)
```
# Grade Distribution Analysis
```
# Function for analyzing the percent of students with each grade [F,D,C,B,A]
def analyze(array):
# To hold the total number of students with a certain final grade
# Index 0 - F. Index 4 - A
sums = np.array([0,0,0,0,0])
# Iterate through array. Update sums according to whether a student got a final grade of a(n)
for i in range(len(array)):
# F
if(array[i]==0):
sums[0] += 1
# D
elif(array[i]==1):
sums[1] +=1
# C
elif(array[i]==2):
sums[2] +=1
# B
elif(array[i]==3):
sums[3] +=1
# A
else:
sums[4] += 1
# Total number of students
total = sums[0] + sums[1] + sums[2] + sums[3] + sums[4]
# Hold percentage of students with grade of [F,D,C,B,A]
percentages = np.array([sums[0]/total*100,
sums[1]/total*100,
sums[2]/total*100,
sums[3]/total*100,
sums[4]/total*100])
# One bar for each of the 5 grades
x = np.array([1,2,3,4,5])
# Descriptions for each bar. None on y-axis
plt.xticks(np.arange(6), ('', 'F', 'D', 'C', 'B','A'))
# X axis - grades. Y axis - percentage of students with each grade
plt.bar(x,percentages)
plt.xlabel("Grades")
plt.ylabel("Percentage of Students")
# Display bar graph
plt.show()
# Display percentages
print(percentages)
```
## Family Educational Support
```
# Array holding final grades of all students who have family educational support
fam_sup = []
# Array holding final grades of all students who do not have family educational support
no_fam_sup = []
# Iterate through all student samples
for i in range(student_data.shape[0]):
# Does the student have family educational support? (-1 no, 1 yes)
sup = student_data[i][16]
# Append student's final grade to corresponding array
if(sup==1):
fam_sup.append(student_data[i][32])
else:
no_fam_sup.append(student_data[i][32])
```
### Family Educational Support
```
analyze(fam_sup)
```
### No Family Educational Support
```
analyze(no_fam_sup)
```
## Reason for choosing school
```
# Each array holds the grades of students who chose to go to their school for that reason
# Close to home
reason1 = []
# School reputation
reason2 = []
# Course preference
reason3 = []
# Other
reason4 = []
# Values that represent these unique reasons. They are not integer numbers like in the previous
## example. They're floating-point numbers, so we'll save them so we can compare them to the value
## of this feature in each sample
unique_reasons = np.unique(student_data[:,10])
# Iterate through all student samples and append final grades to corresponding arrays
for i in range(student_data.shape[0]):
reason = student_data[i][10]
if(reason==unique_reasons[0]):
reason1.append(student_data[i][32])
elif(reason==unique_reasons[1]):
reason2.append(student_data[i][32])
elif(reason==unique_reasons[2]):
reason3.append(student_data[i][32])
else:
reason4.append(student_data[i][32])
```
### Reason 1: Close to Home
```
analyze(reason1)
```
### Reason 2: School Reputation
```
analyze(reason2)
```
### Reason 3: Course Preference
```
analyze(reason3)
```
### Reason 4: Other
```
analyze(reason4)
```
## Frequency of Going Out With Friends
```
# Each array holds the grades of students who go out with friends for that specified amount of time
# (1 - very low, 5 - very high)
go_out1 = []
go_out2 = []
go_out3 = []
go_out4 = []
go_out5 = []
# Floating point values representing frequency
unique = np.unique(student_data[:,25])
# Iterate through all student samples and append final grades to corresponding arrays
for i in range(student_data.shape[0]):
frequency = student_data[i][25]
if(frequency==unique[0]):
go_out1.append(student_data[i][32])
elif(frequency==unique[1]):
go_out2.append(student_data[i][32])
elif(frequency==unique[2]):
go_out3.append(student_data[i][32])
elif(frequency==unique[3]):
go_out4.append(student_data[i][32])
else:
go_out5.append(student_data[i][32])
analyze(go_out1)
analyze(go_out2)
analyze(go_out3)
analyze(go_out4)
analyze(go_out5)
```
## Free Time after School
```
# Each array holds the grades of students who have the specified amount of free time after school
# (1 - very low, 5 - very high)
free1 = []
free2 = []
free3 = []
free4 = []
free5 = []
# Floating point values representing frequency
unique = np.unique(student_data[:,24])
# Iterate through all student samples and append final grades to corresponding arrays
for i in range(student_data.shape[0]):
frequency = student_data[i][24]
if(frequency==unique[0]):
free1.append(student_data[i][32])
elif(frequency==unique[1]):
free2.append(student_data[i][32])
elif(frequency==unique[2]):
free3.append(student_data[i][32])
elif(frequency==unique[3]):
free4.append(student_data[i][32])
else:
free5.append(student_data[i][32])
analyze(free1)
analyze(free2)
analyze(free3)
analyze(free4)
analyze(free5)
```
## Paid Classes
```
# Array holding final grades of all students who have extra paid classes
paid_class = []
# Array holding final grades of all students who do not have extra paid classes
no_paid_class = []
# Iterate through all student samples and append final grades to corresponding arrays
for i in range(student_data.shape[0]):
paid = student_data[i][17]
if(paid==1):
paid_class.append(student_data[i][32])
else:
no_paid_class.append(student_data[i][32])
```
### Extra Paid Classes
```
analyze(paid_class)
```
### No Extra Paid Classes
```
analyze(no_paid_class)
```
| github_jupyter |
```
users = [
{ "id": 0, "name": "Hero" },
{ "id": 1, "name": "Dunn" },
{ "id": 2, "name": "Sue" },
{ "id": 3, "name": "Chi" },
{ "id": 4, "name": "Thor" },
{ "id": 5, "name": "Clive" },
{ "id": 6, "name": "Hicks" },
{ "id": 7, "name": "Devin" },
{ "id": 8, "name": "Kate" },
{ "id": 9, "name": "Klein" }
]
# “friendship” data, represented as a list of pairs of IDs
# the tuple (0, 1) indicates that the data scientist with id 0 (Hero) and
# the data scientist with id 1 (Dunn) are friends.
friendships = [(0, 1), (0, 2), (1, 2), (1, 3), (2, 3), (3, 4),
(4, 5), (5, 6), (5, 7), (6, 8), (7, 8), (8, 9)]
```
# 1) Add a list of friends to each user
```
# set each user’s friends property to an empty list:
for user in users:
user["friends"] = []
print users
print users[0]['friends']
# then we populate the lists using the friendships data:
for i, j in friendships:
# this works because users[i] is the user whose id is i
users[i]["friends"].append(users[j]) # add i as a friend of j
users[j]["friends"].append(users[i]) # add j as a friend of i
print users[0]
```
# 2) what’s the average number of connections
Once each user dict contains a list of friends, we can easily ask questions of our
graph, like “what’s the average number of connections?”
First we find the total number of connections, by summing up the lengths of all the
friends lists:
```
def number_of_friends(user):
"""how many friends does _user_ have?"""
return len(user["friends"]) # length of friend_ids list
total_connections = sum(number_of_friends(user)
for user in users) # 24
print total_connections
# And then we just divide by the number of users:
from __future__ import division # integer division is lame
num_users = len(users) # length of the users list
print num_users
avg_connections = total_connections / num_users # 2.4
print avg_connections
```
It’s also easy to find the most connected people—they’re the people who have the largest
number of friends.
Since there aren’t very many users, we can sort them from “most friends” to “least
friends”:
```
# create a list (user_id, number_of_friends)
num_friends_by_id = [(user["id"], number_of_friends(user))
for user in users]
print num_friends_by_id
sorted(num_friends_by_id, # get it sorted
key=lambda (user_id, num_friends): num_friends, # by num_friends
reverse=True) # largest to smallest
# each pair is (user_id, num_friends)
# [(1, 3), (2, 3), (3, 3), (5, 3), (8, 3),
# (0, 2), (4, 2), (6, 2), (7, 2), (9, 1)]
```
# 3) Data Scientists You May Know
Your first instinct is to suggest that a user might know the friends of friends. These
are easy to compute: for each of a user’s friends, iterate over that person’s friends, and
collect all the results:
```
def friends_of_friend_ids_bad(user):
# "foaf" is short for "friend of a friend"
return [foaf["id"]
for friend in user["friends"] # for each of user's friends
for foaf in friend["friends"]] # get each of _their_ friends
friends_of_friend_ids_bad(users[0])
```
It includes user 0 (twice), since Hero is indeed friends with both of his friends. It
includes users 1 and 2, although they are both friends with Hero already. And it
includes user 3 twice, as Chi is reachable through two different friends:
```
print [friend["id"] for friend in users[0]["friends"]] # [1, 2]
print [friend["id"] for friend in users[1]["friends"]] # [0, 2, 3]
print [friend["id"] for friend in users[2]["friends"]] # [0, 1, 3]
```
Knowing that people are friends-of-friends in multiple ways seems like interesting
information, so maybe instead we should produce a count of mutual friends. And we
definitely should use a helper function to exclude people already known to the user:
```
from collections import Counter # not loaded by default
def not_the_same(user, other_user):
"""two users are not the same if they have different ids"""
return user["id"] != other_user["id"]
def not_friends(user, other_user):
"""other_user is not a friend if he's not in user["friends"];
that is, if he's not_the_same as all the people in user["friends"]"""
return all(not_the_same(friend, other_user)
for friend in user["friends"])
def friends_of_friend_ids(user):
return Counter(foaf["id"]
for friend in user["friends"] # for each of my friends
for foaf in friend["friends"] # count *their* friends
if not_the_same(user, foaf) # who aren't me
and not_friends(user, foaf)) # and aren't my friends
print friends_of_friend_ids(users[3]) # Counter({0: 2, 5: 1})
```
This correctly tells Chi (id 3) that she has two mutual friends with Hero (id 0) but
only one mutual friend with Clive (id 5).
As a data scientist, you know that you also might enjoy meeting users with similar
interests. (This is a good example of the “substantive expertise” aspect of data science.)
After asking around, you manage to get your hands on this data, as a list of
pairs (user_id, interest):
```
interests = [
(0, "Hadoop"), (0, "Big Data"), (0, "HBase"), (0, "Java"),
(0, "Spark"), (0, "Storm"), (0, "Cassandra"),
(1, "NoSQL"), (1, "MongoDB"), (1, "Cassandra"), (1, "HBase"),
(1, "Postgres"), (2, "Python"), (2, "scikit-learn"), (2, "scipy"),
(2, "numpy"), (2, "statsmodels"), (2, "pandas"), (3, "R"), (3, "Python"),
(3, "statistics"), (3, "regression"), (3, "probability"),
(4, "machine learning"), (4, "regression"), (4, "decision trees"),
(4, "libsvm"), (5, "Python"), (5, "R"), (5, "Java"), (5, "C++"),
(5, "Haskell"), (5, "programming languages"), (6, "statistics"),
(6, "probability"), (6, "mathematics"), (6, "theory"),
(7, "machine learning"), (7, "scikit-learn"), (7, "Mahout"),
(7, "neural networks"), (8, "neural networks"), (8, "deep learning"),
(8, "Big Data"), (8, "artificial intelligence"), (9, "Hadoop"),
(9, "Java"), (9, "MapReduce"), (9, "Big Data")
]
```
For example, Thor (id 4) has no friends in common with Devin (id 7), but they share
an interest in machine learning.
It’s easy to build a function that finds users with a certain interest:
```
def data_scientists_who_like(target_interest):
return [user_id
for user_id, user_interest in interests
if user_interest == target_interest]
data_scientists_who_like('Java')
```
This works, but it has to examine the whole list of interests for every search. If we
have a lot of users and interests (or if we just want to do a lot of searches), we’re probably
better off building an index from interests to users:
```
from collections import defaultdict
# keys are interests, values are lists of user_ids with that interest
user_ids_by_interest = defaultdict(list)
for user_id, interest in interests:
user_ids_by_interest[interest].append(user_id)
print user_ids_by_interest
# And another from users to interests:
# keys are user_ids, values are lists of interests for that user_id
interests_by_user_id = defaultdict(list)
for user_id, interest in interests:
interests_by_user_id[user_id].append(interest)
print interests_by_user_id
```
Now it’s easy to find who has the most interests in common with a given user:
- Iterate over the user’s interests.
- For each interest, iterate over the other users with that interest.
- Keep count of how many times we see each other user.
```
def most_common_interests_with(user):
return Counter(interested_user_id
for interest in interests_by_user_id[user["id"]]
for interested_user_id in user_ids_by_interest[interest]
if interested_user_id != user["id"])
```
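For example, asking who has the most interests in common with Hero (id 0) should rank Klein (id 9) first, since the two share Hadoop, Java, and Big Data:
```
# who shares the most interests with Hero?
most_common_interests_with(users[0])   # e.g. Counter({9: 3, 1: 2, 8: 1, 5: 1})
```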
# 4) Salaries and Experience
```
# Salary data is of course sensitive,
# but he manages to provide you an anonymous data set containing each user’s
# salary (in dollars) and tenure as a data scientist (in years):
salaries_and_tenures = [(83000, 8.7), (88000, 8.1),
(48000, 0.7), (76000, 6),
(69000, 6.5), (76000, 7.5),
(60000, 2.5), (83000, 10),
(48000, 1.9), (63000, 4.2)]
```
It seems pretty clear that people with more experience tend to earn more. How can
you turn this into a fun fact? Your first idea is to look at the average salary for each
tenure:
```
# keys are years, values are lists of the salaries for each tenure
salary_by_tenure = defaultdict(list)
for salary, tenure in salaries_and_tenures:
salary_by_tenure[tenure].append(salary)
print salary_by_tenure
# keys are years, each value is average salary for that tenure
average_salary_by_tenure = {
tenure : sum(salaries) / len(salaries)
for tenure, salaries in salary_by_tenure.items()
}
print average_salary_by_tenure
```
This turns out to be not particularly useful, as none of the users have the same tenure, which means we’re just reporting the individual users’ salaries.
```
# It might be more helpful to bucket the tenures:
def tenure_bucket(tenure):
if tenure < 2:
return "less than two"
elif tenure < 5:
return "between two and five"
else:
return "more than five"
# Then group together the salaries corresponding to each bucket:
# keys are tenure buckets, values are lists of salaries for that bucket
salary_by_tenure_bucket = defaultdict(list)
for salary, tenure in salaries_and_tenures:
bucket = tenure_bucket(tenure)
salary_by_tenure_bucket[bucket].append(salary)
# And finally compute the average salary for each group:
# keys are tenure buckets, values are average salary for that bucket
average_salary_by_bucket = {
tenure_bucket : sum(salaries) / len(salaries)
for tenure_bucket, salaries in salary_by_tenure_bucket.iteritems()
}
print average_salary_by_bucket
```
# 5) Paid Accounts
```
def predict_paid_or_unpaid(years_experience):
if years_experience < 3.0:
return "paid"
elif years_experience < 8.5:
return "unpaid"
else:
return "paid"
```
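Applying this rule of thumb to the tenures we already have gives a quick sense of how it behaves:
```
# apply the rule of thumb to the tenures in our data set
[(tenure, predict_paid_or_unpaid(tenure))
 for salary, tenure in salaries_and_tenures]
```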
# 6) Topics of Interest
One simple (if not particularly exciting) way to find the most popular interests is simply
to count the words:
1. Lowercase each interest (since different users may or may not capitalize their
interests).
2. Split it into words.
3. Count the results.
```
words_and_counts = Counter(word
for user, interest in interests
for word in interest.lower().split())
print words_and_counts
```
This makes it easy to list out the words that occur more than once:
```
for word, count in words_and_counts.most_common():
if count > 1:
print word, count
```
| github_jupyter |
```
import math
from IPython import display
from matplotlib import cm
from matplotlib import gridspec
from matplotlib import pyplot as plt
import numpy as np
import pandas as pd
from sklearn import metrics
import tensorflow as tf
from tensorflow.python.data import Dataset
tf.logging.set_verbosity(tf.logging.ERROR)
pd.options.display.max_rows = 10
pd.options.display.float_format = '{:.1f}'.format
lottery_dataframe = pd.read_csv("lottery.csv", sep=",")
lottery_dataframe = lottery_dataframe.reindex(np.random.permutation(lottery_dataframe.index))
def preprocess_features(lottery_dataframe):
selected_features = lottery_dataframe[
["day",
"month",
"year",
#"no1",
#"top2",
#"top3",
#"front3_1",
#"front3_2",
#"bottom3_1",
#"bottom3_2"
]]
return selected_features
def preprocess_targets(lottery_dataframe):
output_targets = pd.DataFrame()
output_targets["bottom2"] = lottery_dataframe["bottom2"]
return output_targets
# Choose the first 578 (out of 684) examples for training.
training_examples = preprocess_features(lottery_dataframe.head(578))
training_targets = preprocess_targets(lottery_dataframe.head(578))
# Choose the last 106 (out of 684) examples for validation.
validation_examples = preprocess_features(lottery_dataframe.tail(106))
validation_targets = preprocess_targets(lottery_dataframe.tail(106))
# Double-check that we've done the right thing.
print("Training examples summary:")
display.display(training_examples.describe())
print("Validation examples summary:")
display.display(validation_examples.describe())
print("Training targets summary:")
display.display(training_targets.describe())
print("Validation targets summary:")
display.display(validation_targets.describe())
def construct_feature_columns(input_features):
return set([tf.feature_column.numeric_column(my_feature)
for my_feature in input_features])
def my_input_fn(features, targets, batch_size=1, shuffle=True, num_epochs=None):
"""Trains a neural network model.
Args:
features: pandas DataFrame of features
targets: pandas DataFrame of targets
batch_size: Size of batches to be passed to the model
shuffle: True or False. Whether to shuffle the data.
num_epochs: Number of epochs for which data should be repeated. None = repeat indefinitely
Returns:
Tuple of (features, labels) for next data batch
"""
# Convert pandas data into a dict of np arrays.
features = {key:np.array(value) for key,value in dict(features).items()}
# Construct a dataset, and configure batching/repeating.
ds = Dataset.from_tensor_slices((features,targets)) # warning: 2GB limit
ds = ds.batch(batch_size).repeat(num_epochs)
# Shuffle the data, if specified.
if shuffle:
ds = ds.shuffle(10000)
# Return the next batch of data.
features, labels = ds.make_one_shot_iterator().get_next()
return features, labels
def train_model(
learning_rate,
steps,
batch_size,
feature_columns,
training_examples,
training_targets,
validation_examples,
validation_targets):
"""Trains a linear regression model.
In addition to training, this function also prints training progress information,
as well as a plot of the training and validation loss over time.
Args:
learning_rate: A `float`, the learning rate.
steps: A non-zero `int`, the total number of training steps. A training step
consists of a forward and backward pass using a single batch.
    batch_size: A non-zero `int`, the number of examples per training batch.
    feature_columns: A `set` specifying the input feature columns to use.
    training_examples: A `DataFrame` containing one or more columns from
      `lottery_dataframe` to use as input features for training.
    training_targets: A `DataFrame` containing exactly one column from
      `lottery_dataframe` to use as target for training.
    validation_examples: A `DataFrame` containing one or more columns from
      `lottery_dataframe` to use as input features for validation.
    validation_targets: A `DataFrame` containing exactly one column from
      `lottery_dataframe` to use as target for validation.
Returns:
    A tuple of the trained `LinearRegressor` and its validation-set predictions.
"""
periods = 10
steps_per_period = steps / periods
# Create a linear regressor object.
my_optimizer = tf.train.FtrlOptimizer(learning_rate=learning_rate)
my_optimizer = tf.contrib.estimator.clip_gradients_by_norm(my_optimizer, 5.0)
linear_regressor = tf.estimator.LinearRegressor(
feature_columns=feature_columns,
optimizer=my_optimizer
)
training_input_fn = lambda: my_input_fn(training_examples,
training_targets["bottom2"],
batch_size=batch_size)
predict_training_input_fn = lambda: my_input_fn(training_examples,
training_targets["bottom2"],
num_epochs=1,
shuffle=False)
predict_validation_input_fn = lambda: my_input_fn(validation_examples,
validation_targets["bottom2"],
num_epochs=1,
shuffle=False)
# Train the model, but do so inside a loop so that we can periodically assess
# loss metrics.
print("Training model...")
print("RMSE (on training data):")
training_rmse = []
validation_rmse = []
for period in range (0, periods):
# Train the model, starting from the prior state.
linear_regressor.train(
input_fn=training_input_fn,
steps=steps_per_period
)
# Take a break and compute predictions.
training_predictions = linear_regressor.predict(input_fn=predict_training_input_fn)
training_predictions = np.array([item['predictions'][0] for item in training_predictions])
validation_predictions = linear_regressor.predict(input_fn=predict_validation_input_fn)
validation_predictions = np.array([item['predictions'][0] for item in validation_predictions])
# Compute training and validation loss.
training_root_mean_squared_error = math.sqrt(
metrics.mean_squared_error(training_predictions, training_targets))
validation_root_mean_squared_error = math.sqrt(
metrics.mean_squared_error(validation_predictions, validation_targets))
# Occasionally print the current loss.
print(" period %02d : %0.2f" % (period, training_root_mean_squared_error))
# Add the loss metrics from this period to our list.
training_rmse.append(training_root_mean_squared_error)
validation_rmse.append(validation_root_mean_squared_error)
print("Model training finished.")
# Output a graph of loss metrics over periods.
plt.ylabel("RMSE")
plt.xlabel("Periods")
plt.title("Root Mean Squared Error vs. Periods")
plt.tight_layout()
plt.plot(training_rmse, label="training")
plt.plot(validation_rmse, label="validation")
plt.legend()
return linear_regressor, validation_predictions
r, p = train_model(
learning_rate=0.001,
steps=5000,
batch_size=10,
feature_columns=construct_feature_columns(training_examples),
training_examples=training_examples,
training_targets=training_targets,
validation_examples=validation_examples,
validation_targets=validation_targets)
my_target = validation_targets.copy()
my_target['predict'] = p
my_target
# Output a graph of loss metrics over periods.
plt.ylabel("target")
plt.xlabel("predict")
plt.title("bottom2")
plt.tight_layout()
plt.plot(validation_targets, label="Target")
plt.plot(p, label="Predict")
plt.legend()
my_target
```
| github_jupyter |
# LOFO Feature Importance
https://github.com/aerdem4/lofo-importance
```
!pip install lofo-importance
import numpy as np
import pandas as pd
df = pd.read_csv("../input/train.csv", index_col='id')
df['wheezy-copper-turtle-magic'] = df['wheezy-copper-turtle-magic'].astype('category')
df.shape
```
### Use the best model in public kernels
```
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
def get_model():
return Pipeline([('scaler', StandardScaler()),
('qda', QuadraticDiscriminantAnalysis(reg_param=0.111))
])
```
### Top 20 Features for wheezy-copper-turtle-magic = 0
```
from sklearn.model_selection import KFold, StratifiedKFold, train_test_split
from sklearn.linear_model import LogisticRegression
from lofo import LOFOImportance, FLOFOImportance, plot_importance
features = [c for c in df.columns if c not in ['id', 'target', 'wheezy-copper-turtle-magic']]
def get_lofo_importance(wctm_num):
sub_df = df[df['wheezy-copper-turtle-magic'] == wctm_num]
sub_features = [f for f in features if sub_df[f].std() > 1.5]
lofo_imp = LOFOImportance(sub_df, target="target",
features=sub_features,
cv=StratifiedKFold(n_splits=4, random_state=42, shuffle=True), scoring="roc_auc",
model=get_model(), n_jobs=4)
return lofo_imp.get_importance()
plot_importance(get_lofo_importance(0), figsize=(12, 12))
```
### Top 20 Features for wheezy-copper-turtle-magic = 1
```
plot_importance(get_lofo_importance(1), figsize=(12, 12))
```
### Top 20 Features for wheezy-copper-turtle-magic = 2
```
plot_importance(get_lofo_importance(2), figsize=(12, 12))
```
### Find the most harmful features for each wheezy-copper-turtle-magic
```
from tqdm import tqdm_notebook
import warnings
warnings.filterwarnings("ignore")
features_to_remove = []
potential_gain = []
for i in tqdm_notebook(range(512)):
imp = get_lofo_importance(i)
features_to_remove.append(imp["feature"].values[-1])
potential_gain.append(-imp["importance_mean"].values[-1])
print("Potential gain (AUC):", np.round(np.mean(potential_gain), 5))
features_to_remove
```
# Create submission using the current best kernel
https://www.kaggle.com/tunguz/ig-pca-nusvc-knn-qda-lr-stack by Bojan Tunguz
```
import numpy as np, pandas as pd
from sklearn.model_selection import StratifiedKFold
from sklearn.metrics import roc_auc_score
from sklearn import svm, neighbors, linear_model, neural_network
from sklearn.svm import NuSVC
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
from tqdm import tqdm
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
from sklearn.pipeline import Pipeline
from sklearn.metrics import roc_auc_score
from sklearn.feature_selection import VarianceThreshold
train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
oof_svnu = np.zeros(len(train))
pred_te_svnu = np.zeros(len(test))
oof_svc = np.zeros(len(train))
pred_te_svc = np.zeros(len(test))
oof_knn = np.zeros(len(train))
pred_te_knn = np.zeros(len(test))
oof_lr = np.zeros(len(train))
pred_te_lr = np.zeros(len(test))
oof_mlp = np.zeros(len(train))
pred_te_mlp = np.zeros(len(test))
oof_qda = np.zeros(len(train))
pred_te_qda = np.zeros(len(test))
default_cols = [c for c in train.columns if c not in ['id', 'target', 'wheezy-copper-turtle-magic']]
for i in range(512):
cols = [c for c in default_cols if c != features_to_remove[i]]
train2 = train[train['wheezy-copper-turtle-magic']==i]
test2 = test[test['wheezy-copper-turtle-magic']==i]
idx1 = train2.index; idx2 = test2.index
train2.reset_index(drop=True,inplace=True)
data = pd.concat([pd.DataFrame(train2[cols]), pd.DataFrame(test2[cols])])
data2 = StandardScaler().fit_transform(PCA(svd_solver='full',n_components='mle').fit_transform(data[cols]))
train3 = data2[:train2.shape[0]]; test3 = data2[train2.shape[0]:]
data2 = StandardScaler().fit_transform(VarianceThreshold(threshold=1.5).fit_transform(data[cols]))
train4 = data2[:train2.shape[0]]; test4 = data2[train2.shape[0]:]
# STRATIFIED K FOLD (Using splits=25 scores 0.002 better but is slower)
skf = StratifiedKFold(n_splits=5, random_state=42)
for train_index, test_index in skf.split(train2, train2['target']):
clf = NuSVC(probability=True, kernel='poly', degree=4, gamma='auto', random_state=4, nu=0.59, coef0=0.053)
clf.fit(train3[train_index,:],train2.loc[train_index]['target'])
oof_svnu[idx1[test_index]] = clf.predict_proba(train3[test_index,:])[:,1]
pred_te_svnu[idx2] += clf.predict_proba(test3)[:,1] / skf.n_splits
clf = neighbors.KNeighborsClassifier(n_neighbors=17, p=2.9)
clf.fit(train3[train_index,:],train2.loc[train_index]['target'])
oof_knn[idx1[test_index]] = clf.predict_proba(train3[test_index,:])[:,1]
pred_te_knn[idx2] += clf.predict_proba(test3)[:,1] / skf.n_splits
clf = linear_model.LogisticRegression(solver='saga',penalty='l1',C=0.1)
clf.fit(train3[train_index,:],train2.loc[train_index]['target'])
oof_lr[idx1[test_index]] = clf.predict_proba(train3[test_index,:])[:,1]
pred_te_lr[idx2] += clf.predict_proba(test3)[:,1] / skf.n_splits
clf = neural_network.MLPClassifier(random_state=3, activation='relu', solver='lbfgs', tol=1e-06, hidden_layer_sizes=(250, ))
clf.fit(train3[train_index,:],train2.loc[train_index]['target'])
oof_mlp[idx1[test_index]] = clf.predict_proba(train3[test_index,:])[:,1]
pred_te_mlp[idx2] += clf.predict_proba(test3)[:,1] / skf.n_splits
clf = svm.SVC(probability=True, kernel='poly', degree=4, gamma='auto', random_state=42)
clf.fit(train3[train_index,:],train2.loc[train_index]['target'])
oof_svc[idx1[test_index]] = clf.predict_proba(train3[test_index,:])[:,1]
pred_te_svc[idx2] += clf.predict_proba(test3)[:,1] / skf.n_splits
clf = QuadraticDiscriminantAnalysis(reg_param=0.111)
clf.fit(train4[train_index,:],train2.loc[train_index]['target'])
oof_qda[idx1[test_index]] = clf.predict_proba(train4[test_index,:])[:,1]
pred_te_qda[idx2] += clf.predict_proba(test4)[:,1] / skf.n_splits
print('lr', roc_auc_score(train['target'], oof_lr))
print('knn', roc_auc_score(train['target'], oof_knn))
print('svc', roc_auc_score(train['target'], oof_svc))
print('svcnu', roc_auc_score(train['target'], oof_svnu))
print('mlp', roc_auc_score(train['target'], oof_mlp))
print('qda', roc_auc_score(train['target'], oof_qda))
print('blend 1', roc_auc_score(train['target'], oof_svnu*0.7 + oof_svc*0.05 + oof_knn*0.2 + oof_mlp*0.05))
print('blend 2', roc_auc_score(train['target'], oof_qda*0.5+oof_svnu*0.35 + oof_svc*0.025 + oof_knn*0.1 + oof_mlp*0.025))
oof_svnu = oof_svnu.reshape(-1, 1)
pred_te_svnu = pred_te_svnu.reshape(-1, 1)
oof_svc = oof_svc.reshape(-1, 1)
pred_te_svc = pred_te_svc.reshape(-1, 1)
oof_knn = oof_knn.reshape(-1, 1)
pred_te_knn = pred_te_knn.reshape(-1, 1)
oof_mlp = oof_mlp.reshape(-1, 1)
pred_te_mlp = pred_te_mlp.reshape(-1, 1)
oof_lr = oof_lr.reshape(-1, 1)
pred_te_lr = pred_te_lr.reshape(-1, 1)
oof_qda = oof_qda.reshape(-1, 1)
pred_te_qda = pred_te_qda.reshape(-1, 1)
tr = np.concatenate((oof_svnu, oof_svc, oof_knn, oof_mlp, oof_lr, oof_qda), axis=1)
te = np.concatenate((pred_te_svnu, pred_te_svc, pred_te_knn, pred_te_mlp, pred_te_lr, pred_te_qda), axis=1)
print(tr.shape, te.shape)
oof_lrr = np.zeros(len(train))
pred_te_lrr = np.zeros(len(test))
skf = StratifiedKFold(n_splits=5, random_state=42)
for train_index, test_index in skf.split(tr, train['target']):
lrr = linear_model.LogisticRegression()
lrr.fit(tr[train_index], train['target'][train_index])
oof_lrr[test_index] = lrr.predict_proba(tr[test_index,:])[:,1]
pred_te_lrr += lrr.predict_proba(te)[:,1] / skf.n_splits
print('stack CV score =',round(roc_auc_score(train['target'],oof_lrr),6))
sub = pd.read_csv('../input/sample_submission.csv')
sub['target'] = pred_te_lrr
sub.to_csv('submission_stack.csv', index=False)
```
| github_jupyter |
Submitting various things for end of grant.
```
import os
import sys
import requests
import pandas
import paramiko
import json
from IPython import display
from curation_common import *
from htsworkflow.submission.encoded import DCCValidator
PANDAS_ODF = os.path.expanduser('~/src/odf_pandas')
if PANDAS_ODF not in sys.path:
sys.path.append(PANDAS_ODF)
from pandasodf import ODFReader
import gcat
from htsworkflow.submission.encoded import Document
from htsworkflow.submission.aws_submission import run_aws_cp
# live server & control file
#server = ENCODED('www.encodeproject.org')
spreadsheet_name = "ENCODE_test_miRNA_experiments_01112018"
# test server & datafile
server = ENCODED('test.encodedcc.org')
#spreadsheet_name = os.path.expanduser('~diane/woldlab/ENCODE/C1-encode3-limb-2017-testserver.ods')
server.load_netrc()
validator = DCCValidator(server)
award = 'UM1HG009443'
```
# Submit Documents
Example Document submission
```
#atac_uuid = '0fc44318-b802-474e-8199-f3b6d708eb6f'
#atac = Document(os.path.expanduser('~/proj/encode3-curation/Wold_Lab_ATAC_Seq_protocol_December_2016.pdf'),
# 'general protocol',
# 'ATAC-Seq experiment protocol for Wold lab',
# )
#body = atac.create_if_needed(server, atac_uuid)
#print(body['@id'])
```
# Submit Annotations
```
#sheet = gcat.get_file(spreadsheet_name, fmt='pandas_excel')
#annotations = sheet.parse('Annotations', header=0)
#created = server.post_sheet('/annotations/', annotations, verbose=True, dry_run=True)
#print(len(created))
#if created:
# annotations.to_excel('/tmp/annotations.xlsx', index=False)
```
# Register Biosamples
```
book = gcat.get_file(spreadsheet_name, fmt='pandas_excel')
biosample = book.parse('Biosamples', header=0)
created = server.post_sheet('/biosamples/', biosample,
verbose=True,
dry_run=True,
validator=validator)
print(len(created))
if created:
biosample.to_excel('/dev/shm/biosamples.xlsx', index=False)
```
# Register Libraries
```
print(spreadsheet_name)
book = gcat.get_file(spreadsheet_name, fmt='pandas_excel')
libraries = book.parse('Libraries', header=0)
created = server.post_sheet('/libraries/', libraries, verbose=True, dry_run=True, validator=validator)
print(len(created))
if created:
libraries.to_excel('/dev/shm/libraries.xlsx', index=False)
```
# Register Experiments
```
print(server.server)
book = gcat.get_file(spreadsheet_name, fmt='pandas_excel')
experiments = book.parse('Experiments', header=0)
created = server.post_sheet('/experiments/', experiments, verbose=True, dry_run=False, validator=validator)
print(len(created))
if created:
experiments.to_excel('/dev/shm/experiments.xlsx', index=False)
```
# Register Replicates
```
print(server.server)
print(spreadsheet_name)
book = gcat.get_file(spreadsheet_name, fmt='pandas_excel')
replicates = book.parse('Replicates', header=0)
created = server.post_sheet('/replicates/', replicates, verbose=True, dry_run=True, validator=validator)
print(len(created))
if created:
replicates.to_excel('/dev/shm/replicates.xlsx', index=False)
```
| github_jupyter |
# End-to-end learning for music audio
- http://qiita.com/himono/items/a94969e35fa8d71f876c
```
# Download the data
!wget -P data http://mi.soi.city.ac.uk/datasets/magnatagatune/mp3.zip.001
!wget -P data http://mi.soi.city.ac.uk/datasets/magnatagatune/mp3.zip.002
!wget -P data http://mi.soi.city.ac.uk/datasets/magnatagatune/mp3.zip.003
# Concatenate the parts
!cat data/mp3.zip.* > data/music.zip
# Unzip into data/music (the paths below expect data/music/...)
!unzip data/music.zip -d data/music
```
```
%matplotlib inline
import os
import matplotlib.pyplot as plt
```
## Load the MP3 files
```
import numpy as np
from pydub import AudioSegment
def mp3_to_array(file):
# MP3 => RAW
song = AudioSegment.from_mp3(file)
    song_arr = np.frombuffer(song._data, dtype=np.int16)
return song_arr
%ls data/music/1/ambient_teknology-phoenix-01-ambient_teknology-0-29.mp3
file = 'data/music/1/ambient_teknology-phoenix-01-ambient_teknology-0-29.mp3'
song = mp3_to_array(file)
plt.plot(song)
```
## Load the song tag data
- Randomly sample 3000 songs
- Extract the 50 most commonly used tags
- Each song can have multiple tags
```
import pandas as pd
tags_df = pd.read_csv('data/annotations_final.csv', delim_whitespace=True)
# Randomly shuffle the whole table
tags_df = tags_df.sample(frac=1)
# Use the first 3000 songs
tags_df = tags_df[:3000]
tags_df
top50_tags = tags_df.iloc[:, 1:189].sum().sort_values(ascending=False).index[:50].tolist()
y = tags_df[top50_tags].values
y
```
## Load the audio data
- Get the file paths from mp3_path in tags_df
- Load each file as a numpy array with mp3_to_array()
- Reshape to (samples, features, channels)
- The audio waveform is one-dimensional, so channels is 1
- All training clips are the same length, so features should match across samples (no padding needed)
```
files = tags_df.mp3_path.values
files = [os.path.join('data', 'music', x) for x in files]
X = np.array([mp3_to_array(file) for file in files])
X = X.reshape(X.shape[0], X.shape[1], 1)
X.shape
```
## Split into training and test data
```
from sklearn.model_selection import train_test_split
random_state = 42
train_x, test_x, train_y, test_y = train_test_split(X, y, test_size=0.2, random_state=random_state)
print(train_x.shape)
print(test_x.shape)
print(train_y.shape)
print(test_y.shape)
plt.plot(train_x[0])
np.save('train_x.npy', train_x)
np.save('test_x.npy', test_x)
np.save('train_y.npy', train_y)
np.save('test_y.npy', test_y)
```
## Training
```
import numpy as np
from keras.models import Model
from keras.layers import Dense, Flatten, Input, Conv1D, MaxPooling1D
from keras.callbacks import CSVLogger, ModelCheckpoint
train_x = np.load('train_x.npy')
train_y = np.load('train_y.npy')
test_x = np.load('test_x.npy')
test_y = np.load('test_y.npy')
print(train_x.shape)
print(train_y.shape)
print(test_x.shape)
print(test_y.shape)
features = train_x.shape[1]
x_inputs = Input(shape=(features, 1), name='x_inputs')
x = Conv1D(128, 256, strides=256, padding='valid', activation='relu')(x_inputs) # strided conv
x = Conv1D(32, 8, activation='relu')(x)
x = MaxPooling1D(4)(x)
x = Conv1D(32, 8, activation='relu')(x)
x = MaxPooling1D(4)(x)
x = Conv1D(32, 8, activation='relu')(x)
x = MaxPooling1D(4)(x)
x = Conv1D(32, 8, activation='relu')(x)
x = MaxPooling1D(4)(x)
x = Flatten()(x)
x = Dense(100, activation='relu')(x)
x_outputs = Dense(50, activation='sigmoid', name='x_outputs')(x)
model = Model(inputs=x_inputs, outputs=x_outputs)
model.compile(optimizer='adam',
loss='categorical_crossentropy')
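# note: the tags are multi-label (sigmoid outputs), so binary_crossentropy is the
# more usual loss for this setup; categorical_crossentropy is what is used here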
logger = CSVLogger('history.log')
checkpoint = ModelCheckpoint(
'model.{epoch:02d}-{val_loss:.3f}.h5',
monitor='val_loss',
verbose=1,
save_best_only=True,
mode='auto')
model.fit(train_x, train_y, batch_size=600, epochs=50,
validation_data=[test_x, test_y],
callbacks=[logger, checkpoint])
```
## Prediction
- The tagger outputs multiple tags, so evaluate() alone may not be enough?
```
import numpy as np
from keras.models import load_model
from sklearn.metrics import roc_auc_score
test_x = np.load('test_x.npy')
test_y = np.load('test_y.npy')
model = load_model('model.22-9.187-0.202.h5')
pred_y = model.predict(test_x, batch_size=50)
print(roc_auc_score(test_y, pred_y))
print(model.evaluate(test_x, test_y))
```
| github_jupyter |
```
import pandas as pd
import numpy as np
import requests
import tensorflow as tf
import autokeras as ak
import kerastuner
import tensorflow_addons as tfa
RS = 69420
# Data Download (may take a few minutes depending on your network)
train_datalink_X = 'https://tournament.datacrunch.com/data/X_train.csv'
train_datalink_y = 'https://tournament.datacrunch.com/data/y_train.csv'
hackathon_data_link = 'https://tournament.datacrunch.com/data/X_test.csv'
# Data for training
train_data = pd.read_csv(train_datalink_X)
# Data for which you will submit your prediction
test_data = pd.read_csv(hackathon_data_link)
# Targets to be predicted
train_targets = pd.read_csv(train_datalink_y)
train_data
#If you don't want to work with time series (Later going to produce new models for each moon)
train_data = train_data.drop(columns=['Moons', 'id'])
test_data = test_data.drop(columns=['Moons', 'id'])
X, y = train_data, train_targets
X.shape, y.shape
from sklearn.model_selection import TimeSeriesSplit
tscv = TimeSeriesSplit()
print(tscv)
for train_index, test_index in tscv.split(X):
print("TRAIN:", train_index, "TEST:", test_index)
X_train, X_test = X.iloc[train_index], X.iloc[test_index]
y_train, y_test = y.iloc[train_index], y.iloc[test_index]
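# note: each iteration overwrites these variables, so only the last split
# (the one with the largest training set) is kept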
X_train.shape, y_train.shape, y_test.shape, X_test.shape
from sklearn.preprocessing import MinMaxScaler
sc = MinMaxScaler()
X_train = sc.fit_transform(X_train)
X_test = sc.transform(X_test)
# Initialize the structured data classifier.
rmse_metrics = tf.metrics.RootMeanSquaredError()
clf = ak.StructuredDataRegressor(overwrite=False,
project_name='DataCrunchAK',
directory=r"D:\DataCrunch",
metrics=[rmse_metrics],
objective=kerastuner.Objective("val_loss", direction='min'),
seed=RS,
max_trials=100)
from tensorflow.keras.callbacks import EarlyStopping
es = EarlyStopping(monitor='val_loss', patience=3, restore_best_weights=True)
%%time
clf.fit(X_train, y_train,
epochs=75,
batch_size=512,
validation_split=0.2,
callbacks=[es],
verbose=1)
model = clf.export_model()
model.summary()
model.evaluate(X_test, y_test)
preds = model.predict(test_data)
train_targets
sc_preds = MinMaxScaler(feature_range=(0.001, 0.999))
preds_sc = sc_preds.fit_transform(preds)
prediction = pd.DataFrame(columns=['target_r', 'target_g', 'target_b'])
prediction['target_r'] = preds_sc[:, 0]
prediction['target_g'] = preds_sc[:, 1]
prediction['target_b'] = preds_sc[:, 2]
prediction
# prediction.to_csv('1051.csv')
prediction.plot(kind='box')
API_KEY = 'F1awQwoH9yW7AgG18Nf8XlZNW9xp23b8vY2hHgMxdDimd3u7Z6Q5brcLydHR'
r = requests.post("https://tournament.datacrunch.com/api/submission",
files = {
"file": ("x", prediction.to_csv().encode('ascii'))
},
data = {
"apiKey": API_KEY
},
)
if r.status_code == 200:
print("Submission submitted :)")
elif r.status_code == 423:
print("ERR: Submissions are close")
print("You can only submit during rounds eg: Friday 7pm GMT+1 to Sunday midnight GMT+1.")
print("Or the server is currently crunching the submitted files, please wait some time before retrying.")
elif r.status_code == 422:
print("ERR: API Key is missing or empty")
print("Did you forget to fill the API_KEY variable?")
elif r.status_code == 404:
print("ERR: Unknown API Key")
print("You should check that the provided API key is valid and is the same as the one you've received by email.")
elif r.status_code == 400:
print("ERR: The file must not be empty")
print("You have send a empty file.")
elif r.status_code == 401:
print("ERR: Your email hasn't been verified")
print("Please verify your email or contact a cruncher.")
elif r.status_code == 429:
print("ERR: Too many submissions")
else:
print("ERR: Server returned: " + str(r.status_code))
print("Ouch! It seems that we were not expecting this kind of result from the server, if the probleme persist, contact a cruncher.")
```
| github_jupyter |
<table> <tr>
<td style="background-color:#ffffff;">
<a href="http://qworld.lu.lv" target="_blank"><img src="..\images\qworld.jpg" width="25%" align="left"> </a></td>
<td style="background-color:#ffffff;vertical-align:bottom;text-align:right;">
prepared by <a href="http://abu.lu.lv" target="_blank">Abuzer Yakaryilmaz</a> (<a href="http://qworld.lu.lv/index.php/qlatvia/" target="_blank">QLatvia</a>)
</td>
</tr></table>
<table width="100%"><tr><td style="color:#bbbbbb;background-color:#ffffff;font-size:11px;font-style:italic;text-align:right;">This cell contains some macros. If there is a problem with displaying mathematical formulas, please run this cell to load these macros. </td></tr></table>
$ \newcommand{\bra}[1]{\langle #1|} $
$ \newcommand{\ket}[1]{|#1\rangle} $
$ \newcommand{\braket}[2]{\langle #1|#2\rangle} $
$ \newcommand{\dot}[2]{ #1 \cdot #2} $
$ \newcommand{\biginner}[2]{\left\langle #1,#2\right\rangle} $
$ \newcommand{\mymatrix}[2]{\left( \begin{array}{#1} #2\end{array} \right)} $
$ \newcommand{\myvector}[1]{\mymatrix{c}{#1}} $
$ \newcommand{\myrvector}[1]{\mymatrix{r}{#1}} $
$ \newcommand{\mypar}[1]{\left( #1 \right)} $
$ \newcommand{\mybigpar}[1]{ \Big( #1 \Big)} $
$ \newcommand{\sqrttwo}{\frac{1}{\sqrt{2}}} $
$ \newcommand{\dsqrttwo}{\dfrac{1}{\sqrt{2}}} $
$ \newcommand{\onehalf}{\frac{1}{2}} $
$ \newcommand{\donehalf}{\dfrac{1}{2}} $
$ \newcommand{\hadamard}{ \mymatrix{rr}{ \sqrttwo & \sqrttwo \\ \sqrttwo & -\sqrttwo }} $
$ \newcommand{\vzero}{\myvector{1\\0}} $
$ \newcommand{\vone}{\myvector{0\\1}} $
$ \newcommand{\stateplus}{\myvector{ \sqrttwo \\ \sqrttwo } } $
$ \newcommand{\stateminus}{ \myrvector{ \sqrttwo \\ -\sqrttwo } } $
$ \newcommand{\myarray}[2]{ \begin{array}{#1}#2\end{array}} $
$ \newcommand{\X}{ \mymatrix{cc}{0 & 1 \\ 1 & 0} } $
$ \newcommand{\I}{ \mymatrix{rr}{1 & 0 \\ 0 & 1} } $
$ \newcommand{\Z}{ \mymatrix{rr}{1 & 0 \\ 0 & -1} } $
$ \newcommand{\Htwo}{ \mymatrix{rrrr}{ \frac{1}{2} & \frac{1}{2} & \frac{1}{2} & \frac{1}{2} \\ \frac{1}{2} & -\frac{1}{2} & \frac{1}{2} & -\frac{1}{2} \\ \frac{1}{2} & \frac{1}{2} & -\frac{1}{2} & -\frac{1}{2} \\ \frac{1}{2} & -\frac{1}{2} & -\frac{1}{2} & \frac{1}{2} } } $
$ \newcommand{\CNOT}{ \mymatrix{cccc}{1 & 0 & 0 & 0 \\ 0 & 1 & 0 & 0 \\ 0 & 0 & 0 & 1 \\ 0 & 0 & 1 & 0} } $
$ \newcommand{\norm}[1]{ \left\lVert #1 \right\rVert } $
$ \newcommand{\pstate}[1]{ \lceil \mspace{-1mu} #1 \mspace{-1.5mu} \rfloor } $
<h2> <font color="blue"> Solutions for </font>Probabilistic Bit</h2>
<a id="task2"></a>
<h3> Task 2 </h3>
Suppose that Fyodor secretly rolls a loaded (biased) die with the bias
$$ Pr(1):Pr(2):Pr(3):Pr(4):Pr(5):Pr(6) = 7:5:4:2:6:1 . $$
Represent your knowledge about the result as a column vector. Note that the size of your column vector should be 6.
You may use python for your calculations.
<h3>Solution</h3>
```
# all portions are stored in a list
all_portions = [7,5,4,2,6,1];
# let's calculate the total portion
total_portion = 0
for i in range(6):
total_portion = total_portion + all_portions[i]
print("total portion is",total_portion)
# find the weight of one portion
one_portion = 1/total_portion
print("the weight of one portion is",one_portion)
print() # print an empty line
# now we can calculate the probabilities of rolling 1,2,3,4,5, and 6
for i in range(6):
print("the probability of rolling",(i+1),"is",(one_portion*all_portions[i]))
```
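To represent this information as the requested column vector of size 6, the probabilities can be collected into a single list (a minimal sketch building on the values computed above):
```
# represent the probabilities of rolling 1,...,6 as a column vector of size 6
my_vector = [one_portion * all_portions[i] for i in range(6)]

# print the column vector entry by entry
for entry in my_vector:
    print(entry)

# the entries should sum to 1
print("total probability is", sum(my_vector))
```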
| github_jupyter |
```
#@title Paste the values then click run
#@markdown <a href="https://techtanic.github.io/duce" target="_blank">Website</a>
email = "[email protected]" #@param {type: "string"}
password = "1234test" #@param {type: "string"}
import os
for index,item in enumerate(["requests","bs4","html5lib","colorama","tqdm","cloudscraper"]):
print(f"installing {index}/5")
os.system(f"pip3 install {item} -U")
from functools import partial
from tqdm import tqdm
tqdm = partial(tqdm, position=0, leave=True)
from colorama import Fore, Back, Style
# colors foreground text:
fc = Fore.CYAN
fg = Fore.GREEN
fw = Fore.WHITE
fr = Fore.RED
fb = Fore.BLUE
flb = Fore.LIGHTBLUE_EX
fbl = Fore.BLACK
fy = Fore.YELLOW
fm = Fore.MAGENTA
# colors background text:
bc = Back.CYAN
bg = Back.GREEN
bw = Back.WHITE
br = Back.RED
bb = Back.BLUE
by = Back.YELLOW
bm = Back.MAGENTA
# colors style text:
sd = Style.DIM
sn = Style.NORMAL
sb = Style.BRIGHT
import json
import random
import re
import threading
import time
import traceback
from urllib.parse import parse_qs, unquote, urlsplit
from decimal import Decimal
import requests
import cloudscraper
from bs4 import BeautifulSoup as bs
# DUCE-CLI
def remove_suffix(input_string:str, suffix:str)->str:
if suffix and input_string.endswith(suffix):
return input_string[: -len(suffix)]
return input_string
def remove_prefix(input_string:str, prefix:str)->str:
if prefix and input_string.startswith(prefix):
return input_string[len(prefix) :]
return input_string
# Scraper
def discudemy():
global du_links
du_links = []
big_all = []
head = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.128 Safari/537.36 Edg/89.0.774.77",
"Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
}
for page in range(1, 4):
r = requests.get("https://www.discudemy.com/all/" + str(page), headers=head)
soup = bs(r.content, "html5lib")
small_all = soup.find_all("section", "card")
big_all.extend(small_all)
du_bar = tqdm(total=len(big_all), desc="Discudemy")
for index, item in enumerate(big_all):
du_bar.update(1)
title = item.string
url = item["href"].split("/")[4]
r = requests.get("https://www.discudemy.com/go/" + url, headers=head)
soup = bs(r.content, "html5lib")
du_links.append(title + "|:|" + soup.find("a", id="couponLink").string)
du_bar.close()
def udemy_freebies():
global uf_links
uf_links = []
big_all = []
for page in range(1, 3):
r = requests.get(
"https://www.udemyfreebies.com/free-udemy-courses/" + str(page)
)
soup = bs(r.content, "html5lib")
small_all = soup.find_all("a", {"class": "theme-img"})
big_all.extend(small_all)
uf_bar = tqdm(total=len(big_all), desc="Udemy Freebies")
for index, item in enumerate(big_all):
uf_bar.update(1)
title = item.img["alt"]
link = requests.get(
"https://www.udemyfreebies.com/out/" + item["href"].split("/")[4]
).url
uf_links.append(title + "|:|" + link)
uf_bar.close()
def tutorialbar():
global tb_links
tb_links = []
big_all = []
for page in range(1, 4):
r = requests.get("https://www.tutorialbar.com/all-courses/page/" + str(page))
soup = bs(r.content, "html5lib")
small_all = soup.find_all(
"h3", class_="mb15 mt0 font110 mobfont100 fontnormal lineheight20"
)
big_all.extend(small_all)
tb_bar = tqdm(total=len(big_all), desc="Tutorial Bar")
for index, item in enumerate(big_all):
tb_bar.update(1)
title = item.a.string
url = item.a["href"]
r = requests.get(url)
soup = bs(r.content, "html5lib")
link = soup.find("a", class_="btn_offer_block re_track_btn")["href"]
if "www.udemy.com" in link:
tb_links.append(title + "|:|" + link)
tb_bar.close()
def real_discount():
global rd_links
rd_links = []
big_all = []
for page in range(1, 3):
r = requests.get("https://real.discount/stores/Udemy?page=" + str(page))
soup = bs(r.content, "html5lib")
small_all = soup.find_all("div", class_="col-xl-4 col-md-6")
big_all.extend(small_all)
rd_bar = tqdm(total=len(big_all), desc="Real Discount")
for index, item in enumerate(big_all):
rd_bar.update(1)
title = item.h3.string
url = "https://real.discount" + item.a["href"]
r = requests.get(url)
soup = bs(r.content, "html5lib")
link = soup.find("div", class_="col-xs-12 col-md-12 col-sm-12 text-center").a[
"href"
]
if link.startswith("http://click.linksynergy.com"):
link = parse_qs(link)["RD_PARM1"][0]
rd_links.append(title+"|:|"+link)
rd_bar.close()
def coursevania():
global cv_links
cv_links = []
r = requests.get("https://coursevania.com/courses/")
soup = bs(r.content, "html5lib")
nonce = json.loads([script.string for script in soup.find_all('script') if script.string and "load_content" in script.string][0].strip("_mlv = norsecat;\n"))["load_content"]
r = requests.get(
"https://coursevania.com/wp-admin/admin-ajax.php?&template=courses/grid&args={%22posts_per_page%22:%2230%22}&action=stm_lms_load_content&nonce="
+ nonce
+ "&sort=date_high"
).json()
soup = bs(r["content"], "html5lib")
small_all = soup.find_all("div", {"class": "stm_lms_courses__single--title"})
cv_bar = tqdm(total=len(small_all), desc="Course Vania")
for index, item in enumerate(small_all):
cv_bar.update(1)
title = item.h5.string
r = requests.get(item.a["href"])
soup = bs(r.content, "html5lib")
cv_links.append(
title + "|:|" + soup.find("div", {"class": "stm-lms-buy-buttons"}).a["href"]
)
cv_bar.close()
def idcoupons():
global idc_links
idc_links = []
big_all = []
for page in range(1, 6):
r = requests.get(
"https://idownloadcoupon.com/product-category/udemy-2/page/" + str(page)
)
soup = bs(r.content, "html5lib")
small_all = soup.find_all("a", attrs={"class": "button product_type_external"})
big_all.extend(small_all)
idc_bar = tqdm(total=len(big_all), desc="IDownloadCoupons")
for index, item in enumerate(big_all):
idc_bar.update(1)
title = item["aria-label"]
link = unquote(item["href"])
if link.startswith("https://ad.admitad.com"):
link = parse_qs(link)["ulp"][0]
elif link.startswith("https://click.linksynergy.com"):
link = parse_qs(link)["murl"][0]
idc_links.append(title + "|:|" + link)
idc_bar.close()
def enext():
    global en_links
    en_links = []
r = requests.get("https://e-next.in/e/udemycoupons.php")
soup = bs(r.content, "html5lib")
big_all = soup.find("div", {"class": "scroll-box"}).find_all("p", {"class": "p2"})
en_bar = tqdm(total=len(big_all), desc="E-next")
for i in big_all:
en_bar.update(1)
title = i.text[11:].strip().removesuffix("Enroll Now free").strip()
link = i.a["href"]
en_links.append(title + "|:|" + link)
en_bar.close()
# Constants
version = "v1.6"
def create_scrape_obj():
funcs = {
"Discudemy": threading.Thread(target=discudemy, daemon=True),
"Udemy Freebies": threading.Thread(target=udemy_freebies, daemon=True),
"Tutorial Bar": threading.Thread(target=tutorialbar, daemon=True),
"Real Discount": threading.Thread(target=real_discount, daemon=True),
"Course Vania": threading.Thread(target=coursevania, daemon=True),
"IDownloadCoupons": threading.Thread(target=idcoupons, daemon=True),
"E-next": threading.Thread(target=enext, daemon=True),
}
return funcs
################
def cookiejar(
client_id,
access_token,
csrf_token,
):
cookies = dict(
client_id=client_id,
access_token=access_token,
csrf_token=csrf_token,
)
return cookies
def get_course_id(url):
r = requests.get(url, allow_redirects=False)
if r.status_code in (404, 302, 301):
return False
if "/course/draft/" in url:
return False
soup = bs(r.content, "html5lib")
try:
courseid = soup.find(
"div",
attrs={"data-content-group": "Landing Page"},
)["data-course-id"]
except:
courseid = soup.find(
"body", attrs={"data-module-id": "course-landing-page/udlite"}
)["data-clp-course-id"]
# with open("problem.txt","w",encoding="utf-8") as f:
# f.write(str(soup))
return courseid
def get_course_coupon(url):
query = urlsplit(url).query
params = parse_qs(query)
try:
params = {k: v[0] for k, v in params.items()}
return params["couponCode"]
except:
return ""
def course_landing_api(courseid):
r = s.get(
"https://www.udemy.com/api-2.0/course-landing-components/"
+ courseid
+ "/me/?components=purchase"
).json()
try:
purchased = r["purchase"]["data"]["purchase_date"]
except:
purchased = False
try:
amount = r["purchase"]["data"]["list_price"]["amount"]
except:
print(r["purchase"]["data"])
return purchased, Decimal(amount)
def remove_duplicates(l):
l = l[::-1]
for i in l:
while l.count(i) > 1:
l.remove(i)
return l[::-1]
def update_available():
if remove_prefix(version,"v") < remove_prefix(requests.get(
"https://api.github.com/repos/techtanic/Discounted-Udemy-Course-Enroller/releases/latest"
).json()["tag_name"],"v"):
print(by + fr + " Update Available ")
else:
return
def check_login():
for retry in range(4):
s = cloudscraper.CloudScraper()
r = s.get(
"https://www.udemy.com/join/signup-popup/",
)
soup = bs(r.text, "html5lib")
csrf_token = soup.find("input", {"name": "csrfmiddlewaretoken"})["value"]
data = {
"email": email,
"password": password,
"locale": "en_US",
"csrfmiddlewaretoken": csrf_token,
}
s.headers.update({"Referer": "https://www.udemy.com/join/signup-popup/"})
try:
r = s.post(
"https://www.udemy.com/join/login-popup/?locale=en_US",
data=data,
allow_redirects=False,
)
except cloudscraper.exceptions.CloudflareChallengeError:
if retry == 3:
print("Cloudflare is blocking your requests try again after an hour")
!kill -9 -1
retry -= 1
continue
if r.status_code != 302:
soup = bs(r.content, "html5lib")
txt = soup.find("div", class_="alert alert-danger js-error-alert").text.strip()
if txt[0] == "Y":
print("Too many logins per hour try later")
elif txt[0] == "T":
print("Email or password incorrect")
else:
print(txt)
time.sleep(1)
!kill -9 -1
cookies = cookiejar(r.cookies["client_id"], r.cookies["access_token"], csrf_token)
head = {
"authorization": "Bearer " + r.cookies["access_token"],
"accept": "application/json, text/plain, */*",
"x-requested-with": "XMLHttpRequest",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.128 Safari/537.36 Edg/89.0.774.77",
"x-forwarded-for": str(
".".join(map(str, (random.randint(0, 255) for _ in range(4))))
),
"x-udemy-authorization": "Bearer " + r.cookies["access_token"],
"content-type": "application/json;charset=UTF-8",
"origin": "https://www.udemy.com",
"referer": "https://www.udemy.com/",
"dnt": "1",
}
s = requests.session()
s.cookies.update(cookies)
s.headers.update(head)
s.keep_alive = False
r = s.get(
"https://www.udemy.com/api-2.0/contexts/me/?me=True&Config=True"
).json()
currency = r["Config"]["price_country"]["currency"]
user = ""
user = r["me"]["display_name"]
return head, user, currency, s
# -----------------
def free_checkout(coupon, courseid):
payload = (
'{"checkout_environment":"Marketplace","checkout_event":"Submit","shopping_info":{"items":[{"discountInfo":{"code":"'
+ coupon
+ '"},"buyable":{"type":"course","id":'
+ str(courseid)
+ ',"context":{}},"price":{"amount":0,"currency":"'
+ currency
+ '"}}]},"payment_info":{"payment_vendor":"Free","payment_method":"free-method"}}'
)
r = s.post(
"https://www.udemy.com/payment/checkout-submit/",
data=payload,
verify=False,
)
return r.json()
def free_enroll(courseid):
s.get(
"https://www.udemy.com/course/subscribe/?courseId=" + str(courseid)
)
r = s.get(
"https://www.udemy.com/api-2.0/users/me/subscribed-courses/"
+ str(courseid)
+ "/?fields%5Bcourse%5D=%40default%2Cbuyable_object_type%2Cprimary_subcategory%2Cis_private"
)
return r.json()
# -----------------
def auto(list_st):
se_c, ae_c, e_c, ex_c, as_c = 0, 0, 0, 0, 0
for index, link in enumerate(list_st):
title = link.split("|:|")
print(fy + str(index) + " " + title[0], end=" ")
link = title[1]
print(fb + link)
course_id = get_course_id(link)
if course_id:
coupon_id = get_course_coupon(link)
purchased, amount = course_landing_api(course_id)
if not purchased:
if coupon_id:
slp = ""
js = free_checkout(coupon_id, course_id)
try:
if js["status"] == "succeeded":
print(fg + "Successfully Enrolled\n")
se_c += 1
as_c += amount
elif js["status"] == "failed":
# print(js)
print(fr + "Coupon Expired\n")
e_c += 1
except:
try:
msg = js["detail"]
print(fr + msg)
print()
slp = int(re.search(r"\d+", msg).group(0))
except:
# print(js)
print(fr + "Expired Coupon\n")
e_c += 1
if slp != "":
slp += 5
print(
fr
+ ">>> Pausing execution of script for "
+ str(slp)
+ " seconds\n",
)
time.sleep(slp)
else:
time.sleep(4)
elif not coupon_id:
js = free_enroll(course_id)
try:
if js["_class"] == "course":
print(fg + "Successfully Subscribed\n")
se_c += 1
as_c += amount
except:
print(fr + "COUPON MIGHT HAVE EXPIRED\n")
e_c += 1
elif purchased:
print(flb + purchased)
print()
ae_c += 1
elif not course_id:
print(fr + "Course Doesn't exist\n")
print(f"Successfully Enrolled: {se_c}")
print(f"Already Enrolled: {ae_c}")
print(f"Amount Saved: ${round(as_c,2)}")
print(f"Expired Courses: {e_c}")
print(f"Excluded Courses: {ex_c}")
def random_color():
col = ["green", "yellow", "white"]
return random.choice(col)
##########################################
def main1():
try:
links_ls = []
for index in all_functions:
all_functions[index].start()
time.sleep(0.09)
for t in all_functions:
all_functions[t].join()
time.sleep(1)
for link_list in [
"du_links",
"uf_links",
"tb_links",
"rd_links",
"cv_links",
"idc_links",
"en_links",
]:
try:
links_ls += eval(link_list)
except:
pass
auto(remove_duplicates(links_ls))
except:
e = traceback.format_exc()
print(e)
############## MAIN ############# MAIN############## MAIN ############# MAIN ############## MAIN ############# MAIN ###########
print(fb+"Trying to login")
try:
head, user, currency, s= check_login()
print(fg+f"Logged in as {user}")
except Exception as e:
print(fr+f"Login Error")
e = traceback.format_exc()
print(e)
try:
update_available()
except:
pass
all_functions = create_scrape_obj()
tm = threading.Thread(target=main1, daemon=True)
tm.start()
tm.join()
try:
update_available()
except:
pass
```
| github_jupyter |
# Porto Seguro's Safe Driving Prediction
Inaccuracies in a car insurance company's claim predictions raise the cost of insurance for good drivers and reduce the price for bad ones. Porto Seguro, one of Brazil's largest auto and homeowner insurance companies, knows this problem well.
In the [Porto Seguro Safe Driver Prediction competition](https://www.kaggle.com/c/porto-seguro-safe-driver-prediction), the challenge is to build a model that predicts the probability that a driver will initiate an auto insurance claim in the next year. While Porto Seguro has used machine learning for the past 20 years, they’re looking to Kaggle’s machine learning community to explore new, more powerful methods. A more accurate prediction will allow them to further tailor their prices, and hopefully make auto insurance coverage more accessible to more drivers.
Luckily for you, the data scientist on your team has already built a machine learning model to solve the Porto Seguro problem. The solution notebook has steps to load the data, split it into test and train sets, and train, evaluate and save a LightGBM model that will be used in the future challenges.
#### Hint: use shift + enter to run the code cells below. Once the marker next to a cell changes from [*] to a number, the cell has finished running.
## Import Needed Packages
Import the packages needed for this solution notebook. The most widely used packages for machine learning are [scikit-learn](https://scikit-learn.org/stable/), [pandas](https://pandas.pydata.org/docs/getting_started/index.html#getting-started), and [numpy](https://numpy.org/). They provide a broad set of features, including many clustering, regression and classification algorithms, which makes them a good choice for data mining and data analysis. In this notebook, we're using a training function from [lightgbm](https://lightgbm.readthedocs.io/en/latest/index.html).
```
import os
import numpy as np
import pandas as pd
import lightgbm
from sklearn.model_selection import train_test_split
import joblib
from sklearn import metrics
```
## Load Data
Load the training dataset from the `../data/` directory. `df.shape` shows the dimensions of the dataset, and `df.head()` displays its first 5 rows.
```
DATA_DIR = "../data"
data_df = pd.read_csv(os.path.join(DATA_DIR, 'porto_seguro_safe_driver_prediction_input.csv'))
print(data_df.shape)
data_df.head()
```
## Split Data into Train and Validation Sets
Partitioning data into training, validation, and holdout sets allows you to develop highly accurate models that are relevant to data that you collect in the future, not just the data the model was trained on.
In machine learning, features are the measurable properties of the object you're trying to analyze. Typically, the features are the columns of the data you are training your model with, minus the label. A label (categorical) or target (regression) is the output you get from your model after training it.
```
features = data_df.drop(['target', 'id'], axis = 1)
labels = np.array(data_df['target'])
features_train, features_valid, labels_train, labels_valid = train_test_split(features, labels, test_size=0.2, random_state=0)
train_data = lightgbm.Dataset(features_train, label=labels_train)
valid_data = lightgbm.Dataset(features_valid, label=labels_valid, free_raw_data=False)
```
## Train Model
A machine learning model is an algorithm which learns features from the given data to produce labels, which may be continuous or categorical (regression and classification, respectively). In other words, it tries to relate the given data to its labels, just as the human brain does.
In this cell, the data scientist used an algorithm called [LightGBM](https://lightgbm.readthedocs.io/en/latest/), a gradient boosting framework that works well on unbalanced datasets. AUC will be explained in the next cell.
```
parameters = {
'learning_rate': 0.02,
'boosting_type': 'gbdt',
'objective': 'binary',
'metric': 'auc',
'sub_feature': 0.7,
'num_leaves': 60,
'min_data': 100,
'min_hessian': 1,
'verbose': 4
}
model = lightgbm.train(parameters,
train_data,
valid_sets=valid_data,
num_boost_round=500,
early_stopping_rounds=20)
```
## Evaluate Model
Evaluating performance is an essential task in machine learning. In this case, because this is a classification problem, the data scientist elected to use an AUC-ROC curve. When we need to check or visualize the performance of a classification model, we use the AUC (Area Under The Curve) of the ROC (Receiver Operating Characteristic) curve. It is one of the most important evaluation metrics for checking any classification model's performance.
<img src="https://www.researchgate.net/profile/Oxana_Trifonova/publication/276079439/figure/fig2/AS:614187332034565@1523445079168/An-example-of-ROC-curves-with-good-AUC-09-and-satisfactory-AUC-065-parameters.png"
alt="Markdown Monster icon"
style="float: left; margin-right: 12px; width: 320px; height: 239px;" />
```
predictions = model.predict(valid_data.data)
fpr, tpr, thresholds = metrics.roc_curve(valid_data.label, predictions)
model_metrics = {"auc": (metrics.auc(fpr, tpr))}
print(model_metrics)
```
## Save Model
In machine learning, we need to save trained models to a file and restore them so they can be reused later, for example to compare them with other models or to test them on new data. Saving the model is called serialization, while restoring it is called deserialization.
```
model_name = "lgbm_binary_model.pkl"
joblib.dump(value=model, filename=model_name)
```
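To illustrate the deserialization step mentioned above, here is a minimal sketch (not part of the original notebook) that loads the file written by the cell above with joblib and scores the validation set again:
```
# Deserialize the saved LightGBM model and re-evaluate it on the validation set
loaded_model = joblib.load("lgbm_binary_model.pkl")
new_predictions = loaded_model.predict(features_valid)
print("AUC of the reloaded model:", metrics.roc_auc_score(labels_valid, new_predictions))
```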
| github_jupyter |
```
%matplotlib inline
import pandas as pd
import numpy as np
from matplotlib import pyplot as plt
plt.style.use('ggplot')
import pickle
from sklearn.preprocessing import LabelEncoder
import seaborn as sns
color = sns.color_palette()
sns.set(rc={'figure.figsize':(12,8)})
import sklearn
from sklearn.preprocessing import MinMaxScaler,StandardScaler,LabelEncoder
from sklearn.model_selection import train_test_split
from sklearn.model_selection import RandomizedSearchCV
from sklearn.metrics import r2_score
from sklearn.model_selection import cross_val_score
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import Ridge
from sklearn.linear_model import Lasso
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import GradientBoostingRegressor
from sklearn import metrics
import warnings
warnings.filterwarnings('ignore')
df=pd.read_csv('Cardata-Cleaned.csv',index_col=0)
df.info()
```
# Exploratory Data Analysis (EDA)
## Univariate Analysis
```
cat_cols = ['Fuel','Seller Type','Transmission','Owner']
i=0
while i < 4:
fig = plt.figure(figsize=[15,6])
plt.subplot(1,2,1)
sns.countplot(x=cat_cols[i], data=df)
i += 1
plt.subplot(1,2,2)
sns.countplot(x=cat_cols[i], data=df)
i += 1
plt.show()
num_cols = ['Selling Price','Current Value','KMs Driven','Year','max_power','Mileage','Engine','Gear Box']
i=0
while i < 8:
fig = plt.figure(figsize=[15,20])
plt.subplot(4,2,1)
sns.boxplot(x=num_cols[i], data=df)
i += 1
plt.subplot(4,2,2)
sns.boxplot(x=num_cols[i], data=df)
i += 1
```
## Bivariate Analysis
```
sns.set(rc={'figure.figsize':(15,15)})
sns.heatmap(df.corr(),annot=True)
print(df['Fuel'].value_counts(),'\n')
print(df['Seller Type'].value_counts(),'\n')
print(df['Transmission'].value_counts(),'\n')
print(df['Owner'].value_counts(),'\n')
df.pivot_table(values='Selling Price', index = 'Seller Type', columns= 'Fuel')
```
# Data Preparation
## Creating Dummies for Categorical Features
```
label_encoder = LabelEncoder()
df['Owner']= label_encoder.fit_transform(df['Owner'])
final_dataset=df[['Year','Selling Price','Current Value','KMs Driven','Fuel',
'Seller Type','max_power','Transmission','Owner','Mileage','Engine','Seats','Gear Box']]
final_dataset=pd.get_dummies(final_dataset,drop_first=True)
sns.set(rc={'figure.figsize':(15,15)})
sns.heatmap(final_dataset.corr(),annot=True,cmap="RdBu")
final_dataset.corr()['Selling Price'].sort_values(ascending=False)
y = final_dataset['Selling Price']
X = final_dataset.drop('Selling Price',axis=1)
```
## Feature Importance
```
from sklearn.ensemble import ExtraTreesRegressor
model = ExtraTreesRegressor()
model.fit(X,y)
print(model.feature_importances_)
sns.set(rc={'figure.figsize':(12,8)})
feat_importances = pd.Series(model.feature_importances_, index=X.columns)
feat_importances.nlargest(5).plot(kind='barh')
plt.show()
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=1)
print("x train: ",X_train.shape)
print("x test: ",X_test.shape)
print("y train: ",y_train.shape)
print("y test: ",y_test.shape)
CV = []
R2_train = []
R2_test = []
MAE=[]
MSE=[]
RMSE=[]
def car_pred_model(model):
# R2 score of train set
y_pred_train = model.predict(X_train)
R2_train_model = r2_score(y_train,y_pred_train)
R2_train.append(round(R2_train_model,2))
# R2 score of test set
y_pred_test = model.predict(X_test)
R2_test_model = r2_score(y_test,y_pred_test)
R2_test.append(round(R2_test_model,2))
# R2 mean of train set using Cross validation
cross_val = cross_val_score(model ,X_train ,y_train ,cv=5)
cv_mean = cross_val.mean()
CV.append(round(cv_mean,2))
print("Train R2-score :",round(R2_train_model,2))
print("Test R2-score :",round(R2_test_model,2))
print("Train CV scores :",cross_val)
print("Train CV mean :",round(cv_mean,2))
MAE.append(metrics.mean_absolute_error(y_test, y_pred_test))
MSE.append(metrics.mean_squared_error(y_test, y_pred_test))
RMSE.append(np.sqrt(metrics.mean_squared_error(y_test, y_pred_test)))
print('MAE:', metrics.mean_absolute_error(y_test, y_pred_test))
print('MSE:', metrics.mean_squared_error(y_test, y_pred_test))
print('RMSE:', np.sqrt(metrics.mean_squared_error(y_test, y_pred_test)))
fig, ax = plt.subplots(1,2,figsize = (12,6))
    ax[0].set_title('Residual Plot of Test samples')
    sns.distplot((y_test-y_pred_test),hist = True,ax = ax[0])
    ax[0].set_xlabel('y_test - y_pred_test')
# Y_test vs Y_train scatter plot
ax[1].set_title('y_test vs y_pred_test')
ax[1].scatter(x = y_test, y = y_pred_test)
ax[1].set_xlabel('y_test')
ax[1].set_ylabel('y_pred_test')
plt.show()
```
## Linear Regression
```
lr = LinearRegression()
lr.fit(X_train,y_train)
car_pred_model(lr)
```
## Ridge
```
# Creating Ridge model object
rg = Ridge()
# range of alpha
alpha = np.logspace(-3,3,num=14)
# Creating RandomizedSearchCV to find the best estimator of hyperparameter
rg_rs = RandomizedSearchCV(estimator = rg, param_distributions = dict(alpha=alpha))
rg_rs.fit(X_train,y_train)
car_pred_model(rg_rs)
```
## Lasso
```
ls = Lasso()
alpha = np.logspace(-3,3,num=14) # range for alpha
ls_rs = RandomizedSearchCV(estimator = ls, param_distributions = dict(alpha=alpha))
ls_rs.fit(X_train,y_train)
car_pred_model(ls_rs)
```
## Random Forest
```
rf = RandomForestRegressor()
# Number of trees in Random forest
n_estimators=list(range(500,1000,100))
# Maximum number of levels in a tree
max_depth=list(range(4,9,4))
# Minimum number of samples required to split an internal node
min_samples_split=list(range(4,9,2))
# Minimum number of samples required to be at a leaf node.
min_samples_leaf=[1,2,5,7]
# Number of features to be considered at each split
max_features=['auto','sqrt']
# Hyperparameters dict
param_grid = {"n_estimators":n_estimators,
"max_depth":max_depth,
"min_samples_split":min_samples_split,
"min_samples_leaf":min_samples_leaf,
"max_features":max_features}
rf_rs = RandomizedSearchCV(estimator = rf, param_distributions = param_grid,cv = 5, random_state=42, n_jobs = 1)
rf_rs.fit(X_train,y_train)
car_pred_model(rf_rs)
```
## Gradient Boosting
```
gb = GradientBoostingRegressor()
# Rate at which correcting is being made
learning_rate = [0.001, 0.01, 0.1, 0.2]
# Number of trees in Gradient boosting
n_estimators=list(range(500,1000,100))
# Maximum number of levels in a tree
max_depth=list(range(4,9,4))
# Minimum number of samples required to split an internal node
min_samples_split=list(range(4,9,2))
# Minimum number of samples required to be at a leaf node.
min_samples_leaf=[1,2,5,7]
# Number of features to be considered at each split
max_features=['auto','sqrt']
# Hyperparameters dict
param_grid = {"learning_rate":learning_rate,
"n_estimators":n_estimators,
"max_depth":max_depth,
"min_samples_split":min_samples_split,
"min_samples_leaf":min_samples_leaf,
"max_features":max_features}
gb_rs = RandomizedSearchCV(estimator = gb, param_distributions = param_grid,cv = 5, random_state=42, n_jobs = 1)
gb_rs.fit(X_train,y_train)
car_pred_model(gb_rs)
gb_rs.best_params_
gb_rs.best_score_
Models = ["Linear Regression","Ridge","Lasso","RandomForest Regressor","GradientBoosting Regressor"]
score_comparison=pd.DataFrame({'Model': Models,'R Squared(Train)': R2_train,'R Squared(Test)': R2_test,'CV score mean(Train)': CV,
'Mean Absolute Error':MAE,'Mean Squared Error':MSE, 'Root Mean Squared Error':RMSE})
score_comparison
file = open('random_forest_regression_model.pkl', 'wb')
# dump information to that file
pickle.dump(rf_rs, file)
```
| github_jupyter |
```
import os
import pickle
from neutrinomass.completions import EffectiveOperator, Completion
from neutrinomass.database import ExoticField
from neutrinomass.database import ModelDataFrame, EXOTICS, TERMS, MVDF
from neutrinomass.completions import EFF_OPERATORS
from neutrinomass.completions import DERIV_EFF_OPERATORS
DATA_PATH = "/home/garj/work/neutrinomass/neutrinomass/database"
DATA = pickle.load(open(os.path.join(DATA_PATH, "unfiltered.p"), "rb"))
UNF = ModelDataFrame.new(data=DATA, exotics=EXOTICS, terms=TERMS)
STR_UNF = UNF.drop_duplicates(["stringent_num"], keep="first")
LAGS = len(STR_UNF)
print(f"Number of neutrino-mass mechanisms: {LAGS}")
DEMO_UNF = UNF.drop_duplicates(["democratic_num"], keep="first")
MODELS = len(DEMO_UNF)
print(f"Number of models: {MODELS}")
STR_MVDF = MVDF.drop_duplicates(["stringent_num"], keep="first")
print(f"Number of filtered neutrino-mass mechanisms: {len(STR_MVDF)}")
DEMO_MVDF = MVDF.drop_duplicates(["democratic_num"], keep="first")
print(f"Number of filtered neutrino-mass mechanisms: {len(DEMO_MVDF)}")
FIL_DF = MVDF.drop_duplicates(['democratic_num', 'dim'], keep="first")
UNF_DF = UNF.drop_duplicates(['democratic_num', 'dim'], keep="first")
print(f"After filtering, there are {len(FIL_DF[FIL_DF['dim'] == 5])} models derived from dimension-5 operators.")
print(f"After filtering, there are {len(FIL_DF[FIL_DF['dim'] == 9])} models derived from dimension-9 operators.")
print(f"After filtering, there are {len(FIL_DF[FIL_DF['dim'] == 11])} models derived from dimension-11 operators.")
print(f"The total of these is {len(FIL_DF[FIL_DF['dim'] == 5]) + len(FIL_DF[FIL_DF['dim'] == 9]) + len(FIL_DF[FIL_DF['dim'] == 11])}")
OPS = {**EFF_OPERATORS, **DERIV_EFF_OPERATORS}
labels, total, demo, dimensions = [], [], [], []
for k in OPS:
labels.append(k)
total.append(len(UNF_DF[UNF_DF["op"] == k]))
demo.append(len(FIL_DF[FIL_DF["op"] == k]))
dimensions.append(OPS[k].mass_dimension)
NHL = STR_UNF.terms[("F,00,0,0,0", "H", "L")]
NHSigma = STR_UNF.terms[("F,00,2,0,0", "H", "L")]
HHXi1 = STR_UNF.terms[("H", "H", "S,00,2,-1,0")]
LLXi1 = STR_UNF.terms[("L", "L", "S,00,2,1,0")]
N = STR_UNF.exotics["F,00,0,0,0"]
Sigma = STR_UNF.exotics["F,00,2,0,0"]
Xi1 = STR_UNF.exotics["S,00,2,1,0"]
N_NHL_lags = len(STR_UNF[STR_UNF["stringent_num"] % NHL == 0])
N_other_lags = len(STR_UNF[(STR_UNF["stringent_num"] % NHL != 0) & (STR_UNF["democratic_num"] % N == 0)])
Sigma_NHSigma_lags = len(STR_UNF[STR_UNF["stringent_num"] % NHSigma == 0])
Sigma_other_lags = len(STR_UNF[(STR_UNF["stringent_num"] % NHSigma != 0) & (STR_UNF["democratic_num"] % Sigma == 0)])
Xi1_HHXi1_lags = len(STR_UNF[STR_UNF["stringent_num"] % HHXi1 == 0])
Xi1_LLXi1_lags = len(STR_UNF[STR_UNF["stringent_num"] % LLXi1 == 0])
Xi1_both_lags = len(STR_UNF[(STR_UNF["stringent_num"] % HHXi1 == 0) & (STR_UNF["stringent_num"] % LLXi1 == 0)])
Xi1_other_lags = len(STR_UNF[(STR_UNF["stringent_num"] % HHXi1 != 0) & (STR_UNF["stringent_num"] % LLXi1 != 0) & (STR_UNF["democratic_num"] % Xi1 == 0)])
N_models = len(DEMO_UNF[DEMO_UNF["democratic_num"] % N == 0])
Sigma_models = len(DEMO_UNF[DEMO_UNF["democratic_num"] % Sigma == 0])
Xi1_models = len(DEMO_UNF[DEMO_UNF["democratic_num"] % Xi1 == 0])
# latex table
print(r"""
\begin{tabular}{ccll}
\toprule
Field & Interactions & Lagrangians & Collected models \\
\midrule
\multirow{2}{*}{$N \sim (\mathbf{1}, \mathbf{1}, 0)_{F}$} & $L H N$ & %s (%s) & \multirow{2}{*}{%s (%s)} \\
& Other & %s (%s) & \\
\midrule
\multirow{2}{*}{$\Sigma \sim (\mathbf{1}, \mathbf{3}, 0)_{F}$} & $L H \Sigma$ & %s (%s) & \multirow{2}{*}{%s (%s)} \\
& Other & %s (%s) & \\
\midrule
\multirow{4}{*}{$\Xi_{1} \sim (\mathbf{1}, \mathbf{3}, 1)_{S}$} & $L L \Xi_{1}$ & %s (%s) & \multirow{4}{*}{%s (%s)} \\
& $H H \Xi_{1}^{\dagger}$ & %s (%s) & \\
& Both & %s (%s) & \\
& Other & %s (%s) & \\
\bottomrule
\end{tabular}
""" % (
f"{N_NHL_lags:,}", f"{100 * N_NHL_lags / LAGS:.1f}\%",
f"{N_models:,}", f"{100 * N_models / MODELS:.1f}\%",
f"{N_other_lags:,}", f"{100 * N_other_lags / LAGS:.1f}\%",
f"{Sigma_NHSigma_lags:,}", f"{100 * Sigma_NHSigma_lags / LAGS:.1f}\%",
f"{Sigma_models:,}", f"{100 * Sigma_models / MODELS:.1f}\%",
f"{Sigma_other_lags:,}", f"{100 * Sigma_other_lags / LAGS:.1f}\%",
f"{Xi1_LLXi1_lags:,}", f"{100 * Xi1_LLXi1_lags / LAGS:.1f}\%",
f"{Xi1_models:,}", f"{100 * Xi1_models / MODELS:.1f}\%",
f"{Xi1_HHXi1_lags:,}", f"{100 * Xi1_HHXi1_lags / LAGS:.1f}\%",
f"{Xi1_both_lags:,}", f"{100 * Xi1_both_lags / LAGS:.1f}\%",
f"{Xi1_other_lags:,}", f"{100 * Xi1_other_lags / LAGS:.1f}\%",
)
)
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
SMALL_SIZE = 15
MEDIUM_SIZE = 20
BIGGER_SIZE = 20
plt.rc('font', size=SMALL_SIZE) # controls default text sizes
plt.rc('axes', titlesize=SMALL_SIZE) # fontsize of the axes title
plt.rc('axes', labelsize=MEDIUM_SIZE) # fontsize of the x and y labels
plt.rc('xtick', labelsize=SMALL_SIZE) # fontsize of the tick labels
plt.rc('ytick', labelsize=SMALL_SIZE) # fontsize of the tick labels
plt.rc('legend', fontsize=SMALL_SIZE) # legend fontsize
plt.rc('figure', titlesize=BIGGER_SIZE) # fontsize of the figure title
plt.tight_layout()
plt.rcParams.update({
"text.usetex": True,
"font.family": "serif",
"font.serif": ["Computer Modern Roman"]}
)
sns.set_palette("muted")
latex_labels = []
for l in labels:
if "pp" in l:
new_l = l.replace("pp", "^{\prime\prime}")
elif "p" in l:
new_l = l.replace("p", "^\prime")
else:
new_l = l
latex_labels.append("$" + new_l + "$")
filter_bar_df = pd.DataFrame(data={
"Operator": latex_labels,
"Unfiltered": total,
"Democratic": demo,
"Dimension": dimensions
})
demo_5 = sum(filter_bar_df[filter_bar_df["Dimension"] == 5]["Democratic"])
demo_7 = sum(filter_bar_df[filter_bar_df["Dimension"] == 7]["Democratic"])
demo_9 = sum(filter_bar_df[filter_bar_df["Dimension"] == 9]["Democratic"])
demo_11 = sum(filter_bar_df[filter_bar_df["Dimension"] == 11]["Democratic"])
unf_5 = sum(filter_bar_df[filter_bar_df["Dimension"] == 5]["Unfiltered"])
unf_7 = sum(filter_bar_df[filter_bar_df["Dimension"] == 7]["Unfiltered"])
unf_9 = sum(filter_bar_df[filter_bar_df["Dimension"] == 9]["Unfiltered"])
unf_11 = sum(filter_bar_df[filter_bar_df["Dimension"] == 11]["Unfiltered"])
barplot_df = pd.DataFrame(
{'Dimension': [5, 7, 9, 11],
'Democratic': [demo_5, demo_7, demo_9, demo_11],
'Unfiltered': [unf_5-demo_5, unf_7-demo_7, unf_9-demo_9, unf_11-demo_11]}
)
ax = barplot_df.plot.bar(x="Dimension", stacked=True, rot=0)
ax.set_yscale("log")
ax.set_ylabel("Number of models")
plt.tight_layout()
plt.savefig("/home/garj/filter_barchart_dimension.pdf")
plt.savefig("/home/garj/filter_barchart_dimension.png")
ops_filter_bar_df = filter_bar_df[filter_bar_df["Dimension"] < 11]
f, ax = plt.subplots(figsize=(7, 10))
sns.barplot(x="Unfiltered", y="Operator", data=ops_filter_bar_df, label="Unfiltered", color=sns.color_palette()[1])
sns.barplot(x="Democratic", y="Operator", data=ops_filter_bar_df, label="Democratic", color=sns.color_palette()[0])
ax.set_xscale("log")
ax.legend(ncol=2, loc="upper right", frameon=True)
ax.set(xlim=(0, 10000), ylabel="Operator", xlabel="Number of models")
ax.text(x=2000, y=7, s="$d < 11$", fontsize=20)
for tick in ax.yaxis.get_major_ticks()[1::2]:
tick.set_pad(40)
plt.tight_layout()
plt.savefig("/home/garj/filter_barchart_operators579.pdf")
plt.savefig("/home/garj/filter_barchart_operators579.png")
import seaborn as sns
import matplotlib.pyplot as plt
ops_filter_bar_df = filter_bar_df[filter_bar_df["Dimension"] == 11]
f, ax = plt.subplots(figsize=(7, 15))
sns.barplot(x="Unfiltered", y="Operator", data=ops_filter_bar_df, label="Unfiltered", color=sns.color_palette()[1])
sns.barplot(x="Democratic", y="Operator", data=ops_filter_bar_df, label="Democratic", color=sns.color_palette()[0])
ax.set_xscale("log")
ax.legend(ncol=2, loc="upper right", frameon=True)
ax.set(xlim=(0, 100000), ylabel="Operator", xlabel="Number of models")
ax.text(x=12000, y=9, s="$d = 11$", fontsize=22)
for tick in ax.yaxis.get_major_ticks()[1::2]:
tick.set_pad(40)
plt.tight_layout()
plt.savefig("/home/garj/filter_barchart_operators11.pdf")
plt.savefig("/home/garj/filter_barchart_operators11.png")
```
| github_jupyter |
```
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
df=pd.read_csv('data_1000.csv')
data=df[['correct_answ','bleu_score','levenstein_sim','cosine_sim','jaccard_sim']]
data.head(10)
data.describe()
data.boxplot(by='correct_answ', column=['bleu_score', 'levenstein_sim', 'cosine_sim', 'jaccard_sim'],
grid=True, figsize=(15,15))
X=data[['bleu_score','levenstein_sim','cosine_sim','jaccard_sim']]
y=data['correct_answ']
X.hist(bins=50,figsize=(20,15))
from pandas.plotting import scatter_matrix
scatter_matrix(X, figsize=(14, 10))
corr_matrix = data.corr()
corr_matrix["correct_answ"].sort_values(ascending=False)
data['cos_lev']=data['cosine_sim']+data['levenstein_sim']
data['cos_bleu']=data['cosine_sim']+data['bleu_score']
data['cos_jac']=data['cosine_sim']+data['jaccard_sim']
data['lev_bleu']=data['levenstein_sim']+data['bleu_score']
data['lev_jac']=data['levenstein_sim']+data['jaccard_sim']
data['bleu_jac']=data['bleu_score']+data['jaccard_sim']
corr_matrix = data.corr()
corr_matrix["correct_answ"].sort_values(ascending=False)
corr_matrix
X=data[['bleu_score','levenstein_sim','cosine_sim','jaccard_sim','cos_lev','cos_bleu','cos_jac','lev_bleu','lev_jac','bleu_jac']]
y=data[['correct_answ']]
from sklearn.model_selection import StratifiedShuffleSplit
split = StratifiedShuffleSplit(n_splits=1, test_size=0.2, random_state=42)
for train_index, test_index in split.split(data, data["correct_answ"]):
strat_train_set = data.loc[train_index]
strat_test_set = data.loc[test_index]
data["correct_answ"].value_counts() / len(data)
strat_train_set["correct_answ"].value_counts() / len(strat_train_set)
strat_test_set["correct_answ"].value_counts() / len(strat_test_set)
training = strat_train_set.copy()
testing=strat_test_set.copy()
X_training=training[['bleu_score','levenstein_sim','cosine_sim','jaccard_sim','cos_lev','cos_bleu','cos_jac','lev_bleu','lev_jac','bleu_jac']]
y_training=training[['correct_answ']]
X_testing=testing[['bleu_score','levenstein_sim','cosine_sim','jaccard_sim','cos_lev','cos_bleu','cos_jac','lev_bleu','lev_jac','bleu_jac']]
y_testing=testing[['correct_answ']]
from sklearn.neighbors import KNeighborsClassifier
clf = KNeighborsClassifier(n_neighbors=2).fit(X_training,y_training)
y_training_prediction = clf.predict(X_training)
y_testing_prediction = clf.predict(X_testing)
#K_fold confusion_matrix for training sets & Precision score & Recall score
from sklearn.model_selection import StratifiedKFold
from sklearn.metrics import precision_score, recall_score
from sklearn.base import clone
from sklearn.metrics import confusion_matrix
from sklearn.metrics import f1_score
skfolds = StratifiedKFold(n_splits=3, random_state=42)
for train_index, test_index in skfolds.split(X_training, y_training):
clone_clf = clone(clf)
X_train_folds = X_training.iloc[train_index]
y_train_folds = y_training.iloc[train_index]
X_test_fold = X_training.iloc[test_index]
y_test_fold = y_training.iloc[test_index]
clone_clf.fit(X_train_folds, y_train_folds)
y_pred = clone_clf.predict(X_test_fold)
recall_score(y_test_fold,y_pred)
print(confusion_matrix(y_test_fold,y_pred))
print('precision_score: ',precision_score(y_test_fold,y_pred))
print('recall_score: ',recall_score(y_test_fold,y_pred))
    print('f1_score: ',f1_score(y_test_fold, y_pred))
print('')
print('\n')
""" n_correct = sum(y_pred == y_test_fold)
print(n_correct / len(y_pred)) """
# cross_validation score for training sets
from sklearn.model_selection import cross_val_score
cross_val_score(clf, X_training, y_training, cv=3, scoring="accuracy")
# testing data validation
from sklearn.metrics import confusion_matrix
print('confusion_matrix: \n',confusion_matrix(y_testing,y_testing_prediction))
print('precision_score: ',precision_score(y_testing,y_testing_prediction))
print('recall_score: ',recall_score(y_testing,y_testing_prediction))
print('f1_score: ',f1_score(y_testing,y_testing_prediction))
from sklearn.model_selection import cross_val_score
print('accuracy: ',cross_val_score(clf, X_testing, y_testing, cv=3, scoring="accuracy"))
import matplotlib.pyplot
```
| github_jupyter |
## CIFAR10 using a simple deep network
Credits: \
https://medium.com/@sergioalves94/deep-learning-in-pytorch-with-cifar-10-dataset-858b504a6b54 \
https://jovian.ai/aakashns/05-cifar10-cnn
```
import torch
import torchvision
import numpy as np
import matplotlib.pyplot as plt
import torch.nn as nn
import torch.nn.functional as F
from torchvision.datasets import CIFAR10
from torchvision.transforms import ToTensor
from torchvision.utils import make_grid
from torch.utils.data.dataloader import DataLoader
from torch.utils.data import random_split
from torchsummary import summary
%matplotlib inline
```
### Exploring the data
```
# Dowload the dataset
dataset = CIFAR10(root='data/', download=True, transform=ToTensor())
test_dataset = CIFAR10(root='data/', train=False, transform=ToTensor())
```
Import the datasets and convert the images into PyTorch tensors.
```
classes = dataset.classes
classes
class_count = {}
for _, index in dataset:
label = classes[index]
if label not in class_count:
class_count[label] = 0
class_count[label] += 1
class_count
```
Split the dataset into two groups: training and validation datasets.
```
torch.manual_seed(43)
val_size = 5000
train_size = len(dataset) - val_size
train_ds, val_ds = random_split(dataset, [train_size, val_size])
len(train_ds), len(val_ds)
batch_size=128
train_loader = DataLoader(train_ds, batch_size, shuffle=True, num_workers=4, pin_memory=True)
val_loader = DataLoader(val_ds, batch_size*2, num_workers=4, pin_memory=True)
test_loader = DataLoader(test_dataset, batch_size*2, num_workers=4, pin_memory=True)
```
We set `pin_memory=True` because we will push the data from the CPU to the GPU, and this parameter lets the DataLoader allocate the samples in page-locked memory, which speeds up the transfer.
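As a small aside (a sketch, not part of the original notebook), the benefit of page-locked memory is that batches from a `pin_memory=True` loader can be copied to the GPU asynchronously with `non_blocking=True`, which is what the `to_device` helper defined below relies on:
```
# Sketch: pinned host memory enables asynchronous host-to-GPU copies
if torch.cuda.is_available():
    images, labels = next(iter(train_loader))
    images = images.cuda(non_blocking=True)  # the copy can overlap with other GPU work
    labels = labels.cuda(non_blocking=True)
```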
```
for images, _ in train_loader:
print('images.shape:', images.shape)
plt.figure(figsize=(16,8))
plt.axis('off')
plt.imshow(make_grid(images, nrow=16).permute((1, 2, 0)))
break
```
### Model
```
def accuracy(outputs, labels):
_, preds = torch.max(outputs, dim=1)
return torch.tensor(torch.sum(preds == labels).item() / len(preds))
class ImageClassificationBase(nn.Module):
def training_step(self, batch):
images, labels = batch
out = self(images) # Generate predictions
loss = F.cross_entropy(out, labels) # Calculate loss
return loss
def validation_step(self, batch):
images, labels = batch
out = self(images) # Generate predictions
loss = F.cross_entropy(out, labels) # Calculate loss
acc = accuracy(out, labels) # Calculate accuracy
return {'val_loss': loss.detach(), 'val_acc': acc}
def validation_epoch_end(self, outputs):
batch_losses = [x['val_loss'] for x in outputs]
epoch_loss = torch.stack(batch_losses).mean() # Combine losses
batch_accs = [x['val_acc'] for x in outputs]
epoch_acc = torch.stack(batch_accs).mean() # Combine accuracies
return {'val_loss': epoch_loss.item(), 'val_acc': epoch_acc.item()}
def epoch_end(self, epoch, result):
print("Epoch [{}], val_loss: {:.4f}, val_acc: {:.4f}".format(epoch,
result['val_loss'], result['val_acc']))
def evaluate(model, val_loader):
outputs = [model.validation_step(batch) for batch in val_loader]
return model.validation_epoch_end(outputs)
def fit(epochs, lr, model, train_loader, val_loader, opt_func=torch.optim.SGD):
history = []
optimizer = opt_func(model.parameters(), lr)
for epoch in range(epochs):
# Training Phase
for batch in train_loader:
loss = model.training_step(batch)
loss.backward()
optimizer.step()
optimizer.zero_grad()
# Validation phase
result = evaluate(model, val_loader)
model.epoch_end(epoch, result)
history.append(result)
return history
torch.cuda.is_available()
def get_default_device():
"""Pick GPU if available, else CPU"""
if torch.cuda.is_available():
return torch.device('cuda')
else:
return torch.device('cpu')
device = get_default_device()
device
def to_device(data, device):
"""Move tensor(s) to chosen device"""
if isinstance(data, (list,tuple)):
return [to_device(x, device) for x in data]
return data.to(device, non_blocking=True)
class DeviceDataLoader():
"""Wrap a dataloader to move data to a device"""
def __init__(self, dl, device):
self.dl = dl
self.device = device
def __iter__(self):
"""Yield a batch of data after moving it to device"""
for b in self.dl:
yield to_device(b, self.device)
def __len__(self):
"""Number of batches"""
return len(self.dl)
def plot_losses(history):
losses = [x['val_loss'] for x in history]
plt.plot(losses, '-x')
plt.xlabel('epoch')
plt.ylabel('loss')
plt.title('Loss vs. No. of epochs')
train_loader = DeviceDataLoader(train_loader, device)
val_loader = DeviceDataLoader(val_loader, device)
test_loader = DeviceDataLoader(test_loader, device)
```
### Training the model
```
input_size = 3*32*32
output_size = 10
class CIFAR10Model(ImageClassificationBase):
def __init__(self):
super().__init__()
self.linear1 = nn.Linear(input_size, 256)
self.linear2 = nn.Linear(256, 128)
self.linear3 = nn.Linear(128, output_size)
def forward(self, xb):
# Flatten images into vectors
out = xb.view(xb.size(0), -1)
# Apply layers & activation functions
out = self.linear1(out)
out = F.relu(out)
out = self.linear2(out)
out = F.relu(out)
out = self.linear3(out)
return out
model = to_device(CIFAR10Model(), device)
summary(model, (3, 32, 32))
history = [evaluate(model, val_loader)]
history
history += fit(10, 1e-1, model, train_loader, val_loader)
history += fit(10, 1e-2, model, train_loader, val_loader)
history += fit(10, 1e-3, model, train_loader, val_loader)
plot_losses(history)
def plot_accuracies(history):
accuracies = [x['val_acc'] for x in history]
plt.plot(accuracies, '-x')
plt.xlabel('epoch')
plt.ylabel('accuracy')
plt.title('Accuracy vs. No. of epochs')
plot_accuracies(history)
## test set:
evaluate(model, test_loader)
```
| github_jupyter |
# Creating a DataLoader for ECO with the Kinetics dataset
Using the Kinetics video data, we build a DataLoader for ECO.
# 9.4 Learning goals
1. Be able to download the Kinetics video dataset
2. Be able to convert video data into per-frame image data
3. Be able to implement a DataLoader for use with ECO
# Preparation
- Following the book's instructions, download the Kinetics video data and convert each video into per-frame image data (a rough sketch of this conversion follows below)
- Run this notebook in the virtual environment pytorch_p36
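The cells in this notebook assume each video has already been split into numbered JPEG frames. As a rough illustration only (not the book's own conversion script, and assuming `opencv-python` is available), a single mp4 could be split like this; the `image_{:05d}.jpg` naming matches the `img_tmpl` template used by the Dataset later in this notebook.
```
# Sketch only: split one mp4 into frames named image_00001.jpg, image_00002.jpg, ...
import os
import cv2  # assumption: opencv-python is installed

def video_to_frames(video_path, out_dir):
    os.makedirs(out_dir, exist_ok=True)
    cap = cv2.VideoCapture(video_path)
    idx = 1
    while True:
        ret, frame = cap.read()
        if not ret:  # no more frames
            break
        cv2.imwrite(os.path.join(out_dir, 'image_{:05d}.jpg'.format(idx)), frame)
        idx += 1
    cap.release()
```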
```
import os
from PIL import Image
import csv
import numpy as np
import torch
import torch.utils.data
from torch import nn
import torchvision
```
# Build the list of paths to the folders of frame images extracted from the videos
```
def make_datapath_list(root_path):
    """
    Build a list of paths to the folders holding the frame images extracted from each video.
    root_path : str, root path to the data folder
    Returns: ret : video_list, list of paths to the folders of extracted frame images
    """
    # List of paths to the folders of extracted frame images
    video_list = list()
    # Get the class names (sub-folders) under root_path
    class_list = os.listdir(path=root_path)
    # Get the path of the frame-image folder for every video of every class
    for class_list_i in (class_list):  # loop over the classes
        # Path of the class folder
        class_path = os.path.join(root_path, class_list_i)
        # Loop over the image folders inside the class folder
        for file_name in os.listdir(class_path):
            # Split into file name and extension
            name, ext = os.path.splitext(file_name)
            # Skip the original mp4 files; keep only the folders of extracted frames
            if ext == '.mp4':
                continue
            # Path of the folder where the video was split into frame images
            video_img_directory_path = os.path.join(class_path, name)
            # Append to video_list
            video_list.append(video_img_directory_path)
    return video_list
# Quick check
root_path = './data/kinetics_videos/'
video_list = make_datapath_list(root_path)
print(video_list[0])
print(video_list[1])
```
# Create the video preprocessing class
```
class VideoTransform():
    """
    Preprocessing class that turns a video into images. It can behave differently at training and inference time.
    Since the video has already been split into images, note that the split images are preprocessed together as a group.
    """
    def __init__(self, resize, crop_size, mean, std):
        self.data_transform = {
            'train': torchvision.transforms.Compose([
                # DataAugumentation()  # omitted this time
                GroupResize(int(resize)),  # resize the images as a group
                GroupCenterCrop(crop_size),  # center-crop the images as a group
                GroupToTensor(),  # convert the data to PyTorch tensors
                GroupImgNormalize(mean, std),  # standardize the data
                Stack()  # concatenate the images along the frames dimension
            ]),
            'val': torchvision.transforms.Compose([
                GroupResize(int(resize)),  # resize the images as a group
                GroupCenterCrop(crop_size),  # center-crop the images as a group
                GroupToTensor(),  # convert the data to PyTorch tensors
                GroupImgNormalize(mean, std),  # standardize the data
                Stack()  # concatenate the images along the frames dimension
            ])
        }
    def __call__(self, img_group, phase):
        """
        Parameters
        ----------
        phase : 'train' or 'val'
            specifies the preprocessing mode
        """
        return self.data_transform[phase](img_group)
# Classes used by the preprocessing pipeline
class GroupResize():
    '''Class that rescales the images as a group.
    The short side of each image is resized to `resize`.
    The aspect ratio is preserved.
    '''
    def __init__(self, resize, interpolation=Image.BILINEAR):
        '''Prepare the rescale operation'''
        self.rescaler = torchvision.transforms.Resize(resize, interpolation)
    def __call__(self, img_group):
        '''Apply the rescale to every img in img_group (a list)'''
        return [self.rescaler(img) for img in img_group]
class GroupCenterCrop():
    '''Class that center-crops the images as a group.
    Crops out a (crop_size, crop_size) image.
    '''
    def __init__(self, crop_size):
        '''Prepare the center-crop operation'''
        self.ccrop = torchvision.transforms.CenterCrop(crop_size)
    def __call__(self, img_group):
        '''Apply the center crop to every img in img_group (a list)'''
        return [self.ccrop(img) for img in img_group]
class GroupToTensor():
    '''Class that converts the images to tensors as a group.
    '''
    def __init__(self):
        '''Prepare the tensor conversion'''
        self.to_tensor = torchvision.transforms.ToTensor()
    def __call__(self, img_group):
        '''Convert every img in img_group (a list) to a tensor.
        Values are handled in the range 0 to 255 rather than 0 to 1, so we multiply by 255.
        The 0-255 range is used to match the format of the pretrained weights.
        '''
        return [self.to_tensor(img)*255 for img in img_group]
class GroupImgNormalize():
    '''Class that standardizes the images as a group.
    '''
    def __init__(self, mean, std):
        '''Prepare the standardization'''
        self.normlize = torchvision.transforms.Normalize(mean, std)
    def __call__(self, img_group):
        '''Apply standardization to every img in img_group (a list)'''
        return [self.normlize(img) for img in img_group]
class Stack():
    '''Class that gathers the images into a single tensor.
    '''
    def __call__(self, img_group):
        '''img_group is a list whose elements are tensors of torch.Size([3, 224, 224])
        '''
        ret = torch.cat([(x.flip(dims=[0])).unsqueeze(dim=0)
                         for x in img_group], dim=0)  # concatenate along the frames dimension
        # x.flip(dims=[0]) reorders the color channels from RGB to BGR (the pretrained weights were trained on BGR images)
        # unsqueeze(dim=0) creates a new dimension for the frames
        return ret
```
# Creating the Dataset
```
# Prepare a dictionary that maps Kinetics-400 label names to IDs, and another that maps IDs back to label names
def get_label_id_dictionary(label_dicitionary_path='./video_download/kinetics_400_label_dicitionary.csv'):
    label_id_dict = {}
    id_label_dict = {}
    with open(label_dicitionary_path, encoding="utf-8_sig") as f:
        # open the reader
        reader = csv.DictReader(f, delimiter=",", quotechar='"')
        # read one row at a time and add it to the dictionaries
        for row in reader:
            label_id_dict.setdefault(
                row["class_label"], int(row["label_id"])-1)
            id_label_dict.setdefault(
                int(row["label_id"])-1, row["class_label"])
    return label_id_dict, id_label_dict
# Check
label_dicitionary_path = './video_download/kinetics_400_label_dicitionary.csv'
label_id_dict, id_label_dict = get_label_id_dictionary(label_dicitionary_path)
label_id_dict
class VideoDataset(torch.utils.data.Dataset):
    """
    Video Dataset
    """
    def __init__(self, video_list, label_id_dict, num_segments, phase, transform, img_tmpl='image_{:05d}.jpg'):
        self.video_list = video_list  # list of paths to the video folders
        self.label_id_dict = label_id_dict  # dictionary that maps label names to IDs
        self.num_segments = num_segments  # how many segments to split each video into
        self.phase = phase  # train or val
        self.transform = transform  # preprocessing
        self.img_tmpl = img_tmpl  # template of the image file names to load
    def __len__(self):
        '''Return the number of videos'''
        return len(self.video_list)
    def __getitem__(self, index):
        '''
        Get the preprocessed images, the label, and the label ID
        '''
        imgs_transformed, label, label_id, dir_path = self.pull_item(index)
        return imgs_transformed, label, label_id, dir_path
    def pull_item(self, index):
        '''Get the preprocessed images, the label, and the label ID'''
        # 1. Load the images as a list
        dir_path = self.video_list[index]  # folder that holds the images
        indices = self._get_indices(dir_path)  # compute the indices of the images to load
        img_group = self._load_imgs(
            dir_path, self.img_tmpl, indices)  # load as a list
        # 2. Get the label and convert it to an ID
        label = (dir_path.split('/')[3].split('/')[0])
        label_id = self.label_id_dict[label]  # get the ID
        # 3. Apply the preprocessing
        imgs_transformed = self.transform(img_group, phase=self.phase)
        return imgs_transformed, label, label_id, dir_path
    def _load_imgs(self, dir_path, img_tmpl, indices):
        '''Load the images together and return them as a list'''
        img_group = []  # list that stores the images
        for idx in indices:
            # get the image path
            file_path = os.path.join(dir_path, img_tmpl.format(idx))
            # load the image
            img = Image.open(file_path).convert('RGB')
            # append to the list
            img_group.append(img)
        return img_group
    def _get_indices(self, dir_path):
        """
        Get the list of frame indices obtained by splitting the whole video into self.num_segments segments
        """
        # number of frames in the video
        file_list = os.listdir(path=dir_path)
        num_frames = len(file_list)
        # sampling interval between frames
        tick = (num_frames) / float(self.num_segments)
        # 250 / 16 = 15.625
        # list of indices sampled at that interval
        indices = np.array([int(tick / 2.0 + tick * x)
                            for x in range(self.num_segments)])+1
        # e.g. when extracting 16 frames out of 250:
        # indices = [ 8 24 40 55 71 86 102 118 133 149 165 180 196 211 227 243]
        return indices
# Quick check
# build video_list
root_path = './data/kinetics_videos/'
video_list = make_datapath_list(root_path)
# preprocessing settings
resize, crop_size = 224, 224
mean, std = [104, 117, 123], [1, 1, 1]
video_transform = VideoTransform(resize, crop_size, mean, std)
# create the Dataset
# num_segments determines how the video is split up for sampling
val_dataset = VideoDataset(video_list, label_id_dict, num_segments=16,
                           phase="val", transform=video_transform, img_tmpl='image_{:05d}.jpg')
# Example of retrieving one item
# The outputs are imgs_transformed, label, label_id, dir_path
index = 0
print(val_dataset.__getitem__(index)[0].shape)  # tensor of the video frames
print(val_dataset.__getitem__(index)[1])  # label
print(val_dataset.__getitem__(index)[2])  # label ID
print(val_dataset.__getitem__(index)[3])  # video path
# Turn it into a DataLoader
batch_size = 8
val_dataloader = torch.utils.data.DataLoader(
    val_dataset, batch_size=batch_size, shuffle=False)
# Check
batch_iterator = iter(val_dataloader)  # convert to an iterator
imgs_transformeds, labels, label_ids, dir_path = next(
    batch_iterator)  # pull out the first batch
print(imgs_transformeds.shape)
```
End
| github_jupyter |
```
from IPython.display import HTML
# Cell visibility - COMPLETE:
#tag = HTML('''<style>
#div.input {
# display:none;
#}
#</style>''')
#display(tag)
#Cell visibility - TOGGLE:
tag = HTML('''<script>
code_show=true;
function code_toggle() {
if (code_show){
$('div.input').hide()
} else {
$('div.input').show()
}
code_show = !code_show
}
$( document ).ready(code_toggle);
</script>
<p style="text-align:right">
Promijeni vidljivost <a href="javascript:code_toggle()">ovdje</a>.</p>''')
display(tag)
```
## Complex numbers in polar form
In this interactive example, complex numbers are visualized in the complex plane and specified in polar form. A complex number is therefore determined by its modulus (the length of the corresponding vector) and its argument (the angle of the corresponding vector). You can try out the basic mathematical operations on complex numbers: addition, subtraction, multiplication and division. All results are shown on the corresponding plot, as well as in mathematical notation based on the polar form of the complex number.
You can manipulate the complex numbers directly on the plot (with a simple click) and/or use the corresponding input fields for the modulus and the argument. To keep the vectors clearly visible on the plot, the modulus of a complex number is limited to $\pm10$.
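As a reminder of the rules behind the multiplication and division offered here, for $z_1 = r_1(\cos\varphi_1 + i\sin\varphi_1)$ and $z_2 = r_2(\cos\varphi_2 + i\sin\varphi_2)$:
$$ z_1 \cdot z_2 = r_1 r_2\big(\cos(\varphi_1+\varphi_2) + i\sin(\varphi_1+\varphi_2)\big), \qquad \frac{z_1}{z_2} = \frac{r_1}{r_2}\big(\cos(\varphi_1-\varphi_2) + i\sin(\varphi_1-\varphi_2)\big), $$
while addition and subtraction act component-wise on the rectangular forms $x + iy$.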
```
%matplotlib notebook
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
import numpy as np
import ipywidgets as widgets
from IPython.display import display
from IPython.display import HTML
import math
red_patch = mpatches.Patch(color='red', label='z1')
blue_patch = mpatches.Patch(color='blue', label='z2')
green_patch = mpatches.Patch(color='green', label='z1 + z2')
yellow_patch = mpatches.Patch(color='yellow', label='z1 - z2')
black_patch = mpatches.Patch(color='black', label='z1 * z2')
magenta_patch = mpatches.Patch(color='magenta', label='z1 / z2')
# Init values
XLIM = 5
YLIM = 5
vectors_index_first = False;
V = [None, None]
V_complex = [None, None]
# Complex plane
fig = plt.figure(num='Kompleksni brojevi u polarnom obliku')
ax = fig.add_subplot(1, 1, 1)
def get_interval(lim):
if lim <= 10:
return 1
if lim < 75:
return 5
if lim > 100:
return 25
return 10
def set_ticks():
XLIMc = int((XLIM / 10) + 1) * 10
YLIMc = int((YLIM / 10) + 1) * 10
if XLIMc > 150:
XLIMc += 10
if YLIMc > 150:
YLIMc += 10
xstep = get_interval(XLIMc)
ystep = get_interval(YLIMc)
#print(stepx, stepy)
major_ticks = np.arange(-XLIMc, XLIMc, xstep)
major_ticks_y = np.arange(-YLIMc, YLIMc, ystep)
ax.set_xticks(major_ticks)
ax.set_yticks(major_ticks_y)
ax.grid(which='both')
def clear_plot():
plt.cla()
set_ticks()
ax.set_xlabel('Re')
ax.set_ylabel('Im')
plt.ylim([-YLIM, YLIM])
plt.xlim([-XLIM, XLIM])
plt.legend(handles=[red_patch, blue_patch, green_patch, yellow_patch, black_patch, magenta_patch])
clear_plot()
set_ticks()
plt.show()
set_ticks()
# Conversion functions
def com_to_trig(real, im):
r = math.sqrt(real**2 + im**2)
if abs(real) <= 1e-6 and im > 0:
arg = 90
return r, arg
if abs(real) < 1e-6 and im < 0:
arg = 270
return r, arg
if abs(im) < 1e-6 and real > 0:
arg = 0
return r, arg
if abs(im) < 1e-6 and real < 0:
arg = 180
return r, arg
if im != 0 and real !=0:
arg = np.arctan(im / real) * 180 / np.pi
if im > 0 and real < 0:
arg += 180
if im < 0 and real > 0:
arg +=360
if im < 0 and real < 0:
arg += 180
return r, arg
if abs(im) < 1e-6 and abs(real) < 1e-6:
arg = 0
return r, arg
def trig_to_com(r, arg):
re = r * np.cos(arg * np.pi / 180.)
im = r * np.sin(arg * np.pi / 180.)
return (re, im)
# Set a complex number using direct manipulation on the plot
def set_vector(i, data_x, data_y):
clear_plot()
V.pop(i)
V.insert(i, (0, 0, round(data_x, 2), round(data_y, 2)))
V_complex.pop(i)
V_complex.insert(i, complex(round(data_x, 2), round(data_y, 2)))
if i == 0:
ax.arrow(*V[0], head_width=0.25, head_length=0.5, color="r", length_includes_head=True)
z, arg = com_to_trig(data_x, data_y)
a1.value = round(z, 2)
b1.value = round(arg, 2)
if V[1] != None:
ax.arrow(*V[1], head_width=0.25, head_length=0.5, color="b", length_includes_head=True)
elif i == 1:
ax.arrow(*V[1], head_width=0.25, head_length=0.5, color="b", length_includes_head=True)
z, arg = com_to_trig(data_x, data_y)
a2.value = round(z, 2)
b2.value = round(arg, 2)
if V[0] != None:
ax.arrow(*V[0], head_width=0.25, head_length=0.5, color="r", length_includes_head=True)
max_bound()
def onclick(event):
global vectors_index_first
vectors_index_first = not vectors_index_first
x = event.xdata
y = event.ydata
if (x > 10):
x = 10.0
if (x < - 10):
x = -10.0
if (y > 10):
y = 10.0
if (y < - 10):
y = -10.0
if vectors_index_first:
set_vector(0, x, y)
else:
set_vector(1, x, y)
fig.canvas.mpl_connect('button_press_event', onclick)
# Widgets
a1 = widgets.BoundedFloatText(layout=widgets.Layout(width='10%'), min = 0, max = 10, step = 0.5)
b1 = widgets.BoundedFloatText(layout=widgets.Layout(width='10%'), min = 0, max = 360, step = 10)
button_set_z1 = widgets.Button(description="Prikaži z1")
a2 = widgets.BoundedFloatText(layout=widgets.Layout(width='10%'), min = 0, max = 10, step = 0.5)
b2 = widgets.BoundedFloatText(layout=widgets.Layout(width='10%'), min = 0, max = 360, step = 10)
button_set_z2 = widgets.Button(description="Prikaži z2")
box_layout_z1 = widgets.Layout(border='solid red', padding='10px')
box_layout_z2 = widgets.Layout(border='solid blue', padding='10px')
box_layout_opers = widgets.Layout(border='solid black', padding='10px')
items_z1 = [widgets.Label("z1: Duljina (|z1|) = "), a1, widgets.Label("Kut (\u2221)= "), b1, button_set_z1]
items_z2 = [widgets.Label("z2: Duljina (|z2|) = "), a2, widgets.Label("Kut (\u2221)= "), b2, button_set_z2]
display(widgets.Box(children=items_z1, layout=box_layout_z1))
display(widgets.Box(children=items_z2, layout=box_layout_z2))
button_add = widgets.Button(description="Zbroji")
button_substract = widgets.Button(description="Oduzmi")
button_multiply = widgets.Button(description="Pomnoži")
button_divide = widgets.Button(description="Podijeli")
button_reset = widgets.Button(description="Resetiraj")
output = widgets.Output()
print('Operacije nad kompleksnim brojevima:')
items_operations = [button_add, button_substract, button_multiply, button_divide, button_reset]
display(widgets.Box(children=items_operations))
display(output)
# Set complex number using input widgets (Text and Button)
def on_button_set_z1_clicked(b):
z1_old = V[0];
re, im = trig_to_com(a1.value, b1.value)
z1_new = (0, 0, re, im)
if z1_old != z1_new:
set_vector(0, re, im)
change_lims()
def on_button_set_z2_clicked(b):
z2_old = V[1];
re, im = trig_to_com(a2.value, b2.value)
z2_new = (0, 0, re, im)
if z2_old != z2_new:
set_vector(1, re, im)
change_lims()
# Complex number operations:
def perform_operation(oper):
global XLIM, YLIM
if (V_complex[0] != None) and (V_complex[1] != None):
if (oper == '+'):
result = V_complex[0] + V_complex[1]
v_color = "g"
elif (oper == '-'):
result = V_complex[0] - V_complex[1]
v_color = "y"
elif (oper == '*'):
result = V_complex[0] * V_complex[1]
v_color = "black"
elif (oper == '/'):
result = V_complex[0] / V_complex[1]
v_color = "magenta"
result = complex(round(result.real, 2), round(result.imag, 2))
ax.arrow(0, 0, result.real, result.imag, head_width=0.25, head_length=0.15, color=v_color, length_includes_head=True)
if abs(result.real) > XLIM:
XLIM = round(abs(result.real) + 1)
if abs(result.imag) > YLIM:
YLIM = round(abs(result.imag) + 1)
change_lims()
with output:
z1, ang1 = com_to_trig(V_complex[0].real, V_complex[0].imag )
z2, ang2 = com_to_trig(V_complex[1].real, V_complex[1].imag)
z3, ang3 = com_to_trig(result.real, result.imag)
z1 = round(z1, 2)
ang1 = round(ang1, 2)
z2 = round(z2, 2)
ang2 = round(ang2, 2)
z3 = round(z3, 2)
ang3 = round(ang3, 2)
print("{}*(cos({}) + i*sin({}))".format(z1,ang1,ang1), oper,
"{}*(cos({}) + i*sin({}))".format(z2,ang2,ang2), "=",
"{}*(cos({}) + i*sin({}))".format(z3,ang3,ang3))
print('{} \u2221{}'.format(z1, ang1), oper,
'{} \u2221{}'.format(z2, ang2), "=",
'{} \u2221{}'.format(z3, ang3))
def on_button_add_clicked(b):
perform_operation("+")
def on_button_substract_clicked(b):
perform_operation("-")
def on_button_multiply_clicked(b):
perform_operation("*")
def on_button_divide_clicked(b):
perform_operation("/")
# Plot init methods
def on_button_reset_clicked(b):
    global V, V_complex, XLIM, YLIM, vectors_index_first
with output:
output.clear_output()
clear_plot()
vectors_index_first = False;
V = [None, None]
V_complex = [None, None]
a1.value = 0
b1.value = 0
a2.value = 0
b2.value = 0
XLIM = 5
YLIM = 5
change_lims()
def clear_plot():
plt.cla()
set_ticks()
ax.set_xlabel('Re')
ax.set_ylabel('Im')
plt.ylim([-YLIM, YLIM])
plt.xlim([-XLIM, XLIM])
plt.legend(handles=[red_patch, blue_patch, green_patch, yellow_patch, black_patch, magenta_patch])
def change_lims():
set_ticks()
plt.ylim([-YLIM, YLIM])
plt.xlim([-XLIM, XLIM])
set_ticks()
def max_bound():
global XLIM, YLIM
mx = 0
my = 0
if V_complex[0] != None:
z = V_complex[0]
if abs(z.real) > mx:
mx = abs(z.real)
if abs(z.imag) > my:
my = abs(z.imag)
if V_complex[1] != None:
z = V_complex[1]
if abs(z.real) > mx:
mx = abs(z.real)
if abs(z.imag) > my:
my = abs(z.imag)
if mx > XLIM:
XLIM = round(mx + 1)
elif mx <=5:
XLIM = 5
if my > YLIM:
YLIM = round(my + 1)
elif my <=5:
YLIM = 5
change_lims()
# Button events
button_set_z1.on_click(on_button_set_z1_clicked)
button_set_z2.on_click(on_button_set_z2_clicked)
button_add.on_click(on_button_add_clicked)
button_substract.on_click(on_button_substract_clicked)
button_multiply.on_click(on_button_multiply_clicked)
button_divide.on_click(on_button_divide_clicked)
button_reset.on_click(on_button_reset_clicked)
```
| github_jupyter |
# Tutorial 09: Standard problem 5
> Interactive online tutorial:
> [](https://mybinder.org/v2/gh/ubermag/oommfc/master?filepath=docs%2Fipynb%2Findex.ipynb)
## Problem specification
The sample is a thin film cuboid with dimensions:
- length $l_{x} = 100 \,\text{nm}$,
- width $l_{y} = 100 \,\text{nm}$, and
- thickness $l_{z} = 10 \,\text{nm}$.
The material parameters (similar to permalloy) are:
- exchange energy constant $A = 1.3 \times 10^{-11} \,\text{J/m}$,
- magnetisation saturation $M_\text{s} = 8 \times 10^{5} \,\text{A/m}$.
Dynamics parameters are: $\gamma_{0} = 2.211 \times 10^{5} \,\text{m}\,\text{A}^{-1}\,\text{s}^{-1}$ and Gilbert damping $\alpha=0.1$.
In standard problem 5, the system is first relaxed at zero external magnetic field, starting from the vortex state. Second, a spin-polarised current is applied in the $x$ direction with $u_{x} = -72.35 \,\text{m}\,\text{s}^{-1}$ and $\beta=0.05$.
A more detailed specification of standard problem 5 can be found in Ref. 1.
## Simulation
In the first step, we import the required `oommfc`, `discretisedfield`, and `micromagneticmodel` modules.
```
import oommfc as oc
import discretisedfield as df
import micromagneticmodel as mm
```
Now, we can set all required geometry and material parameters.
```
# Geometry
lx = 100e-9  # x dimension of the sample (m)
ly = 100e-9  # y dimension of the sample (m)
lz = 10e-9  # sample thickness (m)
dx = dy = dz = 5e-9  # discretisation cell (m)
# Material (permalloy) parameters
Ms = 8e5 # saturation magnetisation (A/m)
A = 1.3e-11 # exchange energy constant (J/m)
# Dynamics (LLG equation) parameters
gamma0 = 2.211e5 # gyromagnetic ratio (m/As)
alpha = 0.1 # Gilbert damping
ux = -72.35 # velocity in x direction
beta = 0.05 # non-adiabatic STT parameter
```
As usual, we create the system object with `stdprob5` name.
```
system = mm.System(name='stdprob5')
```
The mesh is created by providing two points `p1` and `p2` between which the mesh domain spans and the size of a discretisation cell. We choose the discretisation to be $(5, 5, 5) \,\text{nm}$.
```
%matplotlib inline
region = df.Region(p1=(0, 0, 0), p2=(lx, ly, lz))
mesh = df.Mesh(region=region, cell=(dx, dy, dz))
mesh.k3d()
```
**Hamiltonian:** In the second step, we define the system's Hamiltonian. In this standard problem, the Hamiltonian contains only exchange and demagnetisation energy terms. Please note that in the first simulation stage, there is no applied external magnetic field. Therefore, we do not add a Zeeman energy term to the Hamiltonian.
```
system.energy = mm.Exchange(A=A) + mm.Demag()
system.energy
```
**Magnetisation:** We initialise the system using the initial magnetisation function.
```
def m_vortex(pos):
x, y, z = pos[0]/1e-9-50, pos[1]/1e-9-50, pos[2]/1e-9
return (-y, x, 10)
system.m = df.Field(mesh, dim=3, value=m_vortex, norm=Ms)
system.m.plane(z=0).mpl()
```
**Dynamics:** In the first (relaxation) stage, we minimise the system's energy and therefore we do not need to specify the dynamics equation.
**Minimisation:** Now, we minimise the system's energy using `MinDriver`.
```
md = oc.MinDriver()
md.drive(system)
system.m.plane(z=0).mpl()
```
## Spin-polarised current
In the second part of simulation, we need to specify the dynamics equation for the system.
```
system.dynamics += mm.Precession(gamma0=gamma0) + mm.Damping(alpha=alpha) + mm.ZhangLi(u=ux, beta=beta)
system.dynamics
```
Now, we can drive the system for $8 \,\text{ns}$ and save the magnetisation in $n=100$ steps.
```
td = oc.TimeDriver()
td.drive(system, t=8e-9, n=100)
```
The vortex after $8 \,\text{ns}$ is now displaced from the centre.
```
system.m.plane(z=0).mpl()
system.table.data.plot('t', 'mx')
```
## References
[1] µMAG Site Directory: http://www.ctcms.nist.gov/~rdm/mumag.org.html
| github_jupyter |
Mount my google drive, where I stored the dataset.
```
from google.colab import drive
drive.mount('/content/drive')
```
**Download dependencies**
```
!pip3 install sklearn matplotlib GPUtil
!pip3 install torch torchvision
```
**Download Data**
In order to acquire the dataset please navigate to:
https://ieee-dataport.org/documents/cervigram-image-dataset
Unzip the dataset into the folder "dataset".
For your environment, please adjust the paths accordingly.
```
!rm -vrf "dataset"
!mkdir "dataset"
# !cp -r "/content/drive/My Drive/Studiu doctorat leziuni cervicale/cervigram-image-dataset-v2.zip" "dataset/cervigram-image-dataset-v2.zip"
!cp -r "cervigram-image-dataset-v2.zip" "dataset/cervigram-image-dataset-v2.zip"
!unzip "dataset/cervigram-image-dataset-v2.zip" -d "dataset"
```
**Constants**
For your environment, please modify the paths accordingly.
```
# TRAIN_PATH = '/content/dataset/data/train/'
# TEST_PATH = '/content/dataset/data/test/'
TRAIN_PATH = 'dataset/data/train/'
TEST_PATH = 'dataset/data/test/'
CROP_SIZE = 260
IMAGE_SIZE = 224
BATCH_SIZE = 100
```
**Imports**
```
import torch as t
import torchvision as tv
import numpy as np
import PIL as pil
import matplotlib.pyplot as plt
from torchvision.datasets import ImageFolder
from torch.utils.data import DataLoader
from torch.nn import Linear, BCEWithLogitsLoss
import sklearn as sk
import sklearn.metrics
from os import listdir
import time
import random
import GPUtil
```
**Memory Stats**
```
import GPUtil
def memory_stats():
for gpu in GPUtil.getGPUs():
print("GPU RAM Free: {0:.0f}MB | Used: {1:.0f}MB | Util {2:3.0f}% | Total {3:.0f}MB".format(gpu.memoryFree, gpu.memoryUsed, gpu.memoryUtil*100, gpu.memoryTotal))
memory_stats()
```
**Deterministic Measurements**
These statements help make the experiments reproducible by fixing the random seeds. Despite fixing the random seeds, experiments are usually not reproducible across different PyTorch releases, commits, or platforms, or between CPU and GPU executions. Please find more details in the PyTorch documentation:
https://pytorch.org/docs/stable/notes/randomness.html
```
SEED = 0
t.manual_seed(SEED)
t.cuda.manual_seed(SEED)
t.backends.cudnn.deterministic = True
t.backends.cudnn.benchmark = False
np.random.seed(SEED)
random.seed(SEED)
```
**Loading Data**
The dataset is structured in multiple small folders of 7 images each. This generator iterates through the folders and returns the category and 7 paths, one for each image in the folder. The paths are ordered; the order is important since each folder contains 3 types of images: the first 5 are taken with an acetic acid solution, the next one through a green lens, and the last one with an iodine solution (which gives a dark red color).
```
def sortByLastDigits(elem):
chars = [c for c in elem if c.isdigit()]
return 0 if len(chars) == 0 else int(''.join(chars))
def getImagesPaths(root_path):
for class_folder in [root_path + f for f in listdir(root_path)]:
category = int(class_folder[-1])
for case_folder in listdir(class_folder):
case_folder_path = class_folder + '/' + case_folder + '/'
img_files = [case_folder_path + file_name for file_name in listdir(case_folder_path)]
yield category, sorted(img_files, key = sortByLastDigits)
```
We define 3 datasets, which load 3 kinds of images: natural images, images taken through a green lens and images where the doctor applied iodine solution (which gives a dark red color). Each dataset has dynamic and static transformations which could be applied to the data. The static transformations are applied on the initialization of the dataset, while the dynamic ones are applied when loading each batch of data.
```
class SimpleImagesDataset(t.utils.data.Dataset):
def __init__(self, root_path, transforms_x_static = None, transforms_x_dynamic = None, transforms_y_static = None, transforms_y_dynamic = None):
self.dataset = []
self.transforms_x = transforms_x_dynamic
self.transforms_y = transforms_y_dynamic
for category, img_files in getImagesPaths(root_path):
for i in range(5):
img = pil.Image.open(img_files[i])
if transforms_x_static != None:
img = transforms_x_static(img)
if transforms_y_static != None:
category = transforms_y_static(category)
self.dataset.append((img, category))
def __getitem__(self, i):
x, y = self.dataset[i]
if self.transforms_x != None:
x = self.transforms_x(x)
if self.transforms_y != None:
y = self.transforms_y(y)
return x, y
def __len__(self):
return len(self.dataset)
class GreenLensImagesDataset(SimpleImagesDataset):
def __init__(self, root_path, transforms_x_static = None, transforms_x_dynamic = None, transforms_y_static = None, transforms_y_dynamic = None):
self.dataset = []
self.transforms_x = transforms_x_dynamic
self.transforms_y = transforms_y_dynamic
for category, img_files in getImagesPaths(root_path):
# Only the green lens image
img = pil.Image.open(img_files[-2])
if transforms_x_static != None:
img = transforms_x_static(img)
if transforms_y_static != None:
category = transforms_y_static(category)
self.dataset.append((img, category))
class RedImagesDataset(SimpleImagesDataset):
def __init__(self, root_path, transforms_x_static = None, transforms_x_dynamic = None, transforms_y_static = None, transforms_y_dynamic = None):
self.dataset = []
self.transforms_x = transforms_x_dynamic
self.transforms_y = transforms_y_dynamic
for category, img_files in getImagesPaths(root_path):
            # Only the iodine solution (red) image
img = pil.Image.open(img_files[-1])
if transforms_x_static != None:
img = transforms_x_static(img)
if transforms_y_static != None:
category = transforms_y_static(category)
self.dataset.append((img, category))
```
**Preprocess Data**
Convert pytorch tensor to numpy array.
```
def to_numpy(x):
return x.cpu().detach().numpy()
```
Data transformations for the test and training sets.
```
norm_mean = [0.485, 0.456, 0.406]
norm_std = [0.229, 0.224, 0.225]
transforms_train = tv.transforms.Compose([
tv.transforms.RandomAffine(degrees = 45, translate = None, scale = (1., 2.), shear = 30),
# tv.transforms.CenterCrop(CROP_SIZE),
tv.transforms.Resize(IMAGE_SIZE),
tv.transforms.RandomHorizontalFlip(),
tv.transforms.ToTensor(),
tv.transforms.Lambda(lambda t: t.cuda()),
tv.transforms.Normalize(mean=norm_mean, std=norm_std)
])
transforms_test = tv.transforms.Compose([
# tv.transforms.CenterCrop(CROP_SIZE),
tv.transforms.Resize(IMAGE_SIZE),
tv.transforms.ToTensor(),
tv.transforms.Normalize(mean=norm_mean, std=norm_std)
])
y_transform = tv.transforms.Lambda(lambda y: t.tensor(y, dtype=t.long, device = 'cuda:0'))
```
Initialize pytorch datasets and loaders for training and test.
```
def create_loaders(dataset_class):
dataset_train = dataset_class(TRAIN_PATH, transforms_x_dynamic = transforms_train, transforms_y_dynamic = y_transform)
dataset_test = dataset_class(TEST_PATH, transforms_x_static = transforms_test,
transforms_x_dynamic = tv.transforms.Lambda(lambda t: t.cuda()), transforms_y_dynamic = y_transform)
loader_train = DataLoader(dataset_train, BATCH_SIZE, shuffle = True, num_workers = 0)
loader_test = DataLoader(dataset_test, BATCH_SIZE, shuffle = False, num_workers = 0)
return loader_train, loader_test, len(dataset_train), len(dataset_test)
loader_train_simple_img, loader_test_simple_img, len_train, len_test = create_loaders(SimpleImagesDataset)
```
**Visualize Data**
Load a few images so that we can see the effects of the data augmentation on the training set.
```
def plot_one_prediction(x, label, pred):
x, label, pred = to_numpy(x), to_numpy(label), to_numpy(pred)
x = np.transpose(x, [1, 2, 0])
if x.shape[-1] == 1:
x = x.squeeze()
x = x * np.array(norm_std) + np.array(norm_mean)
plt.title(label, color = 'green' if label == pred else 'red')
plt.imshow(x)
def plot_predictions(imgs, labels, preds):
fig = plt.figure(figsize = (20, 5))
for i in range(20):
fig.add_subplot(2, 10, i + 1, xticks = [], yticks = [])
plot_one_prediction(imgs[i], labels[i], preds[i])
# x, y = next(iter(loader_train_simple_img))
# plot_predictions(x, y, y)
```
**Model**
Define a few models to experiment with.
```
def get_mobilenet_v2():
model = t.hub.load('pytorch/vision', 'mobilenet_v2', pretrained=True)
model.classifier[1] = Linear(in_features=1280, out_features=4, bias=True)
model = model.cuda()
return model
def get_vgg_19():
    model = tv.models.vgg19(pretrained = True)
    model = model.cuda()
    # Replace the whole head: assigning to .out_features alone would not resize the weights
    model.classifier[6] = Linear(model.classifier[6].in_features, 4)
    return model
def get_res_next_101():
    model = t.hub.load('facebookresearch/WSL-Images', 'resnext101_32x8d_wsl')
    model.fc = Linear(model.fc.in_features, 4)
    model = model.cuda()
    return model
def get_resnet_18():
    model = tv.models.resnet18(pretrained = True)
    model.fc = Linear(model.fc.in_features, 4)
    model = model.cuda()
    return model
def get_dense_net():
    model = tv.models.densenet121(pretrained = True)
    model.classifier = Linear(model.classifier.in_features, 4)
    model = model.cuda()
    return model
class MobileNetV2_FullConv(t.nn.Module):
def __init__(self):
super().__init__()
self.cnn = get_mobilenet_v2().features
self.cnn[18] = t.nn.Sequential(
tv.models.mobilenet.ConvBNReLU(320, 32, kernel_size=1),
t.nn.Dropout2d(p = .7)
)
self.fc = t.nn.Linear(32, 4)
def forward(self, x):
x = self.cnn(x)
x = x.mean([2, 3])
x = self.fc(x);
return x
model_simple = t.nn.DataParallel(get_mobilenet_v2())
```
**Train & Evaluate**
Timer utility function. This is used to measure the execution speed.
```
time_start = 0
def timer_start():
global time_start
time_start = time.time()
def timer_end():
return time.time() - time_start
```
This function trains the network and evaluates it at the same time. It outputs the metrics recorded during the training for both train and test. We are measuring accuracy and the loss. The function also saves a checkpoint of the model every time the accuracy is improved. In the end we will have a checkpoint of the model which gave the best accuracy.
```
def train_eval(optimizer, model, loader_train, loader_test, chekpoint_name, epochs):
metrics = {
'losses_train': [],
'losses_test': [],
'acc_train': [],
'acc_test': [],
'prec_train': [],
'prec_test': [],
'rec_train': [],
'rec_test': [],
'f_score_train': [],
'f_score_test': []
}
best_acc = 0
loss_fn = t.nn.CrossEntropyLoss()
try:
for epoch in range(epochs):
timer_start()
train_epoch_loss, train_epoch_acc, train_epoch_precision, train_epoch_recall, train_epoch_f_score = 0, 0, 0, 0, 0
test_epoch_loss, test_epoch_acc, test_epoch_precision, test_epoch_recall, test_epoch_f_score = 0, 0, 0, 0, 0
# Train
model.train()
for x, y in loader_train:
y_pred = model.forward(x)
loss = loss_fn(y_pred, y)
loss.backward()
optimizer.step()
# memory_stats()
optimizer.zero_grad()
y_pred, y = to_numpy(y_pred), to_numpy(y)
pred = y_pred.argmax(axis = 1)
ratio = len(y) / len_train
train_epoch_loss += (loss.item() * ratio)
train_epoch_acc += (sk.metrics.accuracy_score(y, pred) * ratio)
precision, recall, f_score, _ = sk.metrics.precision_recall_fscore_support(y, pred, average = 'macro')
train_epoch_precision += (precision * ratio)
train_epoch_recall += (recall * ratio)
train_epoch_f_score += (f_score * ratio)
metrics['losses_train'].append(train_epoch_loss)
metrics['acc_train'].append(train_epoch_acc)
metrics['prec_train'].append(train_epoch_precision)
metrics['rec_train'].append(train_epoch_recall)
metrics['f_score_train'].append(train_epoch_f_score)
# Evaluate
model.eval()
with t.no_grad():
for x, y in loader_test:
y_pred = model.forward(x)
loss = loss_fn(y_pred, y)
y_pred, y = to_numpy(y_pred), to_numpy(y)
pred = y_pred.argmax(axis = 1)
ratio = len(y) / len_test
                    test_epoch_loss += (loss.item() * ratio)  # .item() avoids storing tensors in the metrics
test_epoch_acc += (sk.metrics.accuracy_score(y, pred) * ratio )
precision, recall, f_score, _ = sk.metrics.precision_recall_fscore_support(y, pred, average = 'macro')
test_epoch_precision += (precision * ratio)
test_epoch_recall += (recall * ratio)
test_epoch_f_score += (f_score * ratio)
metrics['losses_test'].append(test_epoch_loss)
metrics['acc_test'].append(test_epoch_acc)
metrics['prec_test'].append(test_epoch_precision)
metrics['rec_test'].append(test_epoch_recall)
metrics['f_score_test'].append(test_epoch_f_score)
if metrics['acc_test'][-1] > best_acc:
best_acc = metrics['acc_test'][-1]
t.save({'model': model.state_dict()}, 'checkpint {}.tar'.format(chekpoint_name))
print('Epoch {} acc {} prec {} rec {} f {} minutes {}'.format(
epoch + 1, metrics['acc_test'][-1], metrics['prec_test'][-1], metrics['rec_test'][-1], metrics['f_score_test'][-1], timer_end() / 60))
except KeyboardInterrupt as e:
print(e)
print('Ended training')
return metrics
```
Plot a metric for both train and test.
```
def plot_train_test(train, test, title, y_title):
plt.plot(range(len(train)), train, label = 'train')
plt.plot(range(len(test)), test, label = 'test')
plt.xlabel('Epochs')
plt.ylabel(y_title)
plt.title(title)
plt.legend()
plt.show()
```
Plot precision - recall curve
```
def plot_precision_recall(metrics):
plt.scatter(metrics['prec_train'], metrics['rec_train'], label = 'train')
plt.scatter(metrics['prec_test'], metrics['rec_test'], label = 'test')
plt.legend()
plt.title('Precision-Recall')
plt.xlabel('Precision')
plt.ylabel('Recall')
```
Train a model for several epochs. The steps_learning parameter is a list of tuples. Each tuple specifies the steps and the learning rate.
```
def do_train(model, loader_train, loader_test, checkpoint_name, steps_learning):
for steps, learn_rate in steps_learning:
metrics = train_eval(t.optim.Adam(model.parameters(), lr = learn_rate, weight_decay = 0), model, loader_train, loader_test, checkpoint_name, steps)
print('Best test accuracy :', max(metrics['acc_test']))
plot_train_test(metrics['losses_train'], metrics['losses_test'], 'Loss (lr = {})'.format(learn_rate))
plot_train_test(metrics['acc_train'], metrics['acc_test'], 'Accuracy (lr = {})'.format(learn_rate))
```
Perform actual training.
```
def do_train(model, loader_train, loader_test, checkpoint_name, steps_learning):
t.cuda.empty_cache()
for steps, learn_rate in steps_learning:
metrics = train_eval(t.optim.Adam(model.parameters(), lr = learn_rate, weight_decay = 0), model, loader_train, loader_test, checkpoint_name, steps)
index_max = np.array(metrics['acc_test']).argmax()
print('Best test accuracy :', metrics['acc_test'][index_max])
print('Corresponding precision :', metrics['prec_test'][index_max])
print('Corresponding recall :', metrics['rec_test'][index_max])
print('Corresponding f1 score :', metrics['f_score_test'][index_max])
plot_train_test(metrics['losses_train'], metrics['losses_test'], 'Loss (lr = {})'.format(learn_rate), 'Loss')
plot_train_test(metrics['acc_train'], metrics['acc_test'], 'Accuracy (lr = {})'.format(learn_rate), 'Accuracy')
plot_train_test(metrics['prec_train'], metrics['prec_test'], 'Precision (lr = {})'.format(learn_rate), 'Precision')
plot_train_test(metrics['rec_train'], metrics['rec_test'], 'Recall (lr = {})'.format(learn_rate), 'Recall')
plot_train_test(metrics['f_score_train'], metrics['f_score_test'], 'F1 Score (lr = {})'.format(learn_rate), 'F1 Score')
plot_precision_recall(metrics)
do_train(model_simple, loader_train_simple_img, loader_test_simple_img, 'simple_1', [(50, 1e-4)])
# checkpoint = t.load('/content/checkpint simple_1.tar')
# model_simple.load_state_dict(checkpoint['model'])
```
| github_jupyter |
# graphblas.matrix_multiply
This example will go over how to use the `--graphblas-lower` pass from `graphblas-opt` to lower the `graphblas.matrix_multiply` op.
Let’s first import some necessary modules and generate an instance of our JIT engine.
```
import mlir_graphblas
import mlir_graphblas.sparse_utils
import numpy as np
engine = mlir_graphblas.MlirJitEngine()
```
Here are the passes we'll use.
```
passes = [
"--graphblas-lower",
"--sparsification",
"--sparse-tensor-conversion",
"--linalg-bufferize",
"--func-bufferize",
"--tensor-bufferize",
"--tensor-constant-bufferize",
"--finalizing-bufferize",
"--convert-linalg-to-loops",
"--convert-scf-to-std",
"--convert-std-to-llvm",
]
```
Similar to our examples using the GraphBLAS dialect, we'll need some helper functions to convert sparse tensors to dense tensors.
We'll also need some helpers to convert our sparse matrices to CSC format.
```
mlir_text = """
#trait_densify_csr = {
indexing_maps = [
affine_map<(i,j) -> (i,j)>,
affine_map<(i,j) -> (i,j)>
],
iterator_types = ["parallel", "parallel"]
}
#CSR64 = #sparse_tensor.encoding<{
dimLevelType = [ "dense", "compressed" ],
dimOrdering = affine_map<(i,j) -> (i,j)>,
pointerBitWidth = 64,
indexBitWidth = 64
}>
func @csr_densify4x4(%argA: tensor<4x4xf64, #CSR64>) -> tensor<4x4xf64> {
%output_storage = constant dense<0.0> : tensor<4x4xf64>
%0 = linalg.generic #trait_densify_csr
ins(%argA: tensor<4x4xf64, #CSR64>)
outs(%output_storage: tensor<4x4xf64>) {
^bb(%A: f64, %x: f64):
linalg.yield %A : f64
} -> tensor<4x4xf64>
return %0 : tensor<4x4xf64>
}
#trait_densify_csc = {
indexing_maps = [
affine_map<(i,j) -> (j,i)>,
affine_map<(i,j) -> (i,j)>
],
iterator_types = ["parallel", "parallel"]
}
#CSC64 = #sparse_tensor.encoding<{
dimLevelType = [ "dense", "compressed" ],
dimOrdering = affine_map<(i,j) -> (j,i)>,
pointerBitWidth = 64,
indexBitWidth = 64
}>
func @csc_densify4x4(%argA: tensor<4x4xf64, #CSC64>) -> tensor<4x4xf64> {
%output_storage = constant dense<0.0> : tensor<4x4xf64>
%0 = linalg.generic #trait_densify_csc
ins(%argA: tensor<4x4xf64, #CSC64>)
outs(%output_storage: tensor<4x4xf64>) {
^bb(%A: f64, %x: f64):
linalg.yield %A : f64
} -> tensor<4x4xf64>
return %0 : tensor<4x4xf64>
}
func @convert_csr_to_csc(%sparse_tensor: tensor<?x?xf64, #CSR64>) -> tensor<?x?xf64, #CSC64> {
%answer = graphblas.convert_layout %sparse_tensor : tensor<?x?xf64, #CSR64> to tensor<?x?xf64, #CSC64>
return %answer : tensor<?x?xf64, #CSC64>
}
"""
```
Let's compile our MLIR code.
```
engine.add(mlir_text, passes)
```
## Overview of graphblas.matrix_multiply
Here, we'll show how to use the `graphblas.matrix_multiply` op.
`graphblas.matrix_multiply` takes a sparse matrix operand in CSR format, a sparse matrix operand in CSC format, and a `semiring` attribute.
The single `semiring` attribute indicates an element-wise operator and an aggregation operator. For example, the plus-times semiring indicates an element-wise operator of multiplication and an aggregation operator of addition/summation. For more details about semirings, see [here](https://en.wikipedia.org/wiki/GraphBLAS).
`graphblas.matrix_multiply` applies the semiring's element-wise operator and aggregation operator in matrix-multiply order over the two given sparse matrices. For example, using `graphblas.matrix_multiply` with the plus-times semiring will get a matrix that is the result of a conventional matrix multiply.
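As a rough mental model (this is plain NumPy, not the GraphBLAS dialect, and it ignores sparsity by treating every position as a stored value), a semiring matrix multiply just swaps the multiply/add of an ordinary matrix product for the semiring's element-wise and aggregation operators:
```
import numpy as np

def semiring_matmul(a, b, elementwise, aggregate):
    # Dense sketch: every position is treated as a present (stored) value.
    rows, inner = a.shape
    cols = b.shape[1]
    out = np.zeros((rows, cols))
    for i in range(rows):
        for j in range(cols):
            out[i, j] = aggregate(elementwise(a[i, k], b[k, j]) for k in range(inner))
    return out

a = np.array([[1.0, 2.0], [3.0, 4.0]])
b = np.array([[5.0, 6.0], [7.0, 8.0]])

plus_times = semiring_matmul(a, b, lambda x, y: x * y, sum)  # ordinary matrix product
plus_plus = semiring_matmul(a, b, lambda x, y: x + y, sum)   # "plus_plus" semiring
print(np.allclose(plus_times, a @ b))  # True
print(plus_plus)
```
With the plus-times pair this reproduces `a @ b`; with plus-plus it instead sums `a[i, k] + b[k, j]` over the inner dimension.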
Here's an example use of the `graphblas.matrix_multiply` op:
```
%answer = graphblas.matrix_multiply %argA, %argB, %mask { semiring = "plus_times" } : (tensor<2x2xf64, #CSR64>, tensor<2x2xf64, #CSC64>, tensor<2x2xf64, #CSR64>) to tensor<2x2xf64, #CSR64>
```
The supported options for the `semiring` attribute are "plus_pair", "plus_plus", and "plus_times".
`graphblas.matrix_multiply` can also take an optional mask operand (a CSR matrix) as shown in this example:
```
%answer = graphblas.matrix_multiply %argA, %argB, %mask { semiring = "plus_times" } : (tensor<2x3xf64, #CSR64>, tensor<3x2xf64, #CSC64>, tensor<2x2xf64, #CSR64>) to tensor<2x2xf64, #CSR64>
```
The mask operand must have the same shape as the output matrix. The mask operand acts as a boolean mask (though it doesn't necessarily have to have a boolean element type) for the result, which increases performance since the mask indicates which values in the output do not have to be calculated.
`graphblas.matrix_multiply` can also take an optional [region](https://mlir.llvm.org/docs/LangRef/#regions) as shown in this example:
```
%cf4 = constant 4.0 : f64
%answer = graphblas.matrix_multiply %argA, %argB { semiring = "plus_times" } : (tensor<?x?xf64, #CSR64>, tensor<?x?xf64, #CSC64>) to tensor<?x?xf64, #CSR64> {
^bb0(%value: f64):
%result = std.addf %value, %cf4: f64
graphblas.yield %result : f64
}
```
The NumPy equivalent of this code would be `answer = (argA @ argB) + 4.0`.
The region specifies element-wise post-processing done on the values that survived the masking (it applies to all elements if there is no mask). We'll go into more detail later on how to write a region using `graphblas.yield`.
Let's create some example input matrices.
```
indices = np.array(
[
[0, 3],
[1, 3],
[2, 0],
[3, 0],
[3, 1],
],
dtype=np.uint64,
)
values = np.array([1, 2, 3, 4, 5], dtype=np.float64)
sizes = np.array([4, 4], dtype=np.uint64)
sparsity = np.array([False, True], dtype=np.bool8)
A = mlir_graphblas.sparse_utils.MLIRSparseTensor(indices, values, sizes, sparsity)
indices = np.array(
[
[0, 1],
[0, 3],
[1, 1],
[1, 3],
[2, 0],
[2, 2],
[3, 0],
[3, 2],
],
dtype=np.uint64,
)
values = np.array([1, 2, 3, 4, 5, 6, 7, 8], dtype=np.float64)
sizes = np.array([4, 4], dtype=np.uint64)
sparsity = np.array([False, True], dtype=np.bool8)
B_csr = mlir_graphblas.sparse_utils.MLIRSparseTensor(indices, values, sizes, sparsity)
B = engine.convert_csr_to_csc(B_csr)
indices = np.array(
[
[0, 1],
[0, 2],
[1, 1],
[1, 2],
[2, 1],
[2, 2],
[3, 1],
[3, 2],
],
dtype=np.uint64,
)
values = np.array([1, 1, 1, 1, 1, 1, 1, 1], dtype=np.float64)
sizes = np.array([4, 4], dtype=np.uint64)
sparsity = np.array([False, True], dtype=np.bool8)
mask = mlir_graphblas.sparse_utils.MLIRSparseTensor(indices, values, sizes, sparsity)
A_dense = engine.csr_densify4x4(A)
A_dense
B_dense = engine.csc_densify4x4(B)
B_dense
mask_dense = engine.csr_densify4x4(mask)
mask_dense
```
## graphblas.matrix_multiply (Plus-Times Semiring)
Here, we'll simply perform a conventional matrix-multiply by using `graphblas.matrix_multiply` with the plus-times semiring.
```
mlir_text = """
#CSR64 = #sparse_tensor.encoding<{
dimLevelType = [ "dense", "compressed" ],
dimOrdering = affine_map<(i,j) -> (i,j)>,
pointerBitWidth = 64,
indexBitWidth = 64
}>
#CSC64 = #sparse_tensor.encoding<{
dimLevelType = [ "dense", "compressed" ],
dimOrdering = affine_map<(i,j) -> (j,i)>,
pointerBitWidth = 64,
indexBitWidth = 64
}>
module {
func @matrix_multiply_plus_times(%a: tensor<?x?xf64, #CSR64>, %b: tensor<?x?xf64, #CSC64>) -> tensor<?x?xf64, #CSR64> {
%answer = graphblas.matrix_multiply %a, %b { semiring = "plus_times" } : (tensor<?x?xf64, #CSR64>, tensor<?x?xf64, #CSC64>) to tensor<?x?xf64, #CSR64>
return %answer : tensor<?x?xf64, #CSR64>
}
}
"""
engine.add(mlir_text, passes)
sparse_matmul_result = engine.matrix_multiply_plus_times(A, B)
engine.csr_densify4x4(sparse_matmul_result)
```
The result looks sane. Let's verify that it has the same behavior as NumPy.
```
np.all(A_dense @ B_dense == engine.csr_densify4x4(sparse_matmul_result))
```
## graphblas.matrix_multiply (Plus-Plus Semiring with Mask)
Here, we'll perform a matrix-multiply with the plus-plus semiring. We'll show the result with and without a mask to demonstrate how the masking works.
```
mlir_text = """
#CSR64 = #sparse_tensor.encoding<{
dimLevelType = [ "dense", "compressed" ],
dimOrdering = affine_map<(i,j) -> (i,j)>,
pointerBitWidth = 64,
indexBitWidth = 64
}>
#CSC64 = #sparse_tensor.encoding<{
dimLevelType = [ "dense", "compressed" ],
dimOrdering = affine_map<(i,j) -> (j,i)>,
pointerBitWidth = 64,
indexBitWidth = 64
}>
module {
func @matrix_multiply_plus_plus_no_mask(%a: tensor<?x?xf64, #CSR64>, %b: tensor<?x?xf64, #CSC64>) -> tensor<?x?xf64, #CSR64> {
%answer = graphblas.matrix_multiply %a, %b { semiring = "plus_plus" } : (tensor<?x?xf64, #CSR64>, tensor<?x?xf64, #CSC64>) to tensor<?x?xf64, #CSR64>
return %answer : tensor<?x?xf64, #CSR64>
}
func @matrix_multiply_plus_plus(%a: tensor<?x?xf64, #CSR64>, %b: tensor<?x?xf64, #CSC64>, %m: tensor<?x?xf64, #CSR64>) -> tensor<?x?xf64, #CSR64> {
%answer = graphblas.matrix_multiply %a, %b, %m { semiring = "plus_plus" } : (tensor<?x?xf64, #CSR64>, tensor<?x?xf64, #CSC64>, tensor<?x?xf64, #CSR64>) to tensor<?x?xf64, #CSR64>
return %answer : tensor<?x?xf64, #CSR64>
}
}
"""
engine.add(mlir_text, passes)
no_mask_result = engine.matrix_multiply_plus_plus_no_mask(A, B)
with_mask_result = engine.matrix_multiply_plus_plus(A, B, mask)
engine.csr_densify4x4(no_mask_result)
engine.csr_densify4x4(with_mask_result)
```
Note how the results in the masked output only have elements present in the positions where the mask had elements present.
Since we can't verify the results via NumPy directly, given that its matrix multiply implementation doesn't support semirings, we'll leave the task of verifying the results as an exercise for the reader. Note that if we're applying the element-wise operation to the values at two positions (one from each sparse tensor) and one position has a value but the other does not, then the element-wise operation for these two positions will contribute no value to be aggregated.
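If you do want a dense cross-check for the unmasked plus-plus result above, one possible sketch (not part of the original notebook) treats exact zeros in the densified arrays as missing entries, which is safe here because every stored value in these examples is non-zero:
```
def dense_semiring_reference(a_dense, b_dense, elementwise):
    # Reference semiring matmul where exact zeros are treated as missing entries.
    rows, inner = a_dense.shape
    cols = b_dense.shape[1]
    out = np.zeros((rows, cols))
    for i in range(rows):
        for j in range(cols):
            for k in range(inner):
                if a_dense[i, k] != 0 and b_dense[k, j] != 0:  # both operands present
                    out[i, j] += elementwise(a_dense[i, k], b_dense[k, j])
    return out

expected = dense_semiring_reference(A_dense, B_dense, lambda x, y: x + y)
np.all(expected == engine.csr_densify4x4(no_mask_result))
```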
## graphblas.matrix_multiply (Plus-Pair Semiring with Region)
Here, we'll perform a matrix-multiply with the plus-pair semiring. We'll show the result without using a region and with a region.
The element-wise operation of the plus-pair semiring is defined as `pair(x, y) = 1`.
```
mlir_text = """
#CSR64 = #sparse_tensor.encoding<{
dimLevelType = [ "dense", "compressed" ],
dimOrdering = affine_map<(i,j) -> (i,j)>,
pointerBitWidth = 64,
indexBitWidth = 64
}>
#CSC64 = #sparse_tensor.encoding<{
dimLevelType = [ "dense", "compressed" ],
dimOrdering = affine_map<(i,j) -> (j,i)>,
pointerBitWidth = 64,
indexBitWidth = 64
}>
module {
func @matrix_multiply_plus_pair_no_region(%a: tensor<?x?xf64, #CSR64>, %b: tensor<?x?xf64, #CSC64>) -> tensor<?x?xf64, #CSR64> {
%answer = graphblas.matrix_multiply %a, %b { semiring = "plus_pair" } : (tensor<?x?xf64, #CSR64>, tensor<?x?xf64, #CSC64>) to tensor<?x?xf64, #CSR64>
return %answer : tensor<?x?xf64, #CSR64>
}
func @matrix_multiply_plus_pair_and_square(%a: tensor<?x?xf64, #CSR64>, %b: tensor<?x?xf64, #CSC64>) -> tensor<?x?xf64, #CSR64> {
%answer = graphblas.matrix_multiply %a, %b { semiring = "plus_pair" } : (tensor<?x?xf64, #CSR64>, tensor<?x?xf64, #CSC64>) to tensor<?x?xf64, #CSR64> {
^bb0(%value: f64):
%result = std.mulf %value, %value: f64
graphblas.yield %result : f64
}
return %answer : tensor<?x?xf64, #CSR64>
}
}
"""
engine.add(mlir_text, passes)
```
The code in the region of `matrix_multiply_plus_pair_and_square` simply squares each individual element's value. `graphblas.yield` is used here to indicate the result of each element-wise squaring.
Let's first get our results without the region.
`matrix_multiply_plus_pair_no_region` simply does a matrix multiply with the plus-pair semiring.
```
no_region_result = engine.matrix_multiply_plus_pair_no_region(A, B)
engine.csr_densify4x4(no_region_result)
```
Let's now get the results from `matrix_multiply_plus_pair_and_square`.
```
with_region_result = engine.matrix_multiply_plus_pair_and_square(A, B)
engine.csr_densify4x4(with_region_result)
```
Let's verify that our results are sane.
```
np.all(engine.csr_densify4x4(with_region_result) == engine.csr_densify4x4(no_region_result)**2)
```
| github_jupyter |
<a href="https://colab.research.google.com/github/ProfessorDong/Deep-Learning-Course-Examples/blob/master/CNN_Examples/ComputerVision0.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
#Classify 10 different object with Convolutional Neural Network
###Dataset
The dataset we will use is built into tensorflow and called the [**CIFAR Image Dataset.**](https://www.cs.toronto.edu/~kriz/cifar.html) It contains 60,000 32x32 color images with 6000 images of each class.
The labels in this dataset are the following:
- Airplane
- Automobile
- Bird
- Cat
- Deer
- Dog
- Frog
- Horse
- Ship
- Truck
*This tutorial is based on the guide from the TensorFlow documentation: https://www.tensorflow.org/tutorials/images/cnn*
Load Python libraries
```
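# A minimal sketch of the imports used in the rest of this tutorial
# (assuming the standard TensorFlow/Keras CIFAR-10 workflow described above).
import tensorflow as tf
from tensorflow.keras import datasets, layers, models
import matplotlib.pyplot as plt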
```
Load the image data and split into "train" and "test" data
Normalize the pixel values to be between 0 and 1
Define class names
```
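# Sketch: load CIFAR-10, scale pixel values to [0, 1] and define the class names.
(train_images, train_labels), (test_images, test_labels) = datasets.cifar10.load_data()
train_images, test_images = train_images / 255.0, test_images / 255.0
class_names = ['airplane', 'automobile', 'bird', 'cat', 'deer',
               'dog', 'frog', 'horse', 'ship', 'truck']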
```
Show an example image
```
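# Sketch: show one training image with its label (index 0 is an arbitrary choice).
IMG_INDEX = 0
plt.imshow(train_images[IMG_INDEX])
plt.xlabel(class_names[train_labels[IMG_INDEX][0]])
plt.show()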
```
##CNN Architecture
A common architecture for a CNN is a stack of Conv2D and MaxPooling2D layers followed by a few densely connected layers.
The stack of convolutional and max pooling layers extracts the features from the image. These features are then flattened and fed to densely connected layers that determine the class of an image based on the presence of features.
##Add Convolutional Layers
**Layer 1**
The input shape of our data will be 32, 32, 3 and we will process 32 filters of size 3x3 over our input data. We will also apply the activation function relu to the output of each convolution operation.
**Layer 2**
This layer will perform the max pooling operation using 2x2 samples and a stride of 2.
**Other Layers**
The next set of layers do very similar things but take as input the feature map from the previous layer. They also increase the number of filters from 32 to 64. We can afford (computationally) to add more depth because the data shrinks in its spatial dimensions as it passes through the layers.
```
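# Sketch of the convolutional base described above: 32 filters of 3x3 on a
# 32x32x3 input, 2x2 max pooling, then two more blocks with 64 filters each.
model = models.Sequential()
model.add(layers.Conv2D(32, (3, 3), activation='relu', input_shape=(32, 32, 3)))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(64, (3, 3), activation='relu'))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(64, (3, 3), activation='relu'))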
```
Show model summary:
Total trainable parameters: 56,320
The depth of the feature map increases, but the spatial dimensions shrink.
```
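# The convolutional base alone has the 56,320 trainable parameters quoted above.
model.summary()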
```
###Feature Maps
The term *feature map* stands for a 3D tensor with two spatial axes (width and height) and one depth axis. Our convolutional layers take feature maps as their input and return a new feature map that represents the presence of specific filters from the previous feature map. These are what we call *response maps*.
##Add Dense Layers
So far, we have just completed the **convolutional base**. Now we need to take these extracted features and add a way to classify them.
Add a fully connected layer with 64 output nodes
Add a fully connected layer with 10 (final) output nodes
```
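# Sketch: flatten the feature maps and add the two dense layers described above.
model.add(layers.Flatten())
model.add(layers.Dense(64, activation='relu'))
model.add(layers.Dense(10))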
```
Show model summary
Total trainable parameters: 122,570
```
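# With the dense classifier attached, the model has the 122,570 trainable parameters quoted above.
model.summary()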
```
##Train the Model
Train and compile the model using the recommended hyperparameters from TensorFlow.
```
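# Sketch: compile and train as in the TensorFlow tutorial referenced above
# (10 epochs is a typical choice there, not a requirement).
model.compile(optimizer='adam',
              loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
              metrics=['accuracy'])
history = model.fit(train_images, train_labels, epochs=10,
                    validation_data=(test_images, test_labels))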
```
##Evaluate the Model
We evaluate how well the model performs by looking at its performance on the test data set.
You should get an accuracy of about 70%. This isn't bad for a simple model like this, but we'll dive into some better approaches for computer vision.
```
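# Sketch: measure loss and accuracy on the held-out test set.
test_loss, test_acc = model.evaluate(test_images, test_labels, verbose=2)
print(test_acc)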
```
##Working with Small Datasets
In the situation where you don't have millions of images it is difficult to train a CNN from scratch that performs very well. This is why we will learn about a few techniques we can use to train CNN's on small datasets of just a few thousand images.
###Data Augmentation
To avoid overfitting and create a larger dataset from a smaller one, we can use a technique called data augmentation. This means performing random transformations on our images so that our model can generalize better. These transformations can be things like compressions, rotations, stretches and even color changes.
Fortunately, Keras can help us do this. Look at the code below for an example of data augmentation.
```
from keras.preprocessing import image
from keras.preprocessing.image import ImageDataGenerator
# creates a data generator object that transforms images
datagen = ImageDataGenerator(
rotation_range=40,
width_shift_range=0.2,
height_shift_range=0.2,
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=True,
fill_mode='nearest')
# pick an image to transform
test_img = train_images[20]
img = image.img_to_array(test_img) # convert image to numpy arry
img = img.reshape((1,) + img.shape) # reshape image
i = 0
for batch in datagen.flow(img, save_prefix='test', save_format='jpeg'): # this loops runs forever until we break, saving images to current directory with specified prefix
plt.figure(i)
plot = plt.imshow(image.img_to_array(batch[0]))
i += 1
if i > 4: # show 4 images
break
plt.show()
```
#Use a Pretrained Model
In this section we will combine the techniques we learned above and use a pretrained model with fine tuning to classify images of dogs and cats using a small dataset.
##Pretrained Models
In this section we will use a pretrained CNN as part of our own custom network to improve the accuracy of our model. We know that CNN's alone (with no dense layers) don't do anything other than map the presence of features from our input. This means we can use a pretrained CNN, one trained on millions of images, as the start of our model. This will allow us to have a very good convolutional base before adding our own densely layered classifier at the end. In fact, by using this technique we can train a very good classifier for a relatively small dataset (< 10,000 images). This is because the ConvNet already has a very good idea of what features to look for in an image and can find them very effectively. So, if we can determine the presence of features, all the rest of the model needs to do is determine which combination of features makes a specific image.
##Fine Tuning
When we employ the technique defined above, we will often want to tweak the final layers in our convolutional base to work better for our specific problem. This involves not touching or retraining the earlier layers in our convolutional base but only adjusting the final few. We do this because the first layers in our base are very good at extracting low-level features like lines and edges, things that are similar for any kind of image, whereas the later layers are better at picking up very specific features like shapes or even eyes. If we adjust the final layers, then we can look only for features relevant to our very specific problem. A short sketch of this idea is shown below.
*This tutorial is based on the following guide from the TensorFlow documentation: https://www.tensorflow.org/tutorials/images/transfer_learning*
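As a rough illustration (not something this notebook executes), fine tuning with Keras could look like the sketch below, where `base_model` is the pretrained base defined later in this notebook and `fine_tune_at` is an arbitrary cut-off index chosen for illustration:
```
base_model.trainable = True          # unfreeze the whole base first
fine_tune_at = 100                   # arbitrary cut-off chosen for illustration
for layer in base_model.layers[:fine_tune_at]:
    layer.trainable = False          # keep the early, generic layers frozen
```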
```
#Imports
import os
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
keras = tf.keras
```
###Load the Dataset
We will load the *cats_vs_dogs* dataset from the modoule tensorflow_datatsets.
This dataset contains (image, label) pairs where images have different dimensions and 3 color channels.
```
import tensorflow_datasets as tfds
tfds.disable_progress_bar()
# split the data manually into 80% training, 10% testing, 10% validation
(raw_train, raw_validation, raw_test), metadata = tfds.load(
'cats_vs_dogs',
split=['train[:80%]', 'train[80%:90%]', 'train[90%:]'],
with_info=True,
as_supervised=True,
)
```
Display images from the dataset
```
get_label_name = metadata.features['label'].int2str # creates a function object that we can use to get labels
for image, label in raw_train.take(5):
plt.figure()
plt.imshow(image)
plt.title(get_label_name(label))
```
###Data Preprocessing
Since the sizes of our images are all different, we need to convert them all to the same size. We can create a function that will do that for us below.
```
IMG_SIZE = 160 # All images will be resized to 160x160
def format_example(image, label):
"""
returns an image that is reshaped to IMG_SIZE
"""
image = tf.cast(image, tf.float32)
image = (image/127.5) - 1
image = tf.image.resize(image, (IMG_SIZE, IMG_SIZE))
return image, label
```
Now we can apply this function to all our images using ```.map()```.
```
train = raw_train.map(format_example)
validation = raw_validation.map(format_example)
test = raw_test.map(format_example)
```
Let's have a look at our images now.
```
for image, label in train.take(2):
plt.figure()
plt.imshow(image)
plt.title(get_label_name(label))
```
Finally we will shuffle and batch the images.
```
BATCH_SIZE = 32
SHUFFLE_BUFFER_SIZE = 1000
train_batches = train.shuffle(SHUFFLE_BUFFER_SIZE).batch(BATCH_SIZE)
validation_batches = validation.batch(BATCH_SIZE)
test_batches = test.batch(BATCH_SIZE)
```
Now if we look at the shape of an original image vs the new image we will see it has been changed.
```
for img, label in raw_train.take(2):
print("Original shape:", img.shape)
for img, label in train.take(2):
print("New shape:", img.shape)
```
##Pick a Pretrained Model
The model we are going to use as the convolutional base for our model is the **MobileNet V2** developed at Google. This model is trained on 1.4 million images and has 1000 different classes.
We want to use this model but only its convolutional base. So, when we load in the model, we'll specify that we don't want to load the top (classification) layer. We'll tell the model what input shape to expect and to use the pretrained weights from *imagenet* (Google's dataset).
```
IMG_SHAPE = (IMG_SIZE, IMG_SIZE, 3)
# Create the base model from the pre-trained model MobileNet V2
base_model = tf.keras.applications.MobileNetV2(input_shape=IMG_SHAPE,
include_top=False,
weights='imagenet')
base_model.summary()
```
At this point, this *base_model* will simply output a tensor of shape (32, 5, 5, 1280), which is a feature extraction from a batch of 32 of our original (160, 160, 3) images. The 1280 is the number of different filters/features, while the 32 is just the batch size.
```
for image, _ in train_batches.take(1):
pass
feature_batch = base_model(image)
print(feature_batch.shape)
```
###Freeze the Base
The term **freezing** refers to disabling the training property of a layer. It simply means we won’t make any changes to the weights of any layers that are frozen during training. This is important as we don't want to change the convolutional base that already has learned weights.
```
base_model.trainable = False
base_model.summary()
```
###Adding our Classifier
Now that we have our base layer set up, we can add the classifier. Instead of flattening the feature map of the base layer, we will use a global average pooling layer that averages the entire 5x5 area of each 2D feature map and returns a single 1280-element vector per image (one value per filter).
```
global_average_layer = tf.keras.layers.GlobalAveragePooling2D()
```
Finally, we will add the prediction layer, which will be a single dense neuron. We can do this because we only have two classes to predict.
```
prediction_layer = keras.layers.Dense(1)
```
Now we will combine these layers together in a model.
```
model = tf.keras.Sequential([
base_model,
global_average_layer,
prediction_layer
])
model.summary()
```
###Train the Model
We will train and compile the model. We will use a very small learning rate to ensure that the model does not have any major changes made to it.
```
base_learning_rate = 0.0001
model.compile(optimizer=tf.keras.optimizers.RMSprop(lr=base_learning_rate),
loss=tf.keras.losses.BinaryCrossentropy(from_logits=True),
metrics=['accuracy'])
# We can evaluate the model right now to see how it does before training it on our new images
initial_epochs = 3
validation_steps=20
loss0,accuracy0 = model.evaluate(validation_batches, steps = validation_steps)
# Now we can train it on our images
history = model.fit(train_batches,
epochs=initial_epochs,
validation_data=validation_batches)
acc = history.history['accuracy']
print(acc)
model.save("dogs_vs_cats.h5") # we can save the model and reload it at anytime in the future
new_model = tf.keras.models.load_model('dogs_vs_cats.h5')
```
| github_jupyter |
## Library Imports
```
from time import time
notebook_start_time = time()
import os
import re
import gc
import pickle
import random as r
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import torch
from torch import nn, optim
from torch.utils.data import Dataset
from torch.utils.data import DataLoader as DL
from torch.nn.utils import weight_norm as WN
from torchvision import models, transforms
from time import time
from sklearn.model_selection import KFold
from sklearn.metrics import mean_squared_error
from sklearn.preprocessing import StandardScaler
import warnings
warnings.filterwarnings("ignore")
```
## Constants and Utilities
```
SEED = 49
DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")
NUM_FEATURES = 1664
TRANSFORM = transforms.Compose([transforms.ToTensor(),
transforms.Normalize([0.485, 0.456, 0.406],
[0.229, 0.224, 0.225]),
])
PATH = "../input/petfinder-pawpularity-score"
IMAGE_PATH = "../input/petfinder-pretrained-images-nocrop"
verbose = True
DEBUG = False
sc_y = StandardScaler()
def breaker(num=50, char="*") -> None:
print("\n" + num*char + "\n")
def get_targets(path: str) -> np.ndarray:
df = pd.read_csv(os.path.join(path, "train.csv"), engine="python")
targets = df["Pawpularity"].copy().values
return targets.reshape(-1, 1)
def show_graphs(L: list, title=None) -> None:
TL, VL = [], []
for i in range(len(L)):
TL.append(L[i]["train"])
VL.append(L[i]["valid"])
x_Axis = np.arange(1, len(L) + 1)
plt.figure()
plt.plot(x_Axis, TL, "r", label="train")
plt.plot(x_Axis, VL, "b", label="valid")
plt.grid()
plt.legend()
if title:
plt.title("{} Loss".format(title))
else:
plt.title("Loss")
plt.show()
```
## Dataset Template and Build Dataloader
```
class DS(Dataset):
def __init__(self, images=None, targets=None, transform=None):
self.images = images
self.targets = targets
self.transform = transform
def __len__(self):
return self.images.shape[0]
def __getitem__(self, idx):
return self.transform(self.images[idx]), torch.FloatTensor(self.targets[idx])
def build_dataloaders(tr_images: np.ndarray, va_images: np.ndarray,
tr_targets: np.ndarray, va_targets: np.ndarray,
batch_size: int, seed: int, transform: transforms.transforms.Compose):
if verbose:
breaker()
print("Building Train and Validation DataLoaders ...")
tr_data_setup = DS(images=tr_images, targets=tr_targets, transform=transform)
va_data_setup = DS(images=va_images, targets=va_targets, transform=transform)
dataloaders = {
"train" : DL(tr_data_setup, batch_size=batch_size, shuffle=True, generator=torch.manual_seed(seed)),
"valid" : DL(va_data_setup, batch_size=batch_size, shuffle=False)
}
return dataloaders
```
## Build Model
```
def build_model(IL: int, seed: int):
class Model(nn.Module):
def __init__(self, IL=None):
super(Model, self).__init__()
self.features = models.densenet169(pretrained=True, progress=False)
self.features = nn.Sequential(*[*self.features.children()][:-1])
self.freeze()
self.features.add_module("Adaptive Average Pool", nn.AdaptiveAvgPool2d(output_size=(1, 1)))
self.features.add_module("Flatten", nn.Flatten())
self.predictor = nn.Sequential()
self.predictor.add_module("BN", nn.BatchNorm1d(num_features=IL, eps=1e-5))
self.predictor.add_module("FC", WN(nn.Linear(in_features=IL, out_features=1)))
def freeze(self):
for params in self.parameters():
params.requires_grad = False
for names, params in self.named_parameters():
if re.match(r"features.0.denseblock4", names, re.IGNORECASE):
params.requires_grad = True
if re.match(r"features.0.norm5", names, re.IGNORECASE):
params.requires_grad = True
def get_optimizer(self, lr=1e-3, wd=0.0):
params = [p for p in self.parameters() if p.requires_grad]
return optim.SGD(params, lr=lr, momentum=0.9, weight_decay=wd)
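        def get_plateau_scheduler(self, optimizer, patience=5, eps=1e-8):
            # Assumed helper (not in the original notebook): train() expects this
            # method when a patience/eps pair is supplied, so a ReduceLROnPlateau
            # scheduler is sketched here to make that code path runnable.
            return optim.lr_scheduler.ReduceLROnPlateau(optimizer, patience=patience, eps=eps)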
def forward(self, x1, x2=None):
if x2 is not None:
x1 = self.features(x1)
x2 = self.features(x2)
return self.predictor(x1), self.predictor(x2)
else:
x1 = self.features(x1)
return self.predictor(x1)
if verbose:
breaker()
print("Building Model ...")
torch.manual_seed(seed)
model = Model(IL=IL)
return model
```
## Fit and Predict
```
def fit(model=None, optimizer=None, scheduler=None,
epochs=None, early_stopping_patience=None,
dataloaders=None, fold=None, verbose=False) -> tuple:
name = "./Fold_{}_state.pt".format(fold)
if verbose:
breaker()
print("Training Fold {}...".format(fold))
breaker()
else:
print("Training Fold {}...".format(fold))
Losses = []
bestLoss = {"train" : np.inf, "valid" : np.inf}
start_time = time()
for e in range(epochs):
e_st = time()
epochLoss = {"train" : np.inf, "valid" : np.inf}
for phase in ["train", "valid"]:
if phase == "train":
model.train()
else:
model.eval()
lossPerPass = []
for X, y in dataloaders[phase]:
X, y = X.to(DEVICE), y.to(DEVICE)
optimizer.zero_grad()
with torch.set_grad_enabled(phase == "train"):
output = model(X)
loss = torch.nn.MSELoss()(output, y)
if phase == "train":
loss.backward()
optimizer.step()
lossPerPass.append(loss.item())
epochLoss[phase] = np.mean(np.array(lossPerPass))
Losses.append(epochLoss)
if early_stopping_patience:
if epochLoss["valid"] < bestLoss["valid"]:
bestLoss = epochLoss
BLE = e + 1
torch.save({"model_state_dict": model.state_dict(),
"optim_state_dict": optimizer.state_dict()},
name)
early_stopping_step = 0
else:
early_stopping_step += 1
if early_stopping_step > early_stopping_patience:
if verbose:
print("\nEarly Stopping at Epoch {}".format(e))
break
if epochLoss["valid"] < bestLoss["valid"]:
bestLoss = epochLoss
BLE = e + 1
torch.save({"model_state_dict": model.state_dict(),
"optim_state_dict": optimizer.state_dict()},
name)
if scheduler:
scheduler.step(epochLoss["valid"])
if verbose:
print("Epoch: {} | Train Loss: {:.5f} | Valid Loss: {:.5f} | Time: {:.2f} seconds".format(e+1, epochLoss["train"], epochLoss["valid"], time()-e_st))
if verbose:
breaker()
print("Best Validation Loss at Epoch {}".format(BLE))
breaker()
print("Time Taken [{} Epochs] : {:.2f} minutes".format(len(Losses), (time()-start_time)/60))
breaker()
print("Training Completed")
breaker()
return Losses, BLE, name
#####################################################################################################
def predict_batch(model=None, dataloader=None, mode="test", path=None) -> np.ndarray:
model.load_state_dict(torch.load(path, map_location=DEVICE)["model_state_dict"])
model.to(DEVICE)
model.eval()
y_pred = torch.zeros(1, 1).to(DEVICE)
if re.match(r"valid", mode, re.IGNORECASE):
for X, _ in dataloader:
X = X.to(DEVICE)
with torch.no_grad():
output = model(X)
y_pred = torch.cat((y_pred, output.view(-1, 1)), dim=0)
elif re.match(r"test", mode, re.IGNORECASE):
for X in dataloader:
X = X.to(DEVICE)
with torch.no_grad():
output = model(X)
y_pred = torch.cat((y_pred, output.view(-1, 1)), dim=0)
return y_pred[1:].detach().cpu().numpy()
```
## Train
```
def train(images: np.ndarray, targets: np.ndarray,
n_splits: int, batch_size: int,
lr: float, wd: float,
epochs: int, early_stopping: int,
patience=None, eps=None) -> list:
metrics = []
KFold_start_time = time()
breaker()
print("Performing {} Fold CV ...".format(n_splits))
if verbose:
pass
else:
breaker()
fold = 1
for tr_idx, va_idx in KFold(n_splits=n_splits, shuffle=True, random_state=SEED).split(images):
tr_images, va_images = images[tr_idx], images[va_idx]
tr_targets, va_targets = targets[tr_idx], targets[va_idx]
tr_targets = sc_y.fit_transform(tr_targets)
va_targets = sc_y.transform(va_targets)
dataloaders = build_dataloaders(tr_images, va_images,
tr_targets, va_targets,
batch_size, SEED, TRANSFORM)
model = build_model(IL=NUM_FEATURES, seed=SEED).to(DEVICE)
optimizer = model.get_optimizer(lr=lr, wd=wd)
scheduler = None
if isinstance(patience, int) and isinstance(eps, float):
scheduler = model.get_plateau_scheduler(optimizer, patience, eps)
L, _, name = fit(model=model, optimizer=optimizer, scheduler=scheduler,
epochs=epochs, early_stopping_patience=early_stopping,
dataloaders=dataloaders, fold=fold, verbose=verbose)
y_pred = predict_batch(model=model, dataloader=dataloaders["valid"], mode="valid", path=name)
RMSE = np.sqrt(mean_squared_error(sc_y.inverse_transform(y_pred), sc_y.inverse_transform(va_targets)))
if verbose:
print("Validation RMSE [Fold {}]: {:.5f}".format(fold, RMSE))
breaker()
show_graphs(L)
metrics_dict = {"Fold" : fold, "RMSE" : RMSE}
metrics.append(metrics_dict)
fold += 1
breaker()
print("Total Time to {} Fold CV : {:.2f} minutes".format(n_splits, (time() - KFold_start_time)/60))
return metrics, (time() - KFold_start_time)/60
def main():
breaker()
print("Clean Memory, {} Objects Collected ...".format(gc.collect()))
########### Params ###########
if DEBUG:
n_splits = 3
patience, eps = 5, 1e-8
epochs, early_stopping = 5, 5
batch_size = 64
lr = 1e-5
wd = 1e-3
else:
n_splits = 10
patience, eps = 5, 1e-8
epochs, early_stopping = 25, 5
batch_size = 64
lr = 1e-5
wd = 1e-3
##############################
if verbose:
breaker()
print("Loading Data ...")
feature_start_time = time()
images = np.load(os.path.join(IMAGE_PATH, "Images_224x224.npy"))
targets = get_targets(PATH)
# Without Scheduler
metrics, _ = train(images, targets, n_splits, batch_size, lr, wd, epochs, early_stopping, patience=None, eps=None)
# # With Plateau Scheduler
# metrics, _ = train(images, targets, n_splits, batch_size, lr, wd, epochs, early_stopping, patience=patience, eps=eps)
rmse = []
breaker()
for i in range(len(metrics)):
print("Fold {}, RMSE: {:.5f}".format(metrics[i]["Fold"], metrics[i]["RMSE"]))
rmse.append(metrics[i]["RMSE"])
best_index = rmse.index(min(rmse))
breaker()
print("Best RMSE : {:.5f}".format(metrics[best_index]["RMSE"]))
print("Avg RMSE : {:.5f}".format(sum(rmse) / len(rmse)))
breaker()
with open("metrics.pkl", "wb") as fp:
pickle.dump(metrics, fp)
main()
```
## End
```
breaker()
print("Notebook Rumtime : {:.2f} minutes".format((time() - notebook_start_time)/60))
breaker()
```
| github_jupyter |
```
from tqdm.auto import tqdm
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.metrics import roc_auc_score
from sklearn.model_selection import train_test_split , StratifiedKFold
import tensorflow as tf
import tensorflow.keras.backend as K
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.models import Model, load_model, save_model
from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping, ReduceLROnPlateau
from tensorflow.keras.layers import Input,Dense, LSTM, RNN, Bidirectional, GlobalAveragePooling2D , Dropout, Conv1D, Flatten
from transformers import TFAutoModel , AutoTokenizer
# import ray
# from ray import tune
class config:
#train_path = "../input/dravidianlangtech2022-personal/Train_Data_Combined.csv"
#val_path = "../input/dravidianlangtech2022-personal/Validation_Data_Combined.csv"
test_path = "../input/test-for-dravid-lang-tech-new/ta-en-misogyny-test.csv"
save_dir = "./result"
seed = 55
try:
AUTOTUNE = tf.data.AUTOTUNE
except:
AUTOTUNE = tf.data.experimental.AUTOTUNE
epochs = 50
max_len = 64
batch_size = 32
hf_path = "google/muril-base-cased"
tokenizer_path = "../input/with-n-augmentfasttext-abusive-comment-detection-t/result/muril_tokenizer"
model_weights = "../input/with-n-augmentfasttext-abusive-comment-detection-t/result"
def seed_everything(seed = config.seed):
print(f"seeded everything to seed {seed}")
os.environ['PYTHONHASHSEED'] = str(seed)
np.random.seed(seed)
tf.random.set_seed(seed)
if not os.path.exists(config.save_dir):
os.makedirs(config.save_dir)
seed_everything()
col_names = ['text']
df_test = pd.read_csv(config.test_path,names=col_names,sep='\t')
df_test.info()
#Tokenization Process
tokenizer = AutoTokenizer.from_pretrained(config.hf_path)
tokenizer.save_pretrained(os.path.join(config.save_dir , "muril_tokenizer"))
def fast_encode(texts, tokenizer, chunk_size=512, maxlen=config.max_len):
input_ids = []
tt_ids = []
at_ids = []
for i in tqdm(range(0, len(texts), chunk_size)):
text_chunk = texts[i:i+chunk_size]
encs = tokenizer(
text_chunk,
max_length = config.max_len,
padding='max_length',
truncation=True
)
input_ids.extend(encs['input_ids'])
tt_ids.extend(encs['token_type_ids'])
at_ids.extend(encs['attention_mask'])
return {'input_ids': input_ids, 'token_type_ids': tt_ids, 'attention_mask':at_ids}
test_token_data = fast_encode(list(df_test['text'].values), tokenizer)
#train_token_data['label'] = list(df_train['label'].values)
df_tokenized_test = pd.DataFrame(test_token_data)
len(df_tokenized_test['input_ids'][0])
del test_token_data
#preparing dataset
def test_prep_function(embeddings):
input_ids = embeddings['input_ids']
attention_mask = embeddings['attention_mask']
token_type_ids = embeddings['token_type_ids']
#target = tf.cast(target, tf.int32)
return {'input_ids': input_ids ,'token_type_ids':token_type_ids,'attention_mask': attention_mask}
# Detect hardware, return appropriate distribution strategy
try:
# TPU detection. No parameters necessary if TPU_NAME environment variable is
# set: this is always the case on Kaggle.
tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
print('Running on TPU ', tpu.master())
except ValueError:
tpu = None
if tpu:
tf.config.experimental_connect_to_cluster(tpu)
tf.tpu.experimental.initialize_tpu_system(tpu)
strategy = tf.distribute.experimental.TPUStrategy(tpu)
else:
# Default distribution strategy in Tensorflow. Works on CPU and single GPU.
strategy = tf.distribute.get_strategy()
print("REPLICAS: ", strategy.num_replicas_in_sync)
def create_model(transformer_model):
input_id_layer = Input(shape=(config.max_len,) ,dtype = tf.int32 , name = 'input_ids')
attention_mask_layer = Input(shape=(config.max_len,) , dtype = tf.int32 , name = 'attention_mask')
token_type_layer = Input(shape=(config.max_len,) , dtype = tf.int32 , name = 'token_type_ids')
transformer = transformer_model(input_ids = input_id_layer ,token_type_ids=token_type_layer,attention_mask = attention_mask_layer)[0]
x = Dropout(0.5)(transformer)
x = Conv1D(1,1)(x)
x = Flatten()(x)
predictions = Dense(8, activation = "softmax")(x)
model = Model(inputs=[input_id_layer ,token_type_layer, attention_mask_layer], outputs = predictions)
model.compile(
optimizer = Adam(learning_rate= 0.01),
metrics = ['accuracy'],
loss = 'sparse_categorical_crossentropy'
)
return model
with strategy.scope():
transformer_model = TFAutoModel.from_pretrained(config.hf_path)
transformer_model.bert.trainable = False
model = create_model(transformer_model)
model.summary()
tf.keras.utils.plot_model(model, show_shapes=True,show_dtype=True)
from sklearn.metrics import accuracy_score
test_embeddings = {'input_ids': df_tokenized_test['input_ids'].tolist() ,'token_type_ids': df_tokenized_test['token_type_ids'].tolist(),"attention_mask":df_tokenized_test['attention_mask'].tolist()}
#y_train = df_tokenized_train['label']
#y_test = df_tokenized_val['label']
#train_dataset = tf.data.Dataset.from_tensor_slices((train_embeddings , y_train))
test_dataset = tf.data.Dataset.from_tensor_slices((test_embeddings))
test_dataset = (
test_dataset
.map(test_prep_function , num_parallel_calls = config.AUTOTUNE)
.batch(config.batch_size)
.prefetch(config.AUTOTUNE)
)
test_steps = len(test_embeddings['input_ids'])//config.batch_size
model.load_weights(f'{config.model_weights}/muril_fold_trained.h5')
y_predict = model.predict(test_dataset , verbose = 1)
predictions = y_predict
preds_classes = np.argmax(predictions, axis=-1)
preds_classes
df_pred = pd.DataFrame(preds_classes,columns=['label'])
df_pred.replace({0:'Counter-speech',
1:'Homophobia',
2:'Hope-Speech',
3:'Misandry',
4:'Misogyny',
5:'None-of-the-above',
6:'Transphobic',
7:'Xenophobia'},inplace = True)
df_pred
df_test[list(df_pred.columns)] = df_pred
df_test
df_test.to_csv('BpHigh_tamil-english.tsv',sep="\t")
df_test.label.value_counts()
```
| github_jupyter |
```
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split, cross_val_score, GridSearchCV, RepeatedStratifiedKFold
from sklearn.preprocessing import StandardScaler, LabelEncoder
from category_encoders import OneHotEncoder
from sklearn.pipeline import make_pipeline
from sklearn.metrics import f1_score, precision_score, recall_score
from sklearn.linear_model import LogisticRegression
from sklearn.experimental import enable_hist_gradient_boosting
from sklearn.ensemble import AdaBoostClassifier, GradientBoostingClassifier, HistGradientBoostingClassifier, RandomForestClassifier, BaggingClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
import warnings
warnings.filterwarnings('ignore')
df = pd.read_csv('./data/train_10pct.csv')
df.head(5)
df.shape
df.mean().value_counts(normalize=True)
df_labels = pd.read_csv('./data/train_10pct_labels.csv')
df_labels.head(5)
df_labels.shape
# read in data
train_values = pd.read_csv('../data/Proj5_train_values.csv')
train_labels = pd.read_csv('./data/Proj5_train_labels.csv')
# grab first 10% of rows
train_values = train_values.head(int(len(train_values) * 0.1))
train_labels = train_labels.head(int(len(train_labels) * 0.1))
print(f'Train value shape - {train_values.shape}')
print(f'Train labels shape - {train_labels.shape}')
train_values.describe().T
```
---
## Merge Datasets
```
combo = pd.merge(train_values, train_labels, on = 'building_id')
combo.head(5)
plt.figure (figsize=(5,10));
sns.heatmap(combo.corr()[['damage_grade']].sort_values(by='damage_grade', ascending = False),annot=True )
sns.scatterplot(data=combo, x='age', y='damage_grade', hue='age');
```
---
```
#Baseline
train_labels['damage_grade'].value_counts(normalize=True)
le = LabelEncoder()
train_enc = train_values.apply(le.fit_transform)
train_enc
#From Chris
#X = train_enc
#y = trainlabel['damage_grade']
#X_train, X_test, y_train, y_test = train_test_split(X,y,stratify=y, random_state=123)
#pipe_forest = make_pipeline(StandardScaler(), DecisionTreeClassifier())
#params = {'decisiontreeclassifier__max_depth' : [2, 3, 4, 5]}
#grid_forest = GridSearchCV(pipe_forest, param_grid = params)
#grid_forest.fit(X_train,y_train)
#grid_forest.score(X_test,y_test) # I got 0.646815042210284
#grid_forest.best_estimator_
#TTS
X = train_enc
y = train_labels['damage_grade']
#X_train, X_test, y_train, y_test = train_test_split(train_enc,train_labels, random_state=123 )
X_train, X_test, y_train, y_test = train_test_split(X,y,stratify=y, random_state=123)
```
## Model
```
#from Hackathon2
#Cvect and logreg
#pipe = make_pipeline(CountVectorizer(stop_words = 'english'), LogisticRegression(n_jobs=-1))
#
#params = {'countvectorizer__max_features':[500, 1000, 15000, 2000, 2500]}
#
#grid=GridSearchCV(pipe, param_grid=params, n_jobs= -1)
#grid.fit(X_train, y_train)
#
```
### logreg
```
#Cvect and logreg
pipe = make_pipeline(StandardScaler(),LogisticRegression(n_jobs=-1))
#
#params = {'countvectorizer__max_features':[500, 1000, 15000, 2000, 2500]}
#
#grid=GridSearchCV(pipe, n_jobs= -1)
pipe.fit(X_train, y_train)
pipe.score(X_train, y_train)
pipe.score(X_test, y_test)
pipe.get_params().keys()
#LogisticRegression
pipe_lgr = make_pipeline(StandardScaler(), LogisticRegression(n_jobs = -1, max_iter = 1000))
params = {'logisticregression__C' : [0.1, 0.75, 1, 10],
'logisticregression__solver' : ['newton-cg', 'lbfgs', 'liblinear']}
grid_lgr = GridSearchCV(pipe_lgr, param_grid = params)
grid_lgr.fit(X_train, y_train)
print(f'Train Score: {grid_lgr.score(X_train, y_train)}')
print(f'Test Score: {grid_lgr.score(X_test, y_test)}')
grid_lgr.best_params_
```
## Modeling KNN
```
# define models and parameters
# model = KNeighborsClassifier()
# n_neighbors = range(1, 21, 2)
# weights = ['uniform', 'distance']
# #metric = ['euclidean', 'manhattan', 'minkowski']
# metric = ['euclidean']
# # define grid search
# grid = dict(n_neighbors=n_neighbors,weights=weights,metric=metric)
# #cv = RepeatedStratifiedKFold(n_splits=10, n_repeats=3, random_state=1)
# #grid_search = GridSearchCV(estimator=model, param_grid=grid, n_jobs=-1, cv=cv, scoring='accuracy',error_score=0)
# grid_search = GridSearchCV(estimator=model, param_grid=grid, n_jobs=-1, scoring='accuracy',error_score=0)
# grid_result = grid_search.fit(X, y)
# # summarize results
# print("Best: %f using %s" % (grid_result.best_score_, grid_result.best_params_))
# means = grid_result.cv_results_['mean_test_score']
# stds = grid_result.cv_results_['std_test_score']
# params = grid_result.cv_results_['params']
# for mean, stdev, param in zip(means, stds, params):
# print("%f (%f) with: %r" % (mean, stdev, param))
#Basic KNN
X = train_enc
y = train_labels['damage_grade']
X_train, X_test, y_train, y_test = train_test_split(X,y,stratify=y, random_state=123)
pipe_knn = make_pipeline(StandardScaler(),KNeighborsClassifier(n_jobs=-1))
pipe_knn.fit(X_train, y_train)
pipe_knn.score(X_train, y_train)
print(f'Train Score: {pipe_knn.score(X_train, y_train)}')
print(f'Test Score: {pipe_knn.score(X_test, y_test)}')
```
### Trying Veronica's code - KNN testing
```
pipe_knn = make_pipeline(StandardScaler(), KNeighborsClassifier(n_jobs = -1))
# n_neighbors must be odd to avoid an even split
#Note: tried leaf size and p, but it didn't give us any value
params = {'kneighborsclassifier__n_neighbors' : [5, 7, 9, 11]}
grid_knn = GridSearchCV(pipe_knn, param_grid = params)
grid_knn.fit(X_train, y_train)
print(f'Train Score: {grid_knn.score(X_train, y_train)}')
print(f'Test Score: {grid_knn.score(X_test, y_test)}')
grid_knn.best_params_
```
### 5/13 per Jacob, use OHE instead of LabelEncoding
#### LOG with OHE
```
X = train_values
y = train_labels['damage_grade']
X_train, X_test, y_train, y_test = train_test_split(X,y,stratify=y, random_state=123)
```
---
```
#Cvect and logreg
#define X and y
X = train_values
y = train_labels['damage_grade']
X_train, X_test, y_train, y_test = train_test_split(X,y,stratify=y, random_state=123)
#Create pipeline
pipe = make_pipeline(OneHotEncoder(),StandardScaler(with_mean=False), LogisticRegression(n_jobs=-1))
params = {'logisticregression__C' : [0.1, 0.75, 1, 10],
#'logisticregression__solver' : ['newton-cg', 'lbfgs', 'liblinear']
}
grid_lgr = GridSearchCV(pipe, param_grid = params)
grid_lgr.fit(X_train, y_train)
grid_lgr.score(X_test, y_test)
print(f'Train Score: {grid_lgr.score(X_train, y_train)}')
print(f'Test Score: {grid_lgr.score(X_test, y_test)}')
```
#### KNN with OHE
```
#train_values = train_values.head(int(len(train_values) * 0.1))
#train_labels = train_labels.head(int(len(train_labels) * 0.1))
X = train_values
y = train_labels['damage_grade']
X_train, X_test, y_train, y_test = train_test_split(X,y,stratify=y, random_state=123)
pipe_knn = make_pipeline(OneHotEncoder(),StandardScaler(), KNeighborsClassifier(n_jobs = -1))
# n_neighbors must be odd to avoid an even split
params = {'kneighborsclassifier__n_neighbors' : [5, 7, 9, 11]}
#'kneighborsclassifier__leaf_size': [1,5,10,30]}
#define parameters for hypertuning
#params = {
# 'n_neighbors': [5, 7, 9, 11],
# 'leaf_size': (1,30),
# 'p': (1,2)
grid_knn = GridSearchCV(pipe_knn, param_grid = params)
grid_knn.fit(X_train, y_train)
print(f'Train Score: {grid_knn.score(X_train, y_train)}')
print(f'Test Score: {grid_knn.score(X_test, y_test)}')
grid_knn.best_params_
```
---
```
#https://medium.datadriveninvestor.com/k-nearest-neighbors-in-python-hyperparameters-tuning-716734bc557f
#List Hyperparameters that we want to tune.
leaf_size = list(range(1,50))
n_neighbors = list(range(1,30))
p=[1,2]
#Convert to dictionary
hyperparameters = dict(leaf_size=leaf_size, n_neighbors=n_neighbors, p=p)
#Create new KNN object
knn_2 = KNeighborsClassifier()
#Use GridSearch
clf = GridSearchCV(knn_2, hyperparameters, cv=10)
parameters_KNN = {
    'n_neighbors': (1, 10, 1),
    'leaf_size': (20, 40, 1),
    'p': (1, 2),
    'weights': ('uniform', 'distance'),
    'metric': ('minkowski', 'chebyshev')
}
# with GridSearch
estimator_KNN = KNeighborsClassifier()
grid_search_KNN = GridSearchCV(
    estimator=estimator_KNN,
    param_grid=parameters_KNN,
    scoring='accuracy',
    n_jobs=-1,
    cv=5
)
```
| github_jupyter |
# LDA Training
<figure>
<div>
<img src=https://s2.loli.net/2022/02/28/X7vzOlDHJtP6UnM.png width="600">
</div>
<figcaption>The LDA training algorithm from <a href=http://www.arbylon.net/publications/text-est.pdf>Parameter estimation for text analysis</a></figcaption>
</figure>
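In the notation of the figure above, each Gibbs sweep below resamples the topic of every token from the standard collapsed conditional (stated here for reference; the symbols match the counts used in the code):

$$
p(z_i = k \mid \mathbf{z}_{\neg i}, \mathbf{w}) \;\propto\; \left(n_{m,\neg i}^{(k)} + \alpha_k\right) \cdot \frac{n_{k,\neg i}^{(t)} + \beta_t}{\sum_{t'} \left(n_{k,\neg i}^{(t')} + \beta_{t'}\right)}
$$

where $n_{m,\neg i}^{(k)}$ is the number of tokens in document $m$ assigned to topic $k$ and $n_{k,\neg i}^{(t)}$ is the number of times word $t$ is assigned to topic $k$, both excluding the current token. These counts are `n_doc_topic` and `n_topic_word` in the code, and the product `p_doc_topic * p_topic_word` computed in the sampler differs from this expression only by a constant normalizer.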
```
import random
import numpy as np
from collections import defaultdict, OrderedDict
from types import SimpleNamespace
from tqdm.notebook import tqdm
from visualize import visualize_topic_word
# === corpus loading ===
class NeurIPSCorpus:
def __init__(self, data_path, num_topics, mode, start_doc_idx=0, max_num_docs=100, max_num_words=10000, max_doc_length=1000, train_corpus=None):
self.docs = []
self.word2id = OrderedDict()
self.max_doc_length = max_doc_length
self.mode = mode
# only keep the most frequent words
if self.mode == "train":
word2cnt = defaultdict(int)
with open(data_path) as fin:
for i, line in enumerate(list(fin)[::-1]): # use more recent papers
if i >= max_num_docs: break
for word in line.strip().split():
word2cnt[word] += 1
word2cnt = sorted(list(word2cnt.items()), key=lambda x: x[1], reverse=True)
if len(word2cnt) > max_num_words:
word2cnt = word2cnt[:max_num_words]
word2cnt = dict(word2cnt)
# read in the doc and convert words to integers
with open(data_path) as fin:
for i, line in enumerate(list(fin)[::-1]): # use more recent papers
if i < start_doc_idx: continue
if i - start_doc_idx >= max_num_docs: break
doc = []
for word in line.strip().split():
if len(doc) >= self.max_doc_length: break
if self.mode == "train":
if word not in word2cnt: continue
if word not in self.word2id:
self.word2id[word] = len(self.word2id)
doc.append(self.word2id[word])
else:
if word not in train_corpus.word2id: continue
doc.append(train_corpus.word2id[word])
self.docs.append(doc)
self.num_docs = len(self.docs)
self.num_topics = num_topics
self.num_words = len(self.word2id)
self.id2word = {v: k for k, v in self.word2id.items()}
print(
"num_docs:", self.num_docs,
"num_topics:", self.num_topics,
"num_words:", self.num_words
)
corpus = NeurIPSCorpus(
data_path="data/papers.txt",
mode="train",
num_topics=10,
start_doc_idx=0,
max_num_docs=1000,
max_num_words=10000,
max_doc_length=200,
)
hparams = SimpleNamespace(
alpha=np.ones([corpus.num_topics], dtype=float) / corpus.num_topics,
beta = np.ones([corpus.num_words], dtype=float) / corpus.num_topics,
gibbs_sampling_max_iters=500,
)
# === initialization ===
print("Initializing...", flush=True)
n_doc_topic = np.zeros([corpus.num_docs, corpus.num_topics], dtype=float) # n_m^(k)
n_topic_word = np.zeros([corpus.num_topics, corpus.num_words], dtype=float) # n_k^(t)
z_doc_word = np.zeros([corpus.num_docs, corpus.max_doc_length], dtype=int)
for doc_i in range(corpus.num_docs):
for j, word_j in enumerate(corpus.docs[doc_i]):
topic_ij = random.randint(0, corpus.num_topics - 1)
n_doc_topic[doc_i, topic_ij] += 1
n_topic_word[topic_ij, word_j] += 1
z_doc_word[doc_i, j] = topic_ij
# === Gibbs sampling ===
print("Gibbs sampling...", flush=True)
for iteration in tqdm(range(hparams.gibbs_sampling_max_iters)):
for doc_i in range(corpus.num_docs):
for j, word_j in enumerate(corpus.docs[doc_i]):
# remove the old assignment
topic_ij = z_doc_word[doc_i, j]
n_doc_topic[doc_i, topic_ij] -= 1
n_topic_word[topic_ij, word_j] -= 1
# compute the new assignment
p_doc_topic = (n_doc_topic[doc_i, :] + hparams.alpha) \
/ np.sum(n_doc_topic[doc_i] + hparams.alpha)
p_topic_word = (n_topic_word[:, word_j] + hparams.beta[word_j]) \
/ np.sum(n_topic_word + hparams.beta, axis=1)
p_topic = p_doc_topic * p_topic_word
p_topic /= np.sum(p_topic)
# record the new assignment
new_topic_ij = np.random.choice(np.arange(corpus.num_topics), p=p_topic)
n_doc_topic[doc_i, new_topic_ij] += 1
n_topic_word[new_topic_ij, word_j] += 1
z_doc_word[doc_i, j] = new_topic_ij
if iteration % 50 == 0:
print(f"Iter [{iteration}]===")
# === Check convergence and read out parameters ===
theta = (n_doc_topic + hparams.alpha) / np.sum(n_doc_topic + hparams.alpha, axis=1, keepdims=True)
phi = (n_topic_word + hparams.beta) / np.sum(n_topic_word + hparams.beta, axis=1, keepdims=True)
all_top_words = []
all_top_probs = []
for topic in range(corpus.num_topics):
top_words = np.argsort(phi[topic])[::-1][:10]
top_probs = phi[topic, top_words]
top_words = [corpus.id2word[word] for word in top_words]
all_top_words.append(top_words)
all_top_probs.append(top_probs)
print(f"Topic {topic}:", top_words)
visualize_topic_word(all_top_words, all_top_probs)
```
# Inference on unseen documents
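The sampler mirrors the training loop, except that the topic-word counts learned on the training corpus, $n_k^{(t)}$, are held fixed and added to the counts $\tilde{n}$ accumulated on the test document (this is the "new sampling formula" noted in the code below):

$$
p(z_i = k \mid \cdot) \;\propto\; \left(\tilde{n}_{m,\neg i}^{(k)} + \alpha_k\right) \cdot \frac{\tilde{n}_{k,\neg i}^{(t)} + n_k^{(t)} + \beta_t}{\sum_{t'} \left(\tilde{n}_{k,\neg i}^{(t')} + n_k^{(t')} + \beta_{t'}\right)}
$$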
```
# === inference on unseen documents ===
test_corpus = NeurIPSCorpus(
data_path="data/papers.txt",
mode="test",
num_topics=10,
start_doc_idx=1000,
max_num_docs=5,
max_num_words=10000,
max_doc_length=200,
train_corpus=corpus,
)
# === inference via Gibbs sampling ===
for i, doc in enumerate(test_corpus.docs):
print(f"\nTest Doc [{i}] ===")
doc_i = 0 # only infer 1 test doc at a time
test_n_doc_topic = np.zeros([1, corpus.num_topics], dtype=float)
test_n_topic_word = np.zeros([corpus.num_topics, corpus.num_words], dtype=float)
test_z_doc_word = np.zeros([1, corpus.max_doc_length], dtype=int)
print(" ".join([corpus.id2word[x] for x in doc]))
for j, word_j in enumerate(doc):
topic_ij = random.randint(0, corpus.num_topics - 1)
test_n_doc_topic[doc_i, topic_ij] += 1
test_n_topic_word[topic_ij, word_j] += 1
test_z_doc_word[doc_i, j] = topic_ij
for iteration in tqdm(range(100)):
for j, word_j in enumerate(doc):
# remove the old assignment
topic_ij = test_z_doc_word[doc_i, j]
test_n_doc_topic[doc_i, topic_ij] -= 1
test_n_topic_word[topic_ij, word_j] -= 1
# compute the new assignment (new sampling formula!)
p_doc_topic = (test_n_doc_topic[doc_i, :] + hparams.alpha) \
/ np.sum(test_n_doc_topic[doc_i] + hparams.alpha)
p_topic_word = (test_n_topic_word[:, word_j] + n_topic_word[:, word_j] + hparams.beta[word_j]) \
/ np.sum(test_n_topic_word + n_topic_word + hparams.beta, axis=1)
p_topic = p_doc_topic * p_topic_word
p_topic /= np.sum(p_topic)
# record the new assignment
new_topic_ij = np.random.choice(np.arange(corpus.num_topics), p=p_topic)
test_n_doc_topic[doc_i, new_topic_ij] += 1
test_n_topic_word[new_topic_ij, word_j] += 1
test_z_doc_word[doc_i, j] = new_topic_ij
# === Check convergence and read out parameters ===
test_theta = (test_n_doc_topic + hparams.alpha) / np.sum(test_n_doc_topic + hparams.alpha, axis=1, keepdims=True)
test_phi = (test_n_topic_word + hparams.beta) / np.sum(test_n_topic_word + hparams.beta, axis=1, keepdims=True)
print("Topic distribution:", [float(f"{x:.4f}") for x in test_theta[0]])
print("Top 3 topics:", np.argsort(test_theta[0])[::-1][:3])
```
Compare with the learned topics:
<img src=https://raw.githubusercontent.com/mistylight/picbed/main/Hexo/Screen%20Shot%202022-02-28%20at%205.10.12%20PM.png style="width: 1200px">
| github_jupyter |
```
import numpy as np
import matplotlib.pyplot as plt
import json
import scipy.stats as st
```
Set plot font size
```
FS = 18
```
Get dictionary with information about errors and p-values during convergent time steps
```
fname = './data/p3_p7_evolve_results/190211_errs_per_conv_ts_pr_0.005_g_1.1_niter_100.json'
with open(fname,'r') as f:
c_err_results = json.loads(f.read())
# Inspect keys
print(c_err_results.keys())
# Go through simulation iterations and compute the min, max, and best
# (where errors are minimized and p-values are maximized) time step for each
itercurr = []
min_c_ts = []
max_c_ts = []
mean_c_ts = []
best_c_ts = []
iters = list(set(c_err_results['iteration']))
for ic in iters:
rowscurr = [i for i,q in enumerate(c_err_results['iteration']) if q == ic]
encfscurr = [c_err_results['err_ncfs'][q] for q in rowscurr]
enpcscurr = [c_err_results['err_npcs'][q] for q in rowscurr]
pnsynscurr = [c_err_results['p_nsyns'][q] for q in rowscurr]
pnsynspcfcurr = [c_err_results['p_nsynspcf'][q] for q in rowscurr]
pnpcspcfcurr = [c_err_results['p_npcspcf'][q] for q in rowscurr]
pncfsppccurr = [c_err_results['p_ncfsppc'][q] for q in rowscurr]
tscurr = [c_err_results['time_step'][q] for q in rowscurr]
itercurr.append(ic)
min_c_ts.append(np.min(tscurr))
max_c_ts.append(np.max(tscurr))
mean_c_ts.append(np.mean(tscurr))
b_encfs = [i for i,q in enumerate(encfscurr) if q == np.min(encfscurr)]
b_enpcs = [i for i,q in enumerate(enpcscurr) if q == np.min(enpcscurr)]
b_pnsyns = [i for i,q in enumerate(pnsynscurr) if q == np.max(pnsynscurr)]
b_pnsynspcf = [i for i,q in enumerate(pnsynspcfcurr) if q == np.max(pnsynspcfcurr)]
b_pnpcspcf = [i for i,q in enumerate(pnpcspcfcurr) if q == np.max(pnpcspcfcurr)]
b_pncfsppc = [i for i,q in enumerate(pncfsppccurr) if q == np.max(pncfsppccurr)]
tben = [tscurr[q] for q in b_encfs]
tbep = [tscurr[q] for q in b_enpcs]
tpnsyns = [tscurr[q] for q in b_pnsyns]
tpnspcf = [tscurr[q] for q in b_pnsynspcf]
tpnpcpcf = [tscurr[q] for q in b_pnpcspcf]
tpncfppc = [tscurr[q] for q in b_pncfsppc]
# Find the time step where most of these conditions are true
b_ts = st.mode(tben + tbep + tpnsyns + tpnspcf + tpnpcpcf + tpncfppc)[0][0]
best_c_ts.append(b_ts)
plt.figure(figsize=(10,10))
plt.hist(best_c_ts)
plt.xlabel('time step of best convergence',fontsize=FS)
plt.ylabel('number of occurrences',fontsize=FS)
plt.title('Best convergence times for iterations of simulation with pr 0.005, g 1.1',fontsize=FS)
plt.show()
print('mean best convergence time = {0} +/- {1} time steps'.format(np.mean(best_c_ts),st.sem(best_c_ts)))
plt.figure(figsize=(10,10))
plt.hist(mean_c_ts)
plt.xlabel('mean time step of convergence',fontsize=FS)
plt.ylabel('number of occurrences',fontsize=FS)
plt.title('Mean convergence times for iterations of simulation with pr 0.005, g 1.1',fontsize=FS)
plt.show()
print('mean of mean convergent time steps = {0}'.format(np.mean(mean_c_ts)))
np.max(iters)
```
| github_jupyter |
[View in Colaboratory](https://colab.research.google.com/github/schwaaweb/aimlds1_11-NLP/blob/master/M11_A_DJ_NLP_Assignment.ipynb)
### Assignment: Natural Language Processing
In this assignment, you will work with a data set that contains restaurant reviews. You will use a Naive Bayes model to classify the reviews (positive or negative) based on the words in the review. The main objective of this assignment is to gauge the performance of a Naive Bayes model by using a confusion matrix; however, in order to ascertain the efficacy of the model, you will first have to train the Naive Bayes model with a portion (i.e. 70%) of the underlying data set and then test it against the remainder of the data set. Before you can train the model, you will have to go through a sequence of steps to get the data ready for training.
Steps you may need to perform:
**1)** Read in the list of restaurant reviews
**2)** Convert the reviews into a list of tokens
**3)** You will most likely have to eliminate stop words
**4)** You may have to utilize stemming or lemmatization to determine the base form of the words
**5)** You will have to vectorize the data (i.e. construct a document term/word matrix) wherein select words from the reviews will constitute the columns of the matrix and the individual reviews will be part of the rows of the matrix
**6)** Create 'Train' and 'Test' data sets (i.e. 70% of the underlying data set will constitute the training set and 30% of the underlying data set will constitute the test set)
**7)** Train a Naive Bayes model on the Train data set and test it against the test data set
**8)** Construct a confusion matrix to gauge the performance of the model
**Dataset**: https://www.dropbox.com/s/yl5r7kx9nq15gmi/Restaurant_Reviews.tsv?raw=1
**1)** Read in the list of restaurant reviews
```
#%%time
#!wget -c https://www.dropbox.com/s/yl5r7kx9nq15gmi/Restaurant_Reviews.tsv?raw=1 && mv Restaurant_Reviews.tsv?raw=1 Restaurant_Reviews.tsv
!ls -lh *tsv
%%time
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import re
import string
import nltk
nltk.download('all')
df = pd.read_csv('Restaurant_Reviews.tsv', sep='\t')
df.head()
df.tail()
```
**2)** Convert the reviews into a list of tokens
```
review = df['Review'] # dropping the like here
print(review)
len(review)
```
**3)** You will most likely have to eliminate stop words
**4)** You may have to utilize stemming or lemmatization to determine the base form of the words
```
stopwords = nltk.corpus.stopwords.words('english')
ps = nltk.PorterStemmer()
#Elmiminate punctations
#Tokenize based on whitespace
#Stem the text
#Remove stopwords
def process_text(txt):
eliminate_punct = "".join([word.lower() for word in txt if word not in string.punctuation])
tokens = re.split('\W+', txt)
txt = [ps.stem(word) for word in tokens if word not in stopwords]
return txt
df['clean_review'] = df['Review'].apply(lambda x: process_text(x))
df.head()
import gensim
# Use the Gensim document to create a dictionary - a dictionary maps every word to a number
dictionary = gensim.corpora.Dictionary(df['clean_review'])
# Examine the length of the dictionary
num_of_words = len(dictionary)
print("# of words in dictionary: {}".format(num_of_words))
#for index,word in dictionary.items():
# print(index,word)
print(dictionary)
#print(dictionary.token2id)
```
**5)** You will have to vectorize the data (i.e. construct a document term/word matrix) wherein select words from the reviews will constitute the columns of the matrix and the individual reviews will be part of the rows of the matrix
```
from pprint import pprint
%%time
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
def cv(data):
count_vectorizer = CountVectorizer()
emb = count_vectorizer.fit_transform(data)
return emb, count_vectorizer
list_corpus = df["clean_review"].tolist()
list_labels = df["Liked"].tolist()
X_train, X_test, y_train, y_test = train_test_split(list_corpus, list_labels, test_size=0.3, random_state=42)
#X_train_counts, count_vectorizer = cv(X_train)
#X_test_counts = count_vectorizer.transform(X_test)
#pprint(X_train)
#from sklearn.feature_extraction.text import CountVectorizer
#count_vect = CountVectorizer(analyzer=process_text, max_features=1668)
#W_counts = count_vect.fit_transform(df['clean_review'])
#print(W_counts.shape)
#print(count_vect.get_feature_names())
%%time
corpus = [dictionary.doc2bow(text) for text in list_corpus]
tfidf = gensim.models.TfidfModel(corpus)
corpus_tfidf = tfidf[corpus]
index = gensim.similarities.MatrixSimilarity(tfidf[corpus])
sims = index[corpus_tfidf]
#for vector in corpus:
# print(vector)
print(sims.shape)
```
**6)** Create 'Train' and 'Test' data sets (i.e. 70% of the underlying data set will constitute the training set and 30% of the underlying data set will constitute the test set)
**7)** Train a Naive Bayes model on the Train data set and test it against the test data set
**8)** Construct a confusion matrix to gauge the performance of the model
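A minimal sketch of steps 6-8, assuming the `X_train`/`X_test` token lists and `y_train`/`y_test` labels produced by the `train_test_split` call above (the vectorizer settings are illustrative choices, not prescribed by the assignment):

```
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.naive_bayes import MultinomialNB
from sklearn.metrics import accuracy_score, confusion_matrix

# CountVectorizer expects raw strings, so re-join the stemmed tokens
train_docs = [" ".join(tokens) for tokens in X_train]
test_docs = [" ".join(tokens) for tokens in X_test]

count_vect = CountVectorizer()
X_train_counts = count_vect.fit_transform(train_docs)  # learn the vocabulary on the training set only
X_test_counts = count_vect.transform(test_docs)

nb_model = MultinomialNB()
nb_model.fit(X_train_counts, y_train)
y_pred = nb_model.predict(X_test_counts)

print("Accuracy: {}".format(accuracy_score(y_test, y_pred)))
print("Confusion matrix:\n{}".format(confusion_matrix(y_test, y_pred)))
```

In scikit-learn's confusion matrix the rows correspond to the true labels and the columns to the predicted labels, so the off-diagonal cells count the misclassified reviews.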
| github_jupyter |
# Machine Learning Engineer Nanodegree
## Unsupervised Learning
## Project 3: Creating Customer Segments
Welcome to the third project of the Machine Learning Engineer Nanodegree! In this notebook, some template code has already been provided for you, and it will be your job to implement the additional functionality necessary to successfully complete this project. Sections that begin with **'Implementation'** in the header indicate that the following block of code will require additional functionality which you must provide. Instructions will be provided for each section and the specifics of the implementation are marked in the code block with a `'TODO'` statement. Please be sure to read the instructions carefully!
In addition to implementing code, there will be questions that you must answer which relate to the project and your implementation. Each section where you will answer a question is preceded by a **'Question X'** header. Carefully read each question and provide thorough answers in the following text boxes that begin with **'Answer:'**. Your project submission will be evaluated based on your answers to each of the questions and the implementation you provide.
>**Note:** Code and Markdown cells can be executed using the **Shift + Enter** keyboard shortcut. In addition, Markdown cells can be edited by typically double-clicking the cell to enter edit mode.
## Getting Started
In this project, you will analyze a dataset containing data on various customers' annual spending amounts (reported in *monetary units*) of diverse product categories for internal structure. One goal of this project is to best describe the variation in the different types of customers that a wholesale distributor interacts with. Doing so would equip the distributor with insight into how to best structure their delivery service to meet the needs of each customer.
The dataset for this project can be found on the [UCI Machine Learning Repository](https://archive.ics.uci.edu/ml/datasets/Wholesale+customers). For the purposes of this project, the features `'Channel'` and `'Region'` will be excluded in the analysis — with focus instead on the six product categories recorded for customers.
Run the code block below to load the wholesale customers dataset, along with a few of the necessary Python libraries required for this project. You will know the dataset loaded successfully if the size of the dataset is reported.
```
# Import libraries necessary for this project
import numpy as np
import pandas as pd
import renders as rs
from IPython.display import display # Allows the use of display() for DataFrames
# Show matplotlib plots inline (nicely formatted in the notebook)
%matplotlib inline
# Load the wholesale customers dataset
try:
data = pd.read_csv("customers.csv")
data.drop(['Region', 'Channel'], axis = 1, inplace = True)
print "Wholesale customers dataset has {} samples with {} features each.".format(*data.shape)
except:
print "Dataset could not be loaded. Is the dataset missing?"
```
## Data Exploration
In this section, you will begin exploring the data through visualizations and code to understand how each feature is related to the others. You will observe a statistical description of the dataset, consider the relevance of each feature, and select a few sample data points from the dataset which you will track through the course of this project.
Run the code block below to observe a statistical description of the dataset. Note that the dataset is composed of six important product categories: **'Fresh'**, **'Milk'**, **'Grocery'**, **'Frozen'**, **'Detergents_Paper'**, and **'Delicatessen'**. Consider what each category represents in terms of products you could purchase.
```
# Display a description of the dataset
display(data.describe())
```
### Implementation: Selecting Samples
To get a better understanding of the customers and how their data will transform through the analysis, it would be best to select a few sample data points and explore them in more detail. In the code block below, add **three** indices of your choice to the `indices` list which will represent the customers to track. It is suggested to try different sets of samples until you obtain customers that vary significantly from one another.
```
# TODO: Select three indices of your choice you wish to sample from the dataset
indices = [0, 15, 45]
# Create a DataFrame of the chosen samples
samples = pd.DataFrame(data.loc[indices], columns = data.keys()).reset_index(drop = True)
print "Chosen samples of wholesale customers dataset:"
display(samples)
```
### Question 1
Consider the total purchase cost of each product category and the statistical description of the dataset above for your sample customers.
*What kind of establishment (customer) could each of the three samples you've chosen represent?*
**Hint:** Examples of establishments include places like markets, cafes, and retailers, among many others. Avoid using names for establishments, such as saying *"McDonalds"* when describing a sample customer as a restaurant.
**Answer:**
Customer 0 - Seems to be a cafe or restaurant, given higher than average consumption of Milk and Grocery and lower consumption of the other products.
Customer 15 - The only product that is near the mean for this customer is Fresh. In general there is low consumption of almost all other products (around the 50th percentile). This customer is probably a small grocery shop.
Customer 45 - Very high consumption of Milk, Grocery and Detergents_Paper, well above average, suggests this is probably a big restaurant or bakery.
### Implementation: Feature Relevance
One interesting thought to consider is if one (or more) of the six product categories is actually relevant for understanding customer purchasing. That is to say, is it possible to determine whether customers purchasing some amount of one category of products will necessarily purchase some proportional amount of another category of products? We can make this determination quite easily by training a supervised regression learner on a subset of the data with one feature removed, and then score how well that model can predict the removed feature.
In the code block below, you will need to implement the following:
- Assign `new_data` a copy of the data by removing a feature of your choice using the `DataFrame.drop` function.
- Use `sklearn.cross_validation.train_test_split` to split the dataset into training and testing sets.
- Use the removed feature as your target label. Set a `test_size` of `0.25` and set a `random_state`.
- Import a decision tree regressor, set a `random_state`, and fit the learner to the training data.
- Report the prediction score of the testing set using the regressor's `score` function.
```
# TODO: Make a copy of the DataFrame, using the 'drop' function to drop the given feature
new_data = data.drop('Detergents_Paper',axis=1)
# TODO: Split the data into training and testing sets using the given feature as the target
from sklearn.cross_validation import train_test_split
X_train, X_test, y_train, y_test = train_test_split(new_data, data['Detergents_Paper'], test_size = 0.25, random_state = 0)
# TODO: Create a decision tree regressor and fit it to the training set
from sklearn.tree import DecisionTreeRegressor
regressor = DecisionTreeRegressor()
# TODO: Report the score of the prediction using the testing set
from sklearn.metrics import r2_score
model = regressor.fit(X_train,y_train)
score = r2_score(y_test, model.predict(X_test))
print (score)
```
### Question 2
*Which feature did you attempt to predict? What was the reported prediction score? Is this feature is necessary for identifying customers' spending habits?*
**Hint:** The coefficient of determination, `R^2`, is scored between 0 and 1, with 1 being a perfect fit. A negative `R^2` implies the model fails to fit the data.
**Answer:**
I chose Detergents_Paper for prediction. The R^2 score achieved was 0.67, which shows that this feature can largely be predicted from some (non-linear) combination of the other features. Since this feature is not independent of the others, it may not provide unique information about customers' spending habits.
### Visualize Feature Distributions
To get a better understanding of the dataset, we can construct a scatter matrix of each of the six product features present in the data. If you found that the feature you attempted to predict above is relevant for identifying a specific customer, then the scatter matrix below may not show any correlation between that feature and the others. Conversely, if you believe that feature is not relevant for identifying a specific customer, the scatter matrix might show a correlation between that feature and another feature in the data. Run the code block below to produce a scatter matrix.
```
# Produce a scatter matrix for each pair of features in the data
pd.scatter_matrix(data, alpha = 0.3, figsize = (14,8), diagonal = 'kde');
```
### Question 3
*Are there any pairs of features which exhibit some degree of correlation? Does this confirm or deny your suspicions about the relevance of the feature you attempted to predict? How is the data for those features distributed?*
**Hint:** Is the data normally distributed? Where do most of the data points lie?
**Answer:** The figure above shows a strong correlation between Detergents_Paper and Grocery. There are also weaker correlations between Milk and Detergents_Paper, and between Milk and Grocery. This in fact confirms my suspicion that Detergents_Paper is dependent on other features.
The data is definitely not normally distributed, and most of it is clustered near the origin.
## Data Preprocessing
In this section, you will preprocess the data to create a better representation of customers by performing a scaling on the data and detecting (and optionally removing) outliers. Preprocessing data is often times a critical step in assuring that results you obtain from your analysis are significant and meaningful.
### Implementation: Feature Scaling
If data is not normally distributed, especially if the mean and median vary significantly (indicating a large skew), it is most [often appropriate](http://econbrowser.com/archives/2014/02/use-of-logarithms-in-economics) to apply a non-linear scaling — particularly for financial data. One way to achieve this scaling is by using a [Box-Cox test](http://scipy.github.io/devdocs/generated/scipy.stats.boxcox.html), which calculates the best power transformation of the data that reduces skewness. A simpler approach which can work in most cases would be applying the natural logarithm.
In the code block below, you will need to implement the following:
- Assign a copy of the data to `log_data` after applying a logarithm scaling. Use the `np.log` function for this.
- Assign a copy of the sample data to `log_samples` after applying a logrithm scaling. Again, use `np.log`.
```
# TODO: Scale the data using the natural logarithm
log_data = np.log(data)
# TODO: Scale the sample data using the natural logarithm
log_samples = np.log(samples)
# Produce a scatter matrix for each pair of newly-transformed features
pd.scatter_matrix(log_data, alpha = 0.3, figsize = (14,8), diagonal = 'kde');
```
### Observation
After applying a natural logarithm scaling to the data, the distribution of each feature should appear much more normal. For any pairs of features you may have identified earlier as being correlated, observe here whether that correlation is still present (and whether it is now stronger or weaker than before).
Run the code below to see how the sample data has changed after having the natural logarithm applied to it.
```
# Display the log-transformed sample data
display(log_samples)
```
### Implementation: Outlier Detection
Detecting outliers in the data is extremely important in the data preprocessing step of any analysis. The presence of outliers can often skew results which take into consideration these data points. There are many "rules of thumb" for what constitutes an outlier in a dataset. Here, we will use [Tukey's Method for identfying outliers](http://datapigtechnologies.com/blog/index.php/highlighting-outliers-in-your-data-with-the-tukey-method/): An *outlier step* is calculated as 1.5 times the interquartile range (IQR). A data point with a feature that is beyond an outlier step outside of the IQR for that feature is considered abnormal.
In the code block below, you will need to implement the following:
- Assign the value of the 25th percentile for the given feature to `Q1`. Use `np.percentile` for this.
- Assign the value of the 75th percentile for the given feature to `Q3`. Again, use `np.percentile`.
- Assign the calculation of an outlier step for the given feature to `step`.
- Optionally remove data points from the dataset by adding indices to the `outliers` list.
**NOTE:** If you choose to remove any outliers, ensure that the sample data does not contain any of these points!
Once you have performed this implementation, the dataset will be stored in the variable `good_data`.
```
from sets import Set
outliers_indices = {}
# For each feature find the data points with extreme high or low values
for feature in log_data.keys():
# TODO: Calculate Q1 (25th percentile of the data) for the given feature
Q1 = np.percentile(log_data[feature], 25)
# TODO: Calculate Q3 (75th percentile of the data) for the given feature
Q3 = np.percentile(log_data[feature], 75)
# TODO: Use the interquartile range to calculate an outlier step (1.5 times the interquartile range)
step = 1.5 * (Q3 - Q1)
# Display the outliers
print "Data points considered outliers for the feature '{}':".format(feature)
outlier = log_data[~((log_data[feature] >= Q1 - step) & (log_data[feature] <= Q3 + step))]
outliers_indices[feature] = Set(outlier.index)
display(outlier)
# Find outlier in all the Feature
consistent_outliers = Set(outliers_indices['Fresh'])
for feature in outliers_indices.keys():
    consistent_outliers.intersection_update(outliers_indices[feature])
#print ("Outlier in all of the features: " + str(consistent_outliers))
# Create histogram for outliers => map of customer index to num of outlier features
hist_outliers = {}
for feature in outliers_indices.keys():
for idx in outliers_indices[feature]:
hist_outliers[idx] = hist_outliers[idx] + 1 if idx in hist_outliers.keys() else 1
# Find out liers in more than one feature
twice_outliers = [key for key,item in hist_outliers.iteritems() if item > 1]
# print twice_outliers
# OPTIONAL: Select the indices for data points you wish to remove
outliers = twice_outliers
# Remove the outliers, if any were specified
good_data = log_data.drop(log_data.index[outliers]).reset_index(drop = True)
```
### Question 4
*Are there any data points considered outliers for more than one feature based on the definition above? Should these data points be removed from the dataset? If any data points were added to the `outliers` list to be removed, explain why.*
**Answer:** Data points that are outliers in more than one feature should be considered outliers. Such points have been added to the `outliers` list and hence removed from the dataset. There is no data point that is an outlier in all of the features.
## Feature Transformation
In this section you will use principal component analysis (PCA) to draw conclusions about the underlying structure of the wholesale customer data. Since using PCA on a dataset calculates the dimensions which best maximize variance, we will find which compound combinations of features best describe customers.
### Implementation: PCA
Now that the data has been scaled to a more normal distribution and has had any necessary outliers removed, we can now apply PCA to the `good_data` to discover which dimensions about the data best maximize the variance of features involved. In addition to finding these dimensions, PCA will also report the *explained variance ratio* of each dimension — how much variance within the data is explained by that dimension alone. Note that a component (dimension) from PCA can be considered a new "feature" of the space, however it is a composition of the original features present in the data.
In the code block below, you will need to implement the following:
- Import `sklearn.decomposition.PCA` and assign the results of fitting PCA in six dimensions with `good_data` to `pca`.
- Apply a PCA transformation of the sample log-data `log_samples` using `pca.transform`, and assign the results to `pca_samples`.
```
# TODO: Apply PCA by fitting the good data with the same number of dimensions as features
from sklearn.decomposition import PCA
pca = PCA(n_components=6)
pca.fit(good_data)
# TODO: Transform the sample log-data using the PCA fit above
pca_samples = pca.transform(log_samples)
# Generate PCA results plot
pca_results = rs.pca_results(good_data, pca)
print pca.explained_variance_ratio_.cumsum()
```
### Question 5
*How much variance in the data is explained* ***in total*** *by the first and second principal component? What about the first four principal components? Using the visualization provided above, discuss what the first four dimensions best represent in terms of customer spending.*
**Hint:** A positive increase in a specific dimension corresponds with an *increase* of the *positive-weighted* features and a *decrease* of the *negative-weighted* features. The rate of increase or decrease is based on the indivdual feature weights.
**Answer:** As shown above, approximately 44% of the variance is explained by the first principal component, and approximately 70% in total by the first and second components together. The first four components together explain approximately 93% of the variance.
Dim 1 - The prevalent features in this dimension are Milk, Grocery and Detergents_Paper. This makes sense, as the scatter matrix showed strong pairwise dependence among these three features.
Dim 2 - Fresh, Frozen, and Delicatessen are the most prominent features in this dimension; they did not show correlation with the features of the first dimension in the scatter matrix.
Dim 3 - Fresh and Delicatessen are the major features here, with opposite signs capturing a negative correlation.
Dim 4 - This dimension shows a negative correlation between the Frozen and Delicatessen features.
### Observation
Run the code below to see how the log-transformed sample data has changed after having a PCA transformation applied to it in six dimensions. Observe the numerical value for the first four dimensions of the sample points. Consider if this is consistent with your initial interpretation of the sample points.
```
# Display sample log-data after having a PCA transformation applied
display(pd.DataFrame(np.round(pca_samples, 4), columns = pca_results.index.values))
```
### Implementation: Dimensionality Reduction
When using principal component analysis, one of the main goals is to reduce the dimensionality of the data — in effect, reducing the complexity of the problem. Dimensionality reduction comes at a cost: Fewer dimensions used implies less of the total variance in the data is being explained. Because of this, the *cumulative explained variance ratio* is extremely important for knowing how many dimensions are necessary for the problem. Additionally, if a signifiant amount of variance is explained by only two or three dimensions, the reduced data can be visualized afterwards.
In the code block below, you will need to implement the following:
- Assign the results of fitting PCA in two dimensions with `good_data` to `pca`.
- Apply a PCA transformation of `good_data` using `pca.transform`, and assign the reuslts to `reduced_data`.
- Apply a PCA transformation of the sample log-data `log_samples` using `pca.transform`, and assign the results to `pca_samples`.
```
# TODO: Apply PCA by fitting the good data with only two dimensions
pca = PCA(n_components=2).fit(good_data)
# TODO: Transform the good data using the PCA fit above
reduced_data = pca.transform(good_data)
# TODO: Transform the sample log-data using the PCA fit above
pca_samples = pca.transform(log_samples)
# Create a DataFrame for the reduced data
reduced_data = pd.DataFrame(reduced_data, columns = ['Dimension 1', 'Dimension 2'])
```
### Observation
Run the code below to see how the log-transformed sample data has changed after having a PCA transformation applied to it using only two dimensions. Observe how the values for the first two dimensions remains unchanged when compared to a PCA transformation in six dimensions.
```
# Display sample log-data after applying PCA transformation in two dimensions
display(pd.DataFrame(np.round(pca_samples, 4), columns = ['Dimension 1', 'Dimension 2']))
```
## Clustering
In this section, you will choose to use either a K-Means clustering algorithm or a Gaussian Mixture Model clustering algorithm to identify the various customer segments hidden in the data. You will then recover specific data points from the clusters to understand their significance by transforming them back into their original dimension and scale.
### Question 6
*What are the advantages to using a K-Means clustering algorithm? What are the advantages to using a Gaussian Mixture Model clustering algorithm? Given your observations about the wholesale customer data so far, which of the two algorithms will you use and why?*
**Answer:** The major advantage of K-Means clustering is that it converges quite fast compared to other clustering algorithms. However, K-Means is prone to getting stuck in local minima.
A Gaussian Mixture Model is a generalization of K-Means clustering. It does not need to assign hard clusters to the data points and can therefore express a probabilistic association of a data point with each cluster.
Since we need to find an appropriate number of clusters here, we have to experiment multiple times with different cluster counts, so I will use the faster and simpler K-Means clustering algorithm.
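As a minimal illustration of the soft assignments mentioned above (assuming a scikit-learn version that exposes `sklearn.mixture.GaussianMixture`; older releases call this class `GMM`), a Gaussian Mixture Model returns a membership probability for every cluster instead of a single hard label:

```
from sklearn.mixture import GaussianMixture

# Fit a 2-component mixture to the PCA-reduced data and inspect the soft assignments
gmm = GaussianMixture(n_components=2, random_state=0).fit(reduced_data)
soft_assignments = gmm.predict_proba(reduced_data)  # shape: (n_samples, n_components)
print(soft_assignments[:5])
```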
### Implementation: Creating Clusters
Depending on the problem, the number of clusters that you expect to be in the data may already be known. When the number of clusters is not known *a priori*, there is no guarantee that a given number of clusters best segments the data, since it is unclear what structure exists in the data — if any. However, we can quantify the "goodness" of a clustering by calculating each data point's *silhouette coefficient*. The [silhouette coefficient](http://scikit-learn.org/stable/modules/generated/sklearn.metrics.silhouette_score.html) for a data point measures how similar it is to its assigned cluster from -1 (dissimilar) to 1 (similar). Calculating the *mean* silhouette coefficient provides for a simple scoring method of a given clustering.
In the code block below, you will need to implement the following:
- Fit a clustering algorithm to the `reduced_data` and assign it to `clusterer`.
- Predict the cluster for each data point in `reduced_data` using `clusterer.predict` and assign them to `preds`.
- Find the cluster centers using the algorithm's respective attribute and assign them to `centers`.
- Predict the cluster for each sample data point in `pca_samples` and assign them `sample_preds`.
- Import sklearn.metrics.silhouette_score and calculate the silhouette score of `reduced_data` against `preds`.
- Assign the silhouette score to `score` and print the result.
```
scores = []
# TODO: Apply your clustering algorithm of choice to the reduced data
for num_clusters in range(2,15):
from sklearn.cluster import KMeans
clusterer = KMeans(n_clusters=num_clusters).fit(reduced_data)
# TODO: Predict the cluster for each data point
preds = clusterer.predict(reduced_data)
# TODO: Find the cluster centers
centers = clusterer.cluster_centers_
# TODO: Predict the cluster for each transformed sample data point
sample_preds = clusterer.predict(pca_samples)
# TODO: Calculate the mean silhouette coefficient for the number of clusters chosen
from sklearn.metrics import silhouette_score
score = silhouette_score(reduced_data, preds)
scores.append(score)
scores_series = pd.Series(scores, index = range(2,15))
print scores_series.argmax()
%matplotlib inline
#import matplotlib.pyplot as plt
#plt.bar(range(len(scores)),scores)
score_df = pd.DataFrame(np.array(scores), index=range(2,15), columns=["Scores"])
score_df.plot(kind='bar')
print score_df
# Use K=2
from sklearn.cluster import KMeans
clusterer = KMeans(n_clusters=2).fit(reduced_data)
preds = clusterer.predict(reduced_data)
centers = clusterer.cluster_centers_
sample_preds = clusterer.predict(pca_samples)
```
### Question 7
*Report the silhouette score for several cluster numbers you tried. Of these, which number of clusters has the best silhouette score?*
**Answer:** I tried cluster numbers from 2 to 14, as shown above. The best silhouette score is reported for K=2.
### Cluster Visualization
Once you've chosen the optimal number of clusters for your clustering algorithm using the scoring metric above, you can now visualize the results by executing the code block below. Note that, for experimentation purposes, you are welcome to adjust the number of clusters for your clustering algorithm to see various visualizations. The final visualization provided should, however, correspond with the optimal number of clusters.
```
# Display the results of the clustering from implementation
rs.cluster_results(reduced_data, preds, centers, pca_samples)
```
### Implementation: Data Recovery
Each cluster present in the visualization above has a central point. These centers (or means) are not specifically data points from the data, but rather the *averages* of all the data points predicted in the respective clusters. For the problem of creating customer segments, a cluster's center point corresponds to *the average customer of that segment*. Since the data is currently reduced in dimension and scaled by a logarithm, we can recover the representative customer spending from these data points by applying the inverse transformations.
In the code block below, you will need to implement the following:
- Apply the inverse transform to `centers` using `pca.inverse_transform` and assign the new centers to `log_centers`.
- Apply the inverse function of `np.log` to `log_centers` using `np.exp` and assign the true centers to `true_centers`.
```
# TODO: Inverse transform the centers
log_centers = pca.inverse_transform(centers)
# TODO: Exponentiate the centers
true_centers = np.exp(log_centers)
# Display the true centers
segments = ['Segment {}'.format(i) for i in range(0,len(centers))]
true_centers = pd.DataFrame(np.round(true_centers), columns = data.keys())
true_centers.index = segments
display(true_centers)
```
### Question 8
Consider the total purchase cost of each product category for the representative data points above, and reference the statistical description of the dataset at the beginning of this project. *What set of establishments could each of the customer segments represent?*
**Hint:** A customer who is assigned to `'Cluster X'` should best identify with the establishments represented by the feature set of `'Segment X'`.
**Answer:** The Segment 0 customer shows a high projection onto the second principal component in the PCA analysis. It has relatively high values for the Fresh and Frozen categories, although the overall values are smaller than the mean for each category. This data point could correspond to a small ice-cream parlor.
The Segment 1 customer has high values in the categories that dominate the first principal component: Milk, Grocery and Detergents_Paper. This customer is probably a restaurant.
### Question 9
*For each sample point, which customer segment from* ***Question 8*** *best represents it? Are the predictions for each sample point consistent with this?*
Run the code block below to find which cluster each sample point is predicted to be.
```
# Display the predictions
for i, pred in enumerate(sample_preds):
print "Sample point", i, "predicted to be in Cluster", pred
```
**Answer:** Sample points 0 and 2 are assigned to cluster 1. This matches the prediction at the start of this assignment, where these customers were predicted to be a restaurant or café.
Sample point 1 is assigned to cluster 0, which I interpreted as a small ice-cream parlor. At the start, however, I predicted it to be a small grocery shop given its high Fresh consumption. Still, the profiles do match: both cluster 0 and this sample point have high Fresh and Frozen components.
## Conclusion
In this final section, you will investigate ways that you can make use of the clustered data. First, you will consider how the different groups of customers, the ***customer segments***, may be affected differently by a specific delivery scheme. Next, you will consider how giving a label to each customer (which *segment* that customer belongs to) can provide for additional features about the customer data. Finally, you will compare the ***customer segments*** to a hidden variable present in the data, to see whether the clustering identified certain relationships.
### Question 10
Companies will often run [A/B tests](https://en.wikipedia.org/wiki/A/B_testing) when making small changes to their products or services to determine whether making that change will affect its customers positively or negatively. The wholesale distributor is considering changing its delivery service from currently 5 days a week to 3 days a week. However, the distributor will only make this change in delivery service for customers that react positively. *How can the wholesale distributor use the customer segments to determine which customers, if any, would react positively to the change in delivery service?*
**Hint:** Can we assume the change affects all customers equally? How can we determine which group of customers it affects the most?
**Answer:** Based on the profile of each segment, the change in delivery service should affect the different groups differently. We can run an A/B test within each cluster to determine the effect of the change in delivery service.
First, we choose a random sample from each cluster as the treatment group and use the remaining points in the cluster as the control group. We then apply the change in delivery service to the treatment group and measure whether that customer segment reacts positively or negatively.
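As a minimal sketch of that setup (assuming `preds` holds the cluster assignments computed above; the 50/50 split and the fixed random seed are arbitrary choices for illustration):
```
import numpy as np

rng = np.random.RandomState(0)
for cluster_id in np.unique(preds):
    members = rng.permutation(np.where(preds == cluster_id)[0])  # row indices in this segment
    treatment = members[:len(members) // 2]   # offered the 3-days-a-week delivery scheme
    control = members[len(members) // 2:]     # kept on the current 5-days-a-week scheme
    print('Cluster {}: {} treatment, {} control customers'.format(
        cluster_id, len(treatment), len(control)))
```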
### Question 11
Additional structure is derived from originally unlabeled data when using clustering techniques. Since each customer has a ***customer segment*** it best identifies with (depending on the clustering algorithm applied), we can consider *'customer segment'* as an **engineered feature** for the data. Assume the wholesale distributor recently acquired ten new customers and each provided estimates for anticipated annual spending of each product category. Knowing these estimates, the wholesale distributor wants to classify each new customer to a ***customer segment*** to determine the most appropriate delivery service.
*How can the wholesale distributor label the new customers using only their estimated product spending and the* ***customer segment*** *data?*
**Hint:** A supervised learner could be used to train on the original customers. What would be the target variable?
**Answer:** To determine the customer segment of a new customer, any supervised learning algorithm (preferably one that can capture non-linear relationships) can be trained with the cluster id as the target variable.
Alternatively, the model learned during cluster creation can be used to predict the customer segment. For example, if K-means was used, the new customer can be assigned to a cluster based on the distance of the customer's attributes to the centers of the K clusters.
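A hedged sketch of the supervised approach (a random forest is just one possible choice of learner; `new_customer_spending` is a hypothetical array holding the ten new customers' spending estimates, which would first go through the same log transform and PCA projection as the training data):
```
from sklearn.ensemble import RandomForestClassifier

# Train on the existing customers, with the cluster assignment as the target
segment_clf = RandomForestClassifier(n_estimators=100, random_state=0)
segment_clf.fit(reduced_data, preds)

# New customers are projected into the same reduced space before prediction
# (new_customer_spending is hypothetical, for illustration only)
# new_reduced = pca.transform(np.log(new_customer_spending))
# print(segment_clf.predict(new_reduced))
```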
### Visualizing Underlying Distributions
At the beginning of this project, it was discussed that the `'Channel'` and `'Region'` features would be excluded from the dataset so that the customer product categories were emphasized in the analysis. By reintroducing the `'Channel'` feature to the dataset, an interesting structure emerges when considering the same PCA dimensionality reduction applied earlier to the original dataset.
Run the code block below to see how each data point is labeled either `'HoReCa'` (Hotel/Restaurant/Cafe) or `'Retail'` in the reduced space. In addition, you will find the sample points circled in the plot, which will identify their labeling.
```
# Display the clustering results based on 'Channel' data
rs.channel_results(reduced_data, outliers, pca_samples)
```
### Question 12
*How well does the clustering algorithm and number of clusters you've chosen compare to this underlying distribution of Hotel/Restaurant/Cafe customers to Retailer customers? Are there customer segments that would be classified as purely 'Retailers' or 'Hotels/Restaurants/Cafes' by this distribution? Would you consider these classifications as consistent with your previous definition of the customer segments?*
**Answer:** The plot above shows clusters that match the number of clusters identified earlier in the analysis quite well, and the distribution of the clusters also matches closely.
One observation is that, near the virtual boundary between clusters in my earlier analysis, quite a few customers were not classified consistently with the plot above. One reason is that the clustering algorithm used (K-means) assigns a hard cluster to each point; assigning a probability instead (e.g., with a mixture model fit by the EM algorithm) would have done more justice to such points.
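A minimal sketch of that soft-assignment alternative, assuming scikit-learn's `GaussianMixture` (an EM-based mixture model) and the same `reduced_data`:
```
from sklearn.mixture import GaussianMixture

# Each customer gets a probability of belonging to each of the two segments
gmm = GaussianMixture(n_components=2, random_state=0).fit(reduced_data)
soft_preds = gmm.predict_proba(reduced_data)
print(soft_preds[:5])   # rows sum to 1; values near 0.5 flag boundary customers
```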
> **Note**: Once you have completed all of the code implementations and successfully answered each question above, you may finalize your work by exporting the iPython Notebook as an HTML document. You can do this by using the menu above and navigating to
**File -> Download as -> HTML (.html)**. Include the finished document along with this notebook as your submission.
| github_jupyter |
```
from __future__ import division
import theano
import theano.tensor as T
import theano.tensor.signal.conv
import numpy as np
import cv2, scipy, time, os
from tqdm import tqdm
%matplotlib inline
import matplotlib
import matplotlib.pyplot as plt
from my_utils import Progbar
from skimage import measure
import matlab_wrapper
###################
#### required functions
###################
def floatX(X):
return np.asarray(X, dtype=theano.config.floatX)
## for computing loss of one step
def onepari(grayv_i, l_i, a_i, b_i, R_i, G_i, B_i, x_i, y_i, n_i, grayv, l, a, b, R, G, B, x, y, n, width, height, alpha, beta, gamma):
## non-linearity of the color contrast
l_delta = alpha[0]*(l_i - l)
a_delta = alpha[1]*(a_i - a)
b_delta = alpha[2]*(b_i - b)
# s_delta = (alpha[0] * T.abs_(l_delta) + alpha[1] * T.abs_(a_delta) + alpha[2]*T.abs_(b_delta))/(sum(alpha))
s_delta = T.sqrt(T.sqr(l_delta) + T.sqr(a_delta) + T.sqr(b_delta)) # here can be nonlinearity, can separate l,a,b channels
## non-linearity of gray_delta
# g_delta = 1 / np.log(1/beta + 1) / (grayv + beta) * (grayv_i - grayv)
g_delta = beta / (grayv + beta) * (grayv_i - grayv) # beta controls the nonlinearity of grayv
# g_delta = grayv_i - grayv
## assign sign flag to the g_delta
R_delta = R_i - R
G_delta = G_i - G
B_delta = B_i - B
flag = 1 * T.and_(T.and_(T.gt(R_delta, 0),T.gt(G_delta,0)),T.gt(B_delta,0)) + (-1)*T.and_(T.and_(T.lt(R_delta, 0),T.lt(G_delta,0)),T.lt(B_delta,0))
g_delta_flag = T.switch(T.eq(flag, 0), T.abs_(g_delta), flag*g_delta)
## non-linearity of the region importance weight
w_i = n_i * n / T.sqr(0.01 * width * height)
# w_i = T.log(n_i * n / T.sqr(0.01 * n_sum)) # nonlinear version of the weight
# w_i = w_i * T.gt(w_i,0)
## the non-linear distance weight
dist = gamma * (T.sqr(x_i - x) + T.sqr(y_i - y))
diag = (T.sqr(width) + T.sqr(height))
w_dist = T.exp(-dist/diag)
## final contrast loss
# loss =T.sum(w_dist * w_i * T.sqr(g_delta_flag - s_delta))
loss =T.sum(w_dist * w_i * T.sqr(g_delta_flag - s_delta))
return loss
## build up the loss function
def buildloss(alpha,beta,gamma):
w = []
for i in range(9):
if i < 3:
w.append(theano.shared(floatX(1/3)))
else:
w.append(theano.shared(floatX(0)))
lr = theano.shared(floatX(0.01))
R = T.fvector('R')
G = T.fvector('G')
B = T.fvector('B')
l = T.fvector('l')
a = T.fvector('a')
b = T.fvector('b')
x = T.fvector('x')
y = T.fvector('y')
n = T.fvector('n')
width = T.fscalar('width')
height = T.fscalar('height')
grayv = w[0] * R + w[1] * G + w[2]*B + w[3] *R*G + w[4]*G*B + w[5]*B*R + w[6]*(T.sqr(R)) + w[7]*(T.sqr(G)) +w[8]*(T.sqr(B))
loss_c, updates = theano.scan(fn=onepari,
outputs_info=None,
sequences=[grayv, l, a, b, R, G, B, x, y, n],
non_sequences=[grayv, l, a, b, R, G, B, x, y, n, width, height, alpha, beta, gamma])
loss_contrast = T.sum(loss_c)/2
loss_contrast = loss_contrast
# loss_contrast = loss_contrast + T.sum(1e5 * T.gt(grayv,1)*(grayv-1) + 1e5 * T.lt(grayv,0)*(0 - grayv))
w_update, m_previous, v_previous, t = Adam(loss_contrast,w,learning_rate=lr)
outputs = []
outputs.append(loss_contrast)
func = theano.function([R,G,B,l,a,b,x,y,n,width,height],outputs=outputs,updates=w_update)
return func,w,lr,m_previous,v_previous, t
def Adam(loss, all_params, learning_rate=0.001, b1=0.9, b2=0.999, e=1e-8,
gamma=1-1e-8):
"""
ADAM update rules
Default values are taken from [Kingma2014]
References:
[Kingma2014] Kingma, Diederik, and Jimmy Ba.
"Adam: A Method for Stochastic Optimization."
arXiv preprint arXiv:1412.6980 (2014).
http://arxiv.org/pdf/1412.6980v4.pdf
"""
updates = []
all_grads = theano.grad(loss, all_params)
alpha = learning_rate
t = theano.shared(np.float32(1))
b1_t = b1*gamma**(t-1) #(Decay the first moment running average coefficient)
m_previous_v = []
v_previous_v = []
for theta_previous, g in zip(all_params, all_grads):
m_previous = theano.shared(np.zeros(theta_previous.get_value().shape,
dtype=theano.config.floatX))
v_previous = theano.shared(np.zeros(theta_previous.get_value().shape,
dtype=theano.config.floatX))
m = b1_t*m_previous + (1 - b1_t)*g # (Update biased first moment estimate)
v = b2*v_previous + (1 - b2)*g**2 # (Update biased second raw moment estimate)
m_hat = m / (1-b1**t) # (Compute bias-corrected first moment estimate)
v_hat = v / (1-b2**t) # (Compute bias-corrected second raw moment estimate)
theta = theta_previous - (alpha * m_hat) / (T.sqrt(v_hat) + e) #(Update parameters)
updates.append((m_previous, m))
updates.append((v_previous, v))
updates.append((theta_previous, theta) )
m_previous_v.append(m_previous)
v_previous_v.append(v_previous)
updates.append((t, t + 1.))
return updates, m_previous_v,v_previous_v,t
def color2gray(path_images, img_name,lr_init=1,n_iter=1000):
## load the images
img = cv2.imread(path_images + img_name)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
lab_img = cv2.cvtColor(img, cv2.COLOR_RGB2LAB)
R_img = img[:,:,0].astype(np.float32) / 255
G_img = img[:,:,1].astype(np.float32) / 255
B_img = img[:,:,2].astype(np.float32) /255
l_img = (lab_img[:,:,0]).astype(np.float32) / 255
a_img = (lab_img[:,:,1]).astype(np.float32) / 255
b_img = (lab_img[:,:,2]).astype(np.float32) / 255
sz = img.shape
n_p = sz[0] * sz[1]
# print sz, n_p
    ## automatically determine the number of clusters
matlab.put('filename', path_images + img_name)
matlab.put('cdist', 30)
matlab.put('minsize', 20)
matlab.eval('findkmeans')
numklabels = matlab.get('numklabels')
klabels = matlab.get('klabels')
numclabels = matlab.get('numclabels')
clabels = matlab.get('clabels')
# print 'number of k clusters: ', numklabels
# print 'number of c clusters: ', numclabels
# klabels = clabels
# numklabels = numclabels
## build the statistics
grid = np.indices((sz[0], sz[1]),dtype=np.float32)
xgrid = grid[0]
ygrid = grid[1]
n_pixels = np.zeros((numklabels,)).astype(np.float32)
R_ini = np.zeros((numklabels,)).astype(np.float32)
G_ini = np.zeros((numklabels,)).astype(np.float32)
B_ini = np.zeros((numklabels,)).astype(np.float32)
l_ini = np.zeros((numklabels,)).astype(np.float32)
a_ini = np.zeros((numklabels,)).astype(np.float32)
b_ini = np.zeros((numklabels,)).astype(np.float32)
x_ini = np.zeros((numklabels,)).astype(np.float32)
y_ini = np.zeros((numklabels,)).astype(np.float32)
for i in range(numklabels):
n_pixels[i] = np.sum(klabels==i)
R_ini[i] = np.sum(R_img * (klabels == i))/n_pixels[i]
G_ini[i] = np.sum(G_img * (klabels == i))/n_pixels[i]
B_ini[i] = np.sum(B_img * (klabels == i))/n_pixels[i]
l_ini[i] = np.sum(l_img * (klabels == i))/n_pixels[i]
a_ini[i] = np.sum(a_img * (klabels == i))/n_pixels[i]
b_ini[i] = np.sum(b_img * (klabels == i))/n_pixels[i]
x_ini[i] = np.sum(xgrid * (klabels == i))/n_pixels[i]
y_ini[i] = np.sum(ygrid * (klabels == i))/n_pixels[i]
# w_all = ((n_pixels.sum()**2 - n_pixels.dot(n_pixels)) / 2).astype(np.float32)
## compute the adjacency matrix between colors
# w_ij = np.zeros((numklabels,numklabels)).astype(np.float32)
# for i in range(sz[0]-1):
# for j in range(sz[1]-1):
# w_ij[klabels[i,j],klabels[i+1,j]] = w_ij[klabels[i,j],klabels[i+1,j]] + 1
# w_ij[klabels[i,j],klabels[i,j+1]] = w_ij[klabels[i,j],klabels[i,j+1]] + 1
# np.fill_diagonal(w_ij,0)
# w_ij = w_ij + w_ij.T
# w_ij = (w_ij.T / np.sum(w_ij,axis=1)).T
## draw the clustering results
# fig = plt.figure(figsize=(20, 4))
# ax = fig.add_subplot(141)
# ax.imshow(img)
# ax.set_title('ori')
# ax.axis('off')
# ax = fig.add_subplot(142)
# ax.imshow(klabels,cmap='gray')
# ax.set_title('clusters')
# ax.axis('off')
# g_ini = np.copy(l_ini)
# imgg_ini = draw_gray(klabels,g_ini,mask)
# ax = fig.add_subplot(143)
# ax.imshow(imgg_ini,cmap='gray')
# ax.set_title('initial')
# ax.axis('off')
# imgc_ini = draw_color(klabels,img,mask)
# ax = fig.add_subplot(144)
# ax.imshow(imgc_ini.astype(np.uint8),vmin=0,vmax=255)
# ax.set_title('initial')
# ax.axis('off')
# plt.show()
## perform the decolorization optimization
#### initialize the parameters, the learning rate and the shared variable for Adam
for i in range(9):
if i < 3:
w[i].set_value(floatX(1/3))
else:
w[i].set_value(floatX(0))
wv = [i.get_value() for i in w]
# print wv
lr.set_value(lr_init)
# print lr.get_value()
for i in range(9):
m_previous[i].set_value(floatX(0))
v_previous[i].set_value(floatX(0))
t.set_value(floatX(1))
m_previous_vtemp = [i.get_value() for i in m_previous]
v_previous_vtemp = [i.get_value() for i in v_previous]
# print m_previous_vtemp
# print v_previous_vtemp
# print t.get_value()
## perform the optimization
# progbar = Progbar(n_iter,verbose=1)
for i in range(n_iter):
loss = func(R_ini,G_ini,B_ini,l_ini,a_ini,b_ini,x_ini,y_ini,n_pixels,sz[0],sz[1])
# progbar.add(1., values=[("train loss", loss[0]),])
wv = [i.get_value() for i in w]
# print wv
## draw the decolorization results
img_g = wv[0] * R_img + wv[1] * G_img + wv[2]*B_img + wv[3] *R_img*G_img + wv[4]*G_img*B_img + wv[5]*B_img*R_img + wv[6]*(R_img**2) + wv[7]*(G_img**2) +wv[8]*(B_img**2)
return img_g
def scan_parameters():
for lr_init in [0.1,0.01,0.001,1]:
for alpha in [[1,1,1],[2,1,1],[3,1,1],[1,2,2],[1,3,3]]:
for beta in [2,1,0.5,0.1]:
for gamma in [10,5,1,0.5,0.1]:
yield (lr_init,alpha,beta,gamma)
###########
## Build up the theano function and the matlab session
###########
func,w,lr,m_previous,v_previous, t = buildloss([2,1,1],2,0)
matlab = matlab_wrapper.MatlabSession()
## process all images in the folder
test_idx = '1'
# path_base = '/data/bjin/MyDecolor/dataset/Cadik/'
path_base = '/data/bjin/MyDecolor/dataset/CSDD_Dataset/'
path_images = path_base + 'images/'
path_results_CIEL = path_base + 'CIEL/'
path_results_mine = path_base + 'mine/test' + test_idx + '/'
path_results_2012Lu = path_base + '2012_Lu/'
path_results_2013Song = path_base + '2013_Song/'
path_results_2015Du = path_base + '2015_Du/'
path_results_2015Liu = path_base + '2015_Liu/'
lr_init = 0.0005
n_iter = 1000
if not os.path.isdir(path_results_mine):
os.mkdir(path_results_mine)
files = os.listdir(path_images)
files.sort()
n_files = len(files)
for i in tqdm(range(n_files)):
img_name = files[i]
if img_name[0] is '.':
continue
# print '************processing : ' + img_name + '************'
try:
img_g = color2gray(path_images, img_name,lr_init=lr_init,n_iter=n_iter)
except:
matlab = matlab_wrapper.MatlabSession()
img_g = color2gray(path_images, img_name,lr_init=lr_init,n_iter=n_iter)
img_g = (img_g - np.min(img_g))/(np.max(img_g) - np.min(img_g))
## plot the respective results
# fig = plt.figure(figsize=(20, 4))
# ax = fig.add_subplot(161)
# img = cv2.imread(path_images + img_name)
# img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
# ax.imshow(img)
# ax.set_title('ori')
# ax.axis('off')
# ax = fig.add_subplot(162)
# img_2012Lu = cv2.imread(path_results_2012Lu + img_name)
# ax.imshow(img_2012Lu,cmap='gray')
# ax.set_title('2012 Lu')
# ax.axis('off')
# ax = fig.add_subplot(163)
# img_2013Song = cv2.imread(path_results_2013Song + img_name)
# ax.imshow(img_2013Song,cmap='gray')
# ax.set_title('2013 Song')
# ax.axis('off')
# ax = fig.add_subplot(164)
# img_2015Du = cv2.imread(path_results_2015Du + img_name)
# ax.imshow(img_2015Du,cmap='gray')
# ax.set_title('2015 Du')
# ax.axis('off')
# ax = fig.add_subplot(165)
# # img_2015Liu = cv2.imread(path_results_2015Liu + img_name)
# img_2015Liu = cv2.imread(path_results_CIEL + img_name)
# ax.imshow(img_2015Liu,cmap='gray')
# ax.set_title('CIEL')
# ax.axis('off')
# ax = fig.add_subplot(166)
# ax.imshow(img_g,cmap='gray')
# ax.set_title('mine')
# ax.axis('off')
# plt.show()
cv2.imwrite(path_results_mine+img_name,(img_g*255).astype(np.uint8))
## evaluation
matlab = matlab_wrapper.MatlabSession()
matlab.put('thrNum', 15)
matlab.put('imNum', 24)
matlab.put('path_base', path_base)
matlab.put('path_results_mine', path_results_mine)
matlab.eval('Computemetrics')
CCPR = matlab.get('CCPR')
CCFR = matlab.get('CCFR')
Escore = matlab.get('Escore')
print 'mine ' + '2012 Lu ' + '2013 Song ' + '2015 Du ' + '2015 Liu '
print CCPR
path_base = '/data/bjin/MyDecolor/dataset/CSDD_Dataset/'
path_images = path_base + 'images/'
img_name = '8.png'
img_g = color2gray(path_images, img_name,lr_init=0.0005,n_iter=2000)
fig = plt.figure(figsize=(20, 4))
ax = fig.add_subplot(121)
img = cv2.imread(path_images + img_name)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
ax.imshow(img)
ax.set_title('ori')
ax.axis('off')
ax = fig.add_subplot(122)
ax.imshow(img_g,cmap='gray')
ax.set_title('mine')
ax.axis('off')
plt.show()
###########
## Main functions
###########
test_idx = '3'
path_base = '/data/bjin/MyDecolor/dataset/Cadik/'
path_images = path_base + 'images/'
path_results_mine = path_base + 'mine/test' + test_idx + '/'
path_results_2012Lu = path_base + '2012_Lu/'
path_results_2013Song = path_base + '2013_Song/'
path_results_2015Du = path_base + '2015_Du/'
path_results_2015Liu = path_base + '2015_Liu/'
lr_init = 0.1
n_iter = 1000
fptr = open(path_base + 'logs/Decolor_test' + test_idx + '.txt','w+')
print 'writing logs to ' + path_base + 'logs/Decolor_test' + test_idx
if not os.path.isdir(path_results_mine):
os.mkdir(path_results_mine)
files = os.listdir(path_images)
files.sort()
n_files = len(files)
for (alpha,beta,gamma) in scan_parameters():
print alpha, beta, gamma
print >>fptr, '*'*20
print >>fptr, alpha, beta
func,w,lr,m_previous,v_previous,t = buildloss(alpha,beta,gamma)
matlab = matlab_wrapper.MatlabSession()
## generate the gray scale images
for img_name in files:
if img_name[0] is '.':
continue
print '----------processing: ' + img_name + '---------'
try:
img_g = color2gray(path_images, img_name,lr_init=lr_init,n_iter=n_iter)
except:
matlab = matlab_wrapper.MatlabSession()
img_g = color2gray(path_images, img_name,lr_init=lr_init,n_iter=n_iter)
img_g = (img_g - np.min(img_g))/(np.max(img_g) - np.min(img_g))
cv2.imwrite(path_results_mine+img_name,(img_g*255).astype(np.uint8))
## evaluation
matlab = matlab_wrapper.MatlabSession()
matlab.put('thrNum', 15)
matlab.put('imNum', 24)
matlab.put('path_base', path_base)
matlab.put('path_results_mine', path_results_mine)
matlab.eval('Computemetrics')
CCPR = matlab.get('CCPR')
CCFR = matlab.get('CCFR')
Escore = matlab.get('Escore')
print 'mine ' + '2012 Lu ' + '2013 Song ' + '2015 Du ' + '2015 Liu '
print CCPR
print >>fptr, 'CCPR'
print >>fptr, CCPR
print >>fptr, 'CCFR'
print >>fptr, CCFR
print >>fptr, 'Escore'
print >>fptr, Escore
print >>fptr, '*'*20
fptr.flush()
os.fsync(fptr.fileno())
fptr.close()
```
| github_jupyter |
```
%matplotlib inline
```
# Simple Oscillator Example
This example shows the simplest way of using a solver.
We solve the free vibration of a simple oscillator:
$$m \ddot{u} + k u = 0,\quad u(0) = u_0,\quad \dot{u}(0) = \dot{u}_0$$
using the CVODE solver. An analytical solution exists, given by
$$u(t) = u_0 \cos\left(\sqrt{\frac{k}{m}} t\right)+\frac{\dot{u}_0}{\sqrt{\frac{k}{m}}} \sin\left(\sqrt{\frac{k}{m}} t\right)$$
```
from __future__ import print_function
import matplotlib.pyplot as plt
import numpy as np
from scikits.odes import ode
#data of the oscillator
k = 4.0
m = 1.0
#initial position and speed data on t=0, x[0] = u, x[1] = \dot{u}, xp = \dot{x}
initx = [1, 0.1]
```
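For reference, the analytical solution above can be wrapped in a small helper (a sketch using the `k`, `m`, and `initx` just defined; the comparison tables below evaluate the same expression inline):
```
def u_exact(t):
    """Analytical solution u(t) of the free oscillator for the data above."""
    omega = np.sqrt(k/m)
    return initx[0]*np.cos(omega*t) + initx[1]*np.sin(omega*t)/omega
```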
We need a first order system, so convert the second order system
$$m \ddot{u} + k u = 0,\quad u(0) = u_0,\quad \dot{u}(0) = \dot{u}_0$$
into
$$\left\{ \begin{array}{l}
\dot u = v\\
\dot v = \ddot u = -\frac{ku}{m}
\end{array} \right.$$
You need to define a function that computes the right hand side of above equation:
```
def rhseqn(t, x, xdot):
""" we create rhs equations for the problem"""
xdot[0] = x[1]
xdot[1] = - k/m * x[0]
```
To solve the ODE you define an ode object, specify the solver to use, here cvode, and pass the right hand side function. You request the solution at specific timepoints by passing an array of times to the solve member.
```
solver = ode('cvode', rhseqn, old_api=False)
solution = solver.solve([0., 1., 2.], initx)
print('\n t Solution Exact')
print('------------------------------------')
for t, u in zip(solution.values.t, solution.values.y):
print('{0:>4.0f} {1:15.6g} {2:15.6g}'.format(t, u[0],
initx[0]*np.cos(np.sqrt(k/m)*t)+initx[1]*np.sin(np.sqrt(k/m)*t)/np.sqrt(k/m)))
```
You can continue the solver by passing further times. Calling the solve routine reinits the solver, so you can restart at whatever time. To continue from the last computed solution, pass the last obtained time and solution.
**Note:** The solver performs better if it can take into account history information, so avoid calling solve to continue computation!
In general, you must check for errors using the errors output of solve.
```
#Solve over the next hour by continuation
times = np.linspace(0, 3600, 61)
times[0] = solution.values.t[-1]
solution = solver.solve(times, solution.values.y[-1])
if solution.errors.t:
print ('Error: ', solution.message, 'Error at time', solution.errors.t)
print ('Computed Solutions:')
print('\n t Solution Exact')
print('------------------------------------')
for t, u in zip(solution.values.t, solution.values.y):
print('{0:>4.0f} {1:15.6g} {2:15.6g}'.format(t, u[0],
initx[0]*np.cos(np.sqrt(k/m)*t)+initx[1]*np.sin(np.sqrt(k/m)*t)/np.sqrt(k/m)))
```
The solution fails at a time around 24 seconds. Errors can be due to many things. Here, however, the reason is simple: we try to make too large a jump between output times. Increasing the number of internal steps the solver is allowed to take will fix this. This is the **max_steps** option of cvode:
```
solver = ode('cvode', rhseqn, old_api=False, max_steps=5000)
solution = solver.solve(times, solution.values.y[-1])
if solution.errors.t:
print ('Error: ', solution.message, 'Error at time', solution.errors.t)
print ('Computed Solutions:')
print('\n t Solution Exact')
print('------------------------------------')
for t, u in zip(solution.values.t, solution.values.y):
print('{0:>4.0f} {1:15.6g} {2:15.6g}'.format(t, u[0],
initx[0]*np.cos(np.sqrt(k/m)*t)+initx[1]*np.sin(np.sqrt(k/m)*t)/np.sqrt(k/m)))
```
To plot the simple oscillator, we show a (t,x) plot of the solution. Doing this over 60 seconds can be done as follows:
```
#plot of the oscillator
solver = ode('cvode', rhseqn, old_api=False)
times = np.linspace(0,60,600)
solution = solver.solve(times, initx)
plt.plot(solution.values.t,[x[0] for x in solution.values.y])
plt.xlabel('Time [s]')
plt.ylabel('Position [m]')
plt.show()
```
You can refine the tolerances from their defaults to obtain more accurate solutions
```
options1= {'rtol': 1e-6, 'atol': 1e-12, 'max_steps': 50000} # default rtol and atol
options2= {'rtol': 1e-15, 'atol': 1e-25, 'max_steps': 50000}
solver1 = ode('cvode', rhseqn, old_api=False, **options1)
solver2 = ode('cvode', rhseqn, old_api=False, **options2)
solution1 = solver1.solve([0., 1., 60], initx)
solution2 = solver2.solve([0., 1., 60], initx)
print('\n t Solution1 Solution2 Exact')
print('-----------------------------------------------------')
for t, u1, u2 in zip(solution1.values.t, solution1.values.y, solution2.values.y):
print('{0:>4.0f} {1:15.8g} {2:15.8g} {3:15.8g}'.format(t, u1[0], u2[0],
initx[0]*np.cos(np.sqrt(k/m)*t)+initx[1]*np.sin(np.sqrt(k/m)*t)/np.sqrt(k/m)))
```
# Simple Oscillator Example: Stepwise running
When using the *solve* method, you solve over a period of time you decided on beforehand. In some problems you might want to advance the solution step by step and decide from the output when to stop. In that case you use the *step* method. The same example as above can be solved with the step method as follows.
You define the ode object selecting the cvode solver. You initialize the solver with the begin time and initial conditions using *init_step*. You compute solutions going forward with the *step* method.
```
solver = ode('cvode', rhseqn, old_api=False)
time = 0.
solver.init_step(time, initx)
plott = []
plotx = []
while True:
time += 0.1
# fix roundoff error at end
if time > 60: time = 60
solution = solver.step(time)
if solution.errors.t:
print ('Error: ', solution.message, 'Error at time', solution.errors.t)
break
#we store output for plotting
plott.append(solution.values.t)
plotx.append(solution.values.y[0])
if time >= 60:
break
plt.plot(plott,plotx)
plt.xlabel('Time [s]')
plt.ylabel('Position [m]')
plt.show()
```
The solver interpolates solutions to return the solution at the required output times:
```
print ('plott length:', len(plott), ', last computation times:', plott[-15:]);
```
# Simple Oscillator Example: Internal Solver Stepwise running
When using the *solve* method, you solve over a period of time you decided on beforehand. With the *step* method you solve by default towards a desired output time, after which you can continue solving the problem.
For full control, you can also compute problems using the solver internal steps. This is not advised, as the number of return steps can be very large, **slowing down** the computation enormously. If you want this nevertheless, you can achieve it with the *one_step_compute* option. Like this:
```
solver = ode('cvode', rhseqn, old_api=False, one_step_compute=True)
time = 0.
solver.init_step(time, initx)
plott = []
plotx = []
while True:
solution = solver.step(60)
if solution.errors.t:
print ('Error: ', solution.message, 'Error at time', solution.errors.t)
break
#we store output for plotting
plott.append(solution.values.t)
plotx.append(solution.values.y[0])
if solution.values.t >= 60:
#back up to 60
solver.set_options(one_step_compute=False)
solution = solver.step(60)
plott[-1] = solution.values.t
plotx[-1] = solution.values.y[0]
break
plt.plot(plott,plotx)
plt.xlabel('Time [s]')
plt.ylabel('Position [m]')
plt.show()
```
By inspecting the returned times you can see how efficiently the solver solves this problem:
```
print ('plott length:', len(plott), ', last computation times:', plott[-15:]);
```
| github_jupyter |
```
import classifierMLP as cmlp
import os
import struct
import numpy as np
def load_mnist(path, kind='train'):
"""Load MNIST data from `path`"""
labels_path = os.path.join(path,
'%s-labels-idx1-ubyte' % kind)
images_path = os.path.join(path,
'%s-images-idx3-ubyte' % kind)
print(labels_path)
print(images_path)
with open(labels_path, 'rb') as lbpath:
magic, n = struct.unpack('>II',
lbpath.read(8))
labels = np.fromfile(lbpath,
dtype=np.uint8)
with open(images_path, 'rb') as imgpath:
magic, num, rows, cols = struct.unpack(">IIII",
imgpath.read(16))
images = np.fromfile(imgpath,
dtype=np.uint8).reshape(len(labels), 784)
images = ((images / 255.) - .5) * 2
return images, labels
# unzips mnist
%matplotlib inline
import sys
import gzip
import shutil
if (sys.version_info > (3, 0)):
writemode = 'wb'
else:
writemode = 'w'
zipped_mnist = [f for f in os.listdir('./') if f.endswith('ubyte.gz')]
for z in zipped_mnist:
with gzip.GzipFile(z, mode='rb') as decompressed, open(z[:-3], writemode) as outfile:
outfile.write(decompressed.read())
X_train, y_train = load_mnist('', kind='train')
print('Rows: %d, columns: %d' % (X_train.shape[0], X_train.shape[1]))
X_test, y_test = load_mnist('', kind='t10k')
print('Rows: %d, columns: %d' % (X_test.shape[0], X_test.shape[1]))
X_train.shape
import matplotlib.pyplot as plt
fig, ax = plt.subplots(nrows=2, ncols=5, sharex=True, sharey=True,)
ax = ax.flatten()
for i in range(10):
img = X_train[y_train == i][0].reshape(28, 28)
ax[i].imshow(img, cmap='Greys')
ax[0].set_xticks([])
ax[0].set_yticks([])
plt.tight_layout()
# plt.savefig('images/12_5.png', dpi=300)
plt.show()
fig, ax = plt.subplots(nrows=7, ncols=12, sharex=True, sharey=True,)
ax = ax.flatten()
for i in range(84):
img = X_train[y_train == 4][i].reshape(28, 28)
ax[i].imshow(img, cmap='Greys')
ax[0].set_xticks([])
ax[0].set_yticks([])
plt.tight_layout()
# plt.savefig('images/12_6.png', dpi=300)
plt.show()
import seaborn as sns
sns.countplot(y_train)
n_epochs = 100
nn = cmlp.SimpleMLP(n_hidden_units=100,
l2=0.01,
epochs=n_epochs,
eta=0.0005,
minibatch_size=100,
shuffle=True,
seed=1)
nn.fit(X_train=X_train[:55000],
y_train=y_train[:55000],
X_valid=X_train[55000:],
y_valid=y_train[55000:])
#playing with the trained model
fig, ax = plt.subplots(nrows=5, ncols=4, sharex=True, sharey=True,)
ax = ax.flatten()
for i in range(20):
img = X_test[i].reshape(28, 28)
ax[i].imshow(img, cmap='Greys')
ax[0].set_xticks([])
ax[0].set_yticks([])
plt.tight_layout()
# plt.savefig('images/12_6.png', dpi=300)
plt.show()
#Lets test for X_test - 1 to 20
for i in range(20):
print ("Prediction for {}th image is {}".format(i,
nn.predict(X_test[i:i+1])))
import matplotlib.pyplot as plt
plt.plot(range(nn.epochs), nn.eval_['cost'],color='green' ,
label='training Error')
plt.ylabel('Error')
plt.xlabel('Epochs')
plt.legend()
import matplotlib.pyplot as plt
plt.plot(range(nn.epochs), nn.eval_['train_acc'],color='green' ,
label='training')
plt.plot(range(nn.epochs), nn.eval_['valid_acc'], color='red',
label='validation', linestyle='--')
plt.ylabel('Accuracy')
plt.xlabel('Epochs')
plt.legend()
#plt.savefig('images/12_08.png', dpi=300)
plt.show()
```
| github_jupyter |
# Siamese networks with TensorFlow 2.0/Keras
In this example, we'll implement a simple siamese network system, which verifies whether a pair of MNIST images is of the same class (true) or not (false).
_This example is partially based on_ [https://github.com/keras-team/keras/blob/master/examples/mnist_siamese.py](https://github.com/keras-team/keras/blob/master/examples/mnist_siamese.py)
Let's start with the imports
```
import random
import numpy as np
import tensorflow as tf
```
We'll continue with the `create_pairs` function, which creates a training dataset with an equal number of true/false pairs for each MNIST class.
```
def create_pairs(inputs: np.ndarray, labels: np.ndarray):
"""Create equal number of true/false pairs of samples"""
num_classes = 10
digit_indices = [np.where(labels == i)[0] for i in range(num_classes)]
pairs = list()
labels = list()
n = min([len(digit_indices[d]) for d in range(num_classes)]) - 1
for d in range(num_classes):
for i in range(n):
z1, z2 = digit_indices[d][i], digit_indices[d][i + 1]
pairs += [[inputs[z1], inputs[z2]]]
inc = random.randrange(1, num_classes)
dn = (d + inc) % num_classes
z1, z2 = digit_indices[d][i], digit_indices[dn][i]
pairs += [[inputs[z1], inputs[z2]]]
labels += [1, 0]
return np.array(pairs), np.array(labels, dtype=np.float32)
```
Next, we'll define the base network of the siamese system:
```
def create_base_network():
"""The shared encoding part of the siamese network"""
return tf.keras.models.Sequential([
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(128, activation='relu'),
tf.keras.layers.Dropout(0.1),
tf.keras.layers.Dense(128, activation='relu'),
tf.keras.layers.Dropout(0.1),
tf.keras.layers.Dense(64, activation='relu'),
])
```
Next, let's load the regular MNIST training and validation sets and create true/false pairs out of them:
```
# Load the train and test MNIST datasets
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()
x_train = x_train.astype(np.float32)
x_test = x_test.astype(np.float32)
x_train /= 255
x_test /= 255
input_shape = x_train.shape[1:]
# Create true/false training and testing pairs
train_pairs, tr_labels = create_pairs(x_train, y_train)
test_pairs, test_labels = create_pairs(x_test, y_test)
```
Then, we'll build the siamese system, which includes the `base_network`, the 2 siamese paths `encoder_a` and `encoder_b`, the `l1_dist` measure, and the combined `model`:
```
# Create the siamese network
# Start from the shared layers
base_network = create_base_network()
# Create first half of the siamese system
input_a = tf.keras.layers.Input(shape=input_shape)
# Note how we reuse the base_network in both halves
encoder_a = base_network(input_a)
# Create the second half of the siamese system
input_b = tf.keras.layers.Input(shape=input_shape)
encoder_b = base_network(input_b)
# Create the distance measure
l1_dist = tf.keras.layers.Lambda(
lambda embeddings: tf.keras.backend.abs(embeddings[0] - embeddings[1])) \
([encoder_a, encoder_b])
# Final fc layer with a single logistic output for the binary classification
flattened_weighted_distance = tf.keras.layers.Dense(1, activation='sigmoid') \
(l1_dist)
# Build the model
model = tf.keras.models.Model([input_a, input_b], flattened_weighted_distance)
```
Finally, we can train the model and check the validation accuracy, which reaches 99.37%:
```
# Train
model.compile(loss='binary_crossentropy',
optimizer=tf.keras.optimizers.Adam(),
metrics=['accuracy'])
model.fit([train_pairs[:, 0], train_pairs[:, 1]], tr_labels,
batch_size=128,
epochs=20,
validation_data=([test_pairs[:, 0], test_pairs[:, 1]], test_labels))
```
| github_jupyter |
# BBoxerwGradCAM
### This class forms boundary boxes (rectangle and polygon) using GradCAM outputs for a given image.
The purpose of this class is to develop Rectangle and Polygon coordinates that define an object based on an image classification model. The 'automatic' creation of these coordinates, which are often included in the COCO JSONs used to train object detection models, is valuable because data preparation and labeling can be a time-consuming task.
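For context, a hedged sketch of how such coordinates typically appear inside a COCO-style annotation entry (the field names follow the COCO convention; the numeric values below are placeholders, not outputs of this notebook):
```
# Hypothetical COCO-style annotation entry built from the class outputs
annotation = {
    "id": 1,
    "image_id": 15,
    "category_id": 1,                       # e.g. the class predicted by the learner
    "bbox": [80, 40, 240, 180],             # rectangle as [x, y, width, height]
    "segmentation": [[80, 40, 320, 40, 320, 220, 80, 220]],  # polygon as [x1, y1, x2, y2, ...]
    "area": 240 * 180,
    "iscrowd": 0,
}
```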
### This class takes 5 user inputs:
* **Pretrained Learner** (image classification model)
* **GradCAM Heatmap** (heatmap of GradCAM object - formed by a pretrained image classification learner)
* **Source Image**
* **Image Resizing Scale** (also applied to corresponding GradCAM heatmap)
* **BBOX Rectangle Resizing Scale**
*The class is compatible with Google Colab and other Python 3 environments*
```
# Imports for loading learner and the GradCAM class
from fastai import *
from fastai.vision import *
from fastai.callbacks.hooks import *
import scipy.ndimage
```
The following cell contains the widely used GradCAM class for pretrained image classification models (unedited).
```
#@title GradCAM Class
class GradCam():
@classmethod
def from_interp(cls,learn,interp,img_idx,ds_type=DatasetType.Valid,include_label=False):
# produce heatmap and xb_grad for pred label (and actual label if include_label is True)
if ds_type == DatasetType.Valid:
ds = interp.data.valid_ds
elif ds_type == DatasetType.Test:
ds = interp.data.test_ds
include_label=False
else:
return None
x_img = ds.x[img_idx]
xb,_ = interp.data.one_item(x_img)
xb_img = Image(interp.data.denorm(xb)[0])
probs = interp.preds[img_idx].numpy()
pred_idx = interp.pred_class[img_idx].item() # get class idx of img prediction label
hmap_pred,xb_grad_pred = get_grad_heatmap(learn,xb,pred_idx,size=xb_img.shape[-1])
prob_pred = probs[pred_idx]
actual_args=None
if include_label:
actual_idx = ds.y.items[img_idx] # get class idx of img actual label
if actual_idx!=pred_idx:
hmap_actual,xb_grad_actual = get_grad_heatmap(learn,xb,actual_idx,size=xb_img.shape[-1])
prob_actual = probs[actual_idx]
actual_args=[interp.data.classes[actual_idx],prob_actual,hmap_actual,xb_grad_actual]
return cls(xb_img,interp.data.classes[pred_idx],prob_pred,hmap_pred,xb_grad_pred,actual_args)
@classmethod
def from_one_img(cls,learn,x_img,label1=None,label2=None):
'''
learn: fastai's Learner
x_img: fastai.vision.image.Image
label1: generate heatmap according to this label. If None, this wil be the label with highest probability from the model
label2: generate additional heatmap according to this label
'''
pred_class,pred_idx,probs = learn.predict(x_img)
label1= str(pred_class) if not label1 else label1
xb,_ = learn.data.one_item(x_img)
xb_img = Image(learn.data.denorm(xb)[0])
probs = probs.numpy()
label1_idx = learn.data.classes.index(label1)
hmap1,xb_grad1 = get_grad_heatmap(learn,xb,label1_idx,size=xb_img.shape[-1])
prob1 = probs[label1_idx]
label2_args = None
if label2:
label2_idx = learn.data.classes.index(label2)
hmap2,xb_grad2 = get_grad_heatmap(learn,xb,label2_idx,size=xb_img.shape[-1])
prob2 = probs[label2_idx]
label2_args = [label2,prob2,hmap2,xb_grad2]
return cls(xb_img,label1,prob1,hmap1,xb_grad1,label2_args)
def __init__(self,xb_img,label1,prob1,hmap1,xb_grad1,label2_args=None):
self.xb_img=xb_img
self.label1,self.prob1,self.hmap1,self.xb_grad1 = label1,prob1,hmap1,xb_grad1
if label2_args:
self.label2,self.prob2,self.hmap2,self.xb_grad2 = label2_args
def plot(self,plot_hm=True,plot_gbp=True):
if not plot_hm and not plot_gbp:
plot_hm=True
cols = 5 if hasattr(self, 'label2') else 3
if not plot_gbp or not plot_hm:
cols-= 2 if hasattr(self, 'label2') else 1
fig,row_axes = plt.subplots(1,cols,figsize=(cols*5,5))
col=0
size=self.xb_img.shape[-1]
self.xb_img.show(row_axes[col]);col+=1
label1_title = f'1.{self.label1} {self.prob1:.3f}'
if plot_hm:
show_heatmap(self.hmap1,self.xb_img,size,row_axes[col])
row_axes[col].set_title(label1_title);col+=1
if plot_gbp:
row_axes[col].imshow(self.xb_grad1)
row_axes[col].set_axis_off()
row_axes[col].set_title(label1_title);col+=1
if hasattr(self, 'label2'):
label2_title = f'2.{self.label2} {self.prob2:.3f}'
if plot_hm:
show_heatmap(self.hmap2,self.xb_img,size,row_axes[col])
row_axes[col].set_title(label2_title);col+=1
if plot_gbp:
row_axes[col].imshow(self.xb_grad2)
row_axes[col].set_axis_off()
row_axes[col].set_title(label2_title)
# plt.tight_layout()
fig.subplots_adjust(wspace=0, hspace=0)
# fig.savefig('data_draw/both/gradcam.png')
def minmax_norm(x):
return (x - np.min(x))/(np.max(x) - np.min(x))
def scaleup(x,size):
scale_mult=size/x.shape[0]
upsampled = scipy.ndimage.zoom(x, scale_mult)
return upsampled
# hook for Gradcam
def hooked_backward(m,xb,target_layer,clas):
with hook_output(target_layer) as hook_a: #hook at last layer of group 0's output (after bn, size 512x7x7 if resnet34)
with hook_output(target_layer, grad=True) as hook_g: # gradient w.r.t to the target_layer
preds = m(xb)
preds[0,int(clas)].backward() # same as onehot backprop
return hook_a,hook_g
def clamp_gradients_hook(module, grad_in, grad_out):
for grad in grad_in:
torch.clamp_(grad, min=0.0)
# hook for guided backprop
def hooked_ReLU(m,xb,clas):
relu_modules = [module[1] for module in m.named_modules() if str(module[1]) == "ReLU(inplace)"]
with callbacks.Hooks(relu_modules, clamp_gradients_hook, is_forward=False) as _:
preds = m(xb)
preds[0,int(clas)].backward()
def guided_backprop(learn,xb,y):
xb = xb.cuda()
m = learn.model.eval();
xb.requires_grad_();
if not xb.grad is None:
xb.grad.zero_();
hooked_ReLU(m,xb,y);
return xb.grad[0].cpu().numpy()
def show_heatmap(hm,xb_im,size,ax=None):
if ax is None:
_,ax = plt.subplots()
xb_im.show(ax)
ax.imshow(hm, alpha=0.8, extent=(0,size,size,0),
interpolation='bilinear',cmap='magma');
def get_grad_heatmap(learn,xb,y,size):
'''
Main function to get hmap for heatmap and xb_grad for guided backprop
'''
xb = xb.cuda()
m = learn.model.eval();
target_layer = m[0][-1][-1] # last layer of group 0
hook_a,hook_g = hooked_backward(m,xb,target_layer,y)
target_act= hook_a.stored[0].cpu().numpy()
target_grad = hook_g.stored[0][0].cpu().numpy()
mean_grad = target_grad.mean(1).mean(1)
# hmap = (target_act*mean_grad[...,None,None]).mean(0)
hmap = (target_act*mean_grad[...,None,None]).sum(0)
hmap = np.where(hmap >= 0, hmap, 0)
xb_grad = guided_backprop(learn,xb,y) # (3,224,224)
#minmax norm the grad
xb_grad = minmax_norm(xb_grad)
hmap_scaleup = minmax_norm(scaleup(hmap,size)) # (224,224)
# multiply xb_grad and hmap_scaleup and switch axis
xb_grad = np.einsum('ijk, jk->jki',xb_grad, hmap_scaleup) #(224,224,3)
return hmap,xb_grad
```
I connect to google drive (this notebook was made on google colab for GPU usage) and load my pretrained learner.
```
from google.colab import drive
drive.mount('/content/drive')
base_dir = '/content/drive/My Drive/fellowshipai-data/final_3_class_data_train_test_split'
def get_data(sz): # This function returns an ImageDataBunch with a given image size
return ImageDataBunch.from_folder(base_dir+'/', train='train', valid='valid', # 0% validation because we already formed our testing set
ds_tfms=get_transforms(), size=sz, num_workers=4).normalize(imagenet_stats) # Normalized, 4 workers (multiprocessing) - 64 batch size (default)
arch = models.resnet34
data = get_data(224)
learn = cnn_learner(data,arch,metrics=[error_rate,Precision(average='micro'),Recall(average='micro')],train_bn=True,pretrained=True).mixup()
learn.load('model-224sz-basicaugments-oversampling-mixup-dLRs')
example_image = '/content/drive/My Drive/fellowshipai-data/final_3_class_data_train_test_split/train/raw/00000015.jpg'
img = open_image(example_image)
gcam = GradCam.from_one_img(learn,img) # using the GradCAM class
gcam.plot(plot_gbp = False) # We care about the heatmap (which is overlayed on top of the original image inherently)
gcam_heatmap = gcam.hmap1 # This is a 2d array
```
My pretrained learner correctly classified the image as raw with probability 0.996.
Note that images with very low noise and accurate feature importances (as with the example image) are the best candidates for deriving boundary boxes from the heatmap.
The learner is focusing on the steak in center view (heatmap pixels indicate feature importance).
```
from BBOXES_from_GRADCAM import BBoxerwGradCAM # load class from .py file
image_resizing_scale = [400,300]
bbox_scaling = [1,1,1,1]
bbox = BBoxerwGradCAM(learn,
gcam_heatmap,
example_image,
image_resizing_scale,
bbox_scaling)
for function in dir(bbox)[-18:]: print(function)
bbox.show_smoothheatmap()
bbox.show_contouredheatmap()
#bbox.show_bboxrectangle()
bbox.show_bboxpolygon()
bbox.show_bboxrectangle()
rect_coords, polygon_coords = bbox.get_bboxes()
rect_coords # x,y,w,h
polygon_coords
# IoU for object detection (both boxes are given as [x, y, w, h])
def get_IoU(truth_coords, pred_coords):
    # convert both boxes to corner coordinates
    tx1, ty1 = truth_coords[0], truth_coords[1]
    tx2, ty2 = truth_coords[0] + truth_coords[2], truth_coords[1] + truth_coords[3]
    px1, py1 = pred_coords[0], pred_coords[1]
    px2, py2 = pred_coords[0] + pred_coords[2], pred_coords[1] + pred_coords[3]
    # coords of intersection rectangle
    x1 = max(tx1, px1)
    y1 = max(ty1, py1)
    x2 = min(tx2, px2)
    y2 = min(ty2, py2)
    # area of intersection rectangle (zero if the boxes do not overlap)
    interArea = max(0, x2 - x1) * max(0, y2 - y1)
    # areas of the truth and prediction rectangles
    boxTruthArea = truth_coords[2] * truth_coords[3]
    boxPredArea = pred_coords[2] * pred_coords[3]
    # intersection over union
    iou = interArea / float(boxTruthArea + boxPredArea - interArea)
    return iou
get_IoU([80,40,240,180],rect_coords)
```
| github_jupyter |
```
from google.colab import drive
drive.mount('/content/drive')
cd /content/drive/MyDrive/ML-LaDECO/MLaDECO
```
### Importing libraries and methods from the thermograms, ml_training and utilites modules
```
import numpy as np
print('Project MLaDECO')
print('Author: Viswambhar Yasa')
print('Software version: 0.1')
from sklearn.preprocessing import MinMaxScaler, StandardScaler
import tensorflow as tf
from tensorflow.keras import models
from thermograms.Utilities import Utilities
from ml_training.dataset_generation.fourier_transformation import fourier_transformation
from ml_training.dataset_generation.principal_componant_analysis import principal_componant_analysis
from utilites.segmentation_colormap_anno import segmentation_colormap_anno
from utilites.tolerance_maks_gen import tolerance_predicted_mask
import matplotlib.pyplot as plt
```
##### Importing dataset for training
```
root_path = r'utilites/datasets'
data_file_name = r'metal_data.hdf5'
thermal_class = Utilities()
thermal_data,experiment_list=thermal_class.open_file(root_path, data_file_name,True)
experiment_name=r'2021-12-15-Materialstudie_Metallproben-ML3-laserbehandelte_Probe-1000W-10s'
experimental_data=thermal_data[experiment_name]
```
##### Checking the shape and file format of the thermographic experiment dataset
```
experimental_data
```
##### Identifying the reflection phase index
```
input_data, reflection_st_index, reflection_end_index = fourier_transformation(experimental_data,
scaling_type='normalization', index=1)
from PIL import Image
```
##### Performing data normalization to improve the learning ability of the machine learning model by scaling the data down to a smaller range
```
from sklearn.preprocessing import MinMaxScaler, StandardScaler
from sklearn.model_selection import train_test_split
exp_data=np.array(experimental_data)
standardizing = StandardScaler()
std_output_data = standardizing.fit_transform(
exp_data.reshape(exp_data.shape[0], -1)).reshape(exp_data.shape)
normalizing = MinMaxScaler(feature_range=(0, 1))
nrm_output_data = normalizing.fit_transform(
exp_data.reshape(exp_data.shape[0], -1)).reshape(exp_data.shape)
```
##### Plotting thermograms after Gaussian normalization and min-max scaling
```
from mpl_toolkits.axes_grid1 import make_axes_locatable
fig = plt.figure(figsize=(10,5))
ax1 = fig.add_subplot(121)
im1 = ax1.imshow(std_output_data[:,:,400].astype(np.float32), cmap='RdYlBu_r', interpolation='None')
ax1.set_title('Gaussian distribution scaling')
divider = make_axes_locatable(ax1)
cax = divider.append_axes('right', size='5%', pad=0.05)
fig.colorbar(im1, cax=cax, orientation='vertical')
ax1.axis('off')
ax2 = fig.add_subplot(122)
im2 = ax2.imshow(nrm_output_data[:,:,400].astype(np.float32), cmap='RdYlBu_r', interpolation='None')
ax2.set_title('Min-Max Normalization')
ax2.axis('off')
divider = make_axes_locatable(ax2)
cax = divider.append_axes('right', size='5%', pad=0.05)
fig.colorbar(im2, cax=cax, orientation='vertical')
plt.savefig(r"Documents/temp/metal_scaling.png",dpi=600,bbox_inches='tight',transparent=True)
plt.imshow(std_output_data[:,:,400].astype(np.float32),cmap='RdYlBu_r')
plt.colorbar()
#experiment_name='2021-05-11 - Variantenvergleich - VarioTherm Halogenlampe - Winkel 30°'
experimental_data=np.array(thermal_data[experiment_name])
import tensorflow as tf
def grayscale_image(data):
"""
Creates a gray scale image dataset from the input data
Args:
data (numpy array): Thermograms
Returns:
(numpy array): Gray scale images
"""
#print(data.shape)
seq_data=np.zeros((data.shape))
for i in range(data.shape[-1]):
temp=np.expand_dims(data[:,:,i],axis=-1)
#print(temp.shape)
a_i=tf.keras.utils.array_to_img(temp).convert('L')
#a_i=array_to_img(temp).convert('L')
imgGray = tf.keras.utils.img_to_array(a_i)
#print(imgGray.shape)
seq_data[:,:,i]=np.squeeze(imgGray)
return seq_data
d=grayscale_image(experimental_data)
d.shape
```
#### Extracting information from the grayscale image and saving it in PNG format
```
from PIL import Image
from keras.preprocessing.image import array_to_img,img_to_array
data=experimental_data
plt.figure()
plt.imshow(d[:,:,500],cmap='RdYlBu_r')
#plt.imshow(std_output_data[:,:,250].astype(np.float64),cmap='gray')
plt.savefig("Documents/temp/metal_output.png")
plt.axis('off')
img=plt.imread('Documents/temp/metal_output.png')
img.shape
```
##### Performing principal component analysis on the features by filtering intensity
```
EOFs=principal_componant_analysis(experimental_data)
img1 = Image.fromarray(EOFs[:,:,0].astype(np.int8))
#img2 = Image.fromarray(EOFs[:,:,0].astype(np.float32))
plt.imshow(np.squeeze(EOFs),cmap='YlOrRd_r')
plt.colorbar()
plt.savefig("Documents/temp/metal_PCA.png",dpi=600,bbox_inches='tight',transparent=True)
mask=np.zeros(shape=(np.squeeze(EOFs).shape))
mask[np.squeeze(EOFs) > 250]=1
mask[np.squeeze(EOFs) < -450]=1
plt.imsave('Documents/temp/metal_mask1.png',np.squeeze(mask),cmap='binary_r')
substrate=mask
```
##### Final mask of the dataset
```
plt.imshow(mask)
import cv2
img1 = cv2.imread('Documents/temp/metal_mask1.png',0)
print(img1.shape)
```
##### Converting the image data to a NumPy array and scaling it to integer values based on the number of features
```
img1[img1==255]=1
plt.imshow(img1,cmap='binary_r')
plt.colorbar()
```
##### Saving the segmentation mask
```
name='ml_training/dataset_generation/annots/'+experiment_name
np.save(name,img1)
ar=np.load(name+'.npy')
plt.imshow(ar,cmap='gray')
plt.colorbar()
```
| github_jupyter |
# Hierarchical Clustering
**Hierarchical clustering** refers to a class of clustering methods that seek to build a **hierarchy** of clusters, in which some clusters contain others. In this assignment, we will explore a top-down approach, recursively bipartitioning the data using k-means.
**Note to Amazon EC2 users**: To conserve memory, make sure to stop all the other notebooks before running this notebook.
## Import packages
```
from __future__ import print_function # to conform python 2.x print to python 3.x
import turicreate
import matplotlib.pyplot as plt
import numpy as np
import sys
import os
import time
from scipy.sparse import csr_matrix
from sklearn.cluster import KMeans
from sklearn.metrics import pairwise_distances
%matplotlib inline
```
## Load the Wikipedia dataset
```
wiki = turicreate.SFrame('people_wiki.sframe/')
```
As we did in previous assignments, let's extract the TF-IDF features:
```
wiki['tf_idf'] = turicreate.text_analytics.tf_idf(wiki['text'])
```
To run k-means on this dataset, we should convert the data matrix into a sparse matrix.
```
from em_utilities import sframe_to_scipy # converter
# This will take about a minute or two.
wiki = wiki.add_row_number()
tf_idf, map_word_to_index = sframe_to_scipy(wiki, 'tf_idf')
```
To be consistent with the k-means assignment, let's normalize all vectors to have unit norm.
```
from sklearn.preprocessing import normalize
tf_idf = normalize(tf_idf)
```
## Bipartition the Wikipedia dataset using k-means
Recall our workflow for clustering text data with k-means:
1. Load the dataframe containing a dataset, such as the Wikipedia text dataset.
2. Extract the data matrix from the dataframe.
3. Run k-means on the data matrix with some value of k.
4. Visualize the clustering results using the centroids, cluster assignments, and the original dataframe. We keep the original dataframe around because the data matrix does not keep auxiliary information (in the case of the text dataset, the title of each article).
Let us modify the workflow to perform bipartitioning:
1. Load the dataframe containing a dataset, such as the Wikipedia text dataset.
2. Extract the data matrix from the dataframe.
3. Run k-means on the data matrix with k=2.
4. Divide the data matrix into two parts using the cluster assignments.
5. Divide the dataframe into two parts, again using the cluster assignments. This step is necessary to allow for visualization.
6. Visualize the bipartition of data.
We'd like to be able to repeat Steps 3-6 multiple times to produce a **hierarchy** of clusters such as the following:
```
(root)
|
+------------+-------------+
| |
Cluster Cluster
+------+-----+ +------+-----+
| | | |
Cluster Cluster Cluster Cluster
```
Each **parent cluster** is bipartitioned to produce two **child clusters**. At the very top is the **root cluster**, which consists of the entire dataset.
Now we write a wrapper function to bipartition a given cluster using k-means. There are three variables that together comprise the cluster:
* `dataframe`: a subset of the original dataframe that correspond to member rows of the cluster
* `matrix`: same set of rows, stored in sparse matrix format
* `centroid`: the centroid of the cluster (not applicable for the root cluster)
Rather than passing around the three variables separately, we package them into a Python dictionary. The wrapper function takes a single dictionary (representing a parent cluster) and returns two dictionaries (representing the child clusters).
```
def bipartition(cluster, maxiter=400, num_runs=4, seed=None):
'''cluster: should be a dictionary containing the following keys
* dataframe: original dataframe
* matrix: same data, in matrix format
* centroid: centroid for this particular cluster'''
data_matrix = cluster['matrix']
dataframe = cluster['dataframe']
# Run k-means on the data matrix with k=2. We use scikit-learn here to simplify workflow.
kmeans_model = KMeans(n_clusters=2, max_iter=maxiter, n_init=num_runs, random_state=seed, n_jobs=1)
kmeans_model.fit(data_matrix)
centroids, cluster_assignment = kmeans_model.cluster_centers_, kmeans_model.labels_
# Divide the data matrix into two parts using the cluster assignments.
data_matrix_left_child, data_matrix_right_child = data_matrix[cluster_assignment==0], \
data_matrix[cluster_assignment==1]
# Divide the dataframe into two parts, again using the cluster assignments.
cluster_assignment_sa = turicreate.SArray(cluster_assignment) # minor format conversion
dataframe_left_child, dataframe_right_child = dataframe[cluster_assignment_sa==0], \
dataframe[cluster_assignment_sa==1]
# Package relevant variables for the child clusters
cluster_left_child = {'matrix': data_matrix_left_child,
'dataframe': dataframe_left_child,
'centroid': centroids[0]}
cluster_right_child = {'matrix': data_matrix_right_child,
'dataframe': dataframe_right_child,
'centroid': centroids[1]}
return (cluster_left_child, cluster_right_child)
```
The following cell performs bipartitioning of the Wikipedia dataset. Allow 2+ minutes to finish.
Note. For the purpose of the assignment, we set an explicit seed (`seed=1`) to produce identical outputs for every run. In practical applications, you might want to use different random seeds for all runs.
```
%%time
wiki_data = {'matrix': tf_idf, 'dataframe': wiki} # no 'centroid' for the root cluster
left_child, right_child = bipartition(wiki_data, maxiter=100, num_runs=1, seed=0)
```
Let's examine the contents of one of the two clusters, which we call the `left_child`, referring to the tree visualization above.
```
left_child
```
And here is the content of the other cluster we named `right_child`.
```
right_child
```
## Visualize the bipartition
We provide you with a modified version of the visualization function from the k-means assignment. For each cluster, we print the top 5 words with highest TF-IDF weights in the centroid and display excerpts for the 8 nearest neighbors of the centroid.
```
def display_single_tf_idf_cluster(cluster, map_index_to_word):
    '''map_index_to_word: SFrame specifying the mapping between words and column indices'''
wiki_subset = cluster['dataframe']
tf_idf_subset = cluster['matrix']
centroid = cluster['centroid']
# Print top 5 words with largest TF-IDF weights in the cluster
idx = centroid.argsort()[::-1]
for i in range(5):
        print('{0}:{1:.3f}'.format(map_index_to_word['category'][idx[i]], centroid[idx[i]]))
print('')
# Compute distances from the centroid to all data points in the cluster.
distances = pairwise_distances(tf_idf_subset, [centroid], metric='euclidean').flatten()
# compute nearest neighbors of the centroid within the cluster.
nearest_neighbors = distances.argsort()
# For 8 nearest neighbors, print the title as well as first 180 characters of text.
# Wrap the text at 80-character mark.
for i in range(8):
text = ' '.join(wiki_subset[nearest_neighbors[i]]['text'].split(None, 25)[0:25])
print('* {0:50s} {1:.5f}\n {2:s}\n {3:s}'.format(wiki_subset[nearest_neighbors[i]]['name'],
distances[nearest_neighbors[i]], text[:90], text[90:180] if len(text) > 90 else ''))
print('')
```
Let's visualize the two child clusters:
```
display_single_tf_idf_cluster(left_child, map_word_to_index)
display_single_tf_idf_cluster(right_child, map_word_to_index)
```
The right cluster consists of athletes and artists (singers and actors/actresses), whereas the left cluster consists of non-athletes and non-artists. So far, we have a single-level hierarchy consisting of two clusters, as follows:
```
Wikipedia
+
|
+--------------------------+--------------------+
| |
+ +
Non-athletes/artists Athletes/artists
```
Is this hierarchy good enough? **When building a hierarchy of clusters, we must keep our particular application in mind.** For instance, we might want to build a **directory** for Wikipedia articles. A good directory would let you quickly narrow down your search to a small set of related articles. The categories of athletes and non-athletes are too general to facilitate efficient search. For this reason, we decide to build another level into our hierarchy of clusters with the goal of getting more specific cluster structure at the lower level. To that end, we subdivide both the `athletes/artists` and `non-athletes/artists` clusters.
## Perform recursive bipartitioning
### Cluster of athletes and artists
To help identify the clusters we've built so far, let's give them easy-to-read aliases:
```
non_athletes_artists = left_child
athletes_artists = right_child
```
Using the bipartition function, we produce two child clusters of the athlete cluster:
```
# Bipartition the cluster of athletes and artists
left_child_athletes_artists, right_child_athletes_artists = bipartition(athletes_artists, maxiter=100, num_runs=6, seed=1)
```
The left child cluster mainly consists of athletes:
```
display_single_tf_idf_cluster(left_child_athletes_artists, map_word_to_index)
```
On the other hand, the right child cluster consists mainly of artists (singers and actors/actresses):
```
display_single_tf_idf_cluster(right_child_athletes_artists, map_word_to_index)
```
Our hierarchy of clusters now looks like this:
```
Wikipedia
+
|
+--------------------------+--------------------+
| |
+ +
Non-athletes/artists Athletes/artists
+
|
+----------+----------+
| |
| |
+ |
athletes artists
```
Should we keep subdividing the clusters? If so, which cluster should we subdivide? To answer this question, we again think about our application. Since we organize our directory by topics, it would be nice to have topics that are about as coarse as each other. For instance, if one cluster is about baseball, we expect some other clusters about football, basketball, volleyball, and so forth. That is, **we would like to achieve a similar level of granularity for all clusters.**
Both the athletes and artists nodes can be subdivided further, as each one can be split into more specific professions (singer/actress/painter/director, or baseball/football/basketball, etc.). Let's explore subdividing the athletes cluster to produce finer child clusters; a small recursive helper that automates this process is sketched below.
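The helper below is only an illustrative sketch (the name `recursive_bipartition` and the `depth` parameter are ours, not part of the assignment); it assumes the `bipartition` function and the cluster dictionaries with `'matrix'`, `'dataframe'` and `'centroid'` keys defined earlier.
```
def recursive_bipartition(cluster, depth, maxiter=100, num_runs=6, seed=1):
    """Recursively bipartition `cluster` until `depth` additional levels are built.

    Leaves keep the original cluster dict; internal nodes store their two
    children under the 'left' and 'right' keys.
    """
    if depth == 0 or len(cluster['dataframe']) < 2:
        return cluster
    left, right = bipartition(cluster, maxiter=maxiter, num_runs=num_runs, seed=seed)
    return {'cluster': cluster,
            'left': recursive_bipartition(left, depth - 1, maxiter, num_runs, seed),
            'right': recursive_bipartition(right, depth - 1, maxiter, num_runs, seed)}

# Example: build two more levels below the athletes/artists cluster
# athletes_artists_tree = recursive_bipartition(athletes_artists, depth=2)
```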
Let's give the clusters aliases as well:
```
athletes = left_child_athletes_artists
artists = right_child_athletes_artists
```
### Cluster of athletes
In answering the following quiz question, take a look at the topics represented in the top documents (those closest to the centroid), as well as the list of words with highest TF-IDF weights.
Let us bipartition the cluster of athletes.
```
left_child_athletes, right_child_athletes = bipartition(athletes, maxiter=100, num_runs=6, seed=1)
display_single_tf_idf_cluster(left_child_athletes, map_word_to_index)
display_single_tf_idf_cluster(right_child_athletes, map_word_to_index)
```
**Quiz Question**. Which diagram best describes the hierarchy right after splitting the `athletes` cluster? Refer to the quiz form for the diagrams.
**Caution**. The granularity criterion is an imperfect heuristic and must be taken with a grain of salt. It takes a lot of manual intervention to obtain a good hierarchy of clusters.
* **If a cluster is highly mixed, the top articles and words may not convey the full picture of the cluster.** Thus, we may be misled if we judge the purity of clusters solely by their top documents and words.
* **Many interesting topics are hidden somewhere inside the clusters but do not appear in the visualization.** We may need to subdivide further to discover new topics. For instance, subdividing the `ice_hockey_football` cluster led to the appearance of runners and golfers.
### Cluster of non-athletes
Now let us subdivide the cluster of non-athletes.
```
%%time
# Bipartition the cluster of non-athletes
left_child_non_athletes_artists, right_child_non_athletes_artists = bipartition(non_athletes_artists, maxiter=100, num_runs=3, seed=1)
display_single_tf_idf_cluster(left_child_non_athletes_artists, map_word_to_index)
display_single_tf_idf_cluster(right_child_non_athletes_artists, map_word_to_index)
```
The clusters are not as clear, but the left cluster has a tendency to show important female figures, and the right one to show politicians and government officials.
Let's divide them further.
```
female_figures = left_child_non_athletes_artists
politicians_etc = right_child_non_athletes_artists
```
**Quiz Question**. Let us bipartition the clusters `female_figures` and `politicians_etc`. Which diagram best describes the resulting hierarchy of clusters for the non-athletes? Refer to the quiz for the diagrams.
**Note**. Use `maxiter=100, num_runs=6, seed=1` for consistency of output.
```
left_female_figures, right_female_figures = bipartition(female_figures, maxiter=100, num_runs=6, seed=1)
left_politicians_etc, right_politicians_etc = bipartition(politicians_etc, maxiter=100, num_runs=6, seed=1)
display_single_tf_idf_cluster(left_female_figures, map_word_to_index)
display_single_tf_idf_cluster(right_female_figures, map_word_to_index)
display_single_tf_idf_cluster(left_politicians_etc, map_word_to_index)
display_single_tf_idf_cluster(right_politicians_etc, map_word_to_index)
```
### Kolmogorov's Axioms
1) For every event, the probability is a real number that is zero or positive.
- $P(A) \geq 0$
2) The probability of the event corresponding to the sample space (the whole set) is 1.
- $P(\Omega) = 1$
3) The probability of the union of two events with no common elements is the sum of their individual probabilities.
- $A\cap B = \emptyset \rightarrow P(A\cup B) = P(A) + P(B)$
-----
### Summary of Probability Properties
1) Probability of the empty set
- $P(\emptyset) = 0$
2) Probability of the complement
- $P(A^\complement) = 1 - P(A)$
3) Inclusion-exclusion principle
- $P(A\cup B) = P(A) + P(B) - P(A\cap B)$
4) Law of total probability
- $P(A) = \sum_i P(A, C_i)$
----
### Probability Distributions
- Finite number of samples: it is enough to specify the probability of each elementary event
- Probability mass function: $P(\{a\})$
- Infinite number of samples
- Interval: if the sample space is the set of real numbers, an event is described by two numbers, a start point and an end point
$A = \{a < x \leq b\}$
1) Cumulative distribution function (cdf, $F(x)$)
- $F(b) = F(a) + P(a,b)$
$\rightarrow P(a,b) = F(b) - F(a)$
2) Probability density function (pdf): differentiate $F(x)$ to obtain its slope
- $p(x) = \frac{dF(x)}{dx}$
- Relationship between the cdf and the pdf $\rightarrow$ integration (see the short numerical check below)
- $F(x) = \int^x_{-\infty}p(u)du$
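As a quick numerical check of this cdf-pdf relationship (added for illustration; `scipy.stats.norm` is used here purely as a convenient example distribution):
```
import numpy as np
from scipy.stats import norm

x = np.linspace(-5, 5, 1001)
cdf = norm.cdf(x)
pdf = norm.pdf(x)

# p(x) = dF(x)/dx: the numerical derivative of the cdf recovers the pdf
pdf_from_cdf = np.gradient(cdf, x)
print(np.allclose(pdf_from_cdf, pdf, atol=1e-3))

# F(x) = integral of p(u) du: cumulative integration of the pdf recovers the cdf
cdf_from_pdf = np.cumsum(pdf) * (x[1] - x[0])
print(np.allclose(cdf_from_pdf, cdf, atol=1e-2))
```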
###### Properties of the probability density function
1) Since the slope of the cumulative distribution function can never be negative, the probability density function is greater than or equal to zero.
- $p(x) \geq 0$
2) Integrating from $-\infty$ to $\infty$ gives the probability of the whole sample space $(-\infty, \infty)$, so the value is 1.
- $\int^\infty_{-\infty} p(u)du = 1$
----
### Joint and Conditional Probability
- Joint probability
- $P(A\cap B)$ or $P(A,B)$
- Marginal probability
- $P(A), P(B)$
- Conditional probability (the condition is treated as 100% true)
- $P(A|B)$: the new probability of $A$ given that $B$ has occurred, i.e. $P(B) = 1$ in the new sample space
$P(A|B) = \frac{P(A,B)}{P(B)} = \frac{P(A_{\text{new}})}{P(\Omega_{\text{new}})}$
- Independence
- Events $A$ and $B$ are defined to be independent if their joint probability satisfies $P(A,B) = P(A)P(B)$.
- Equivalently, $P(A) = P(A|B)$
----
#### Cause and effect, evidence and inference, assumption and conditional conclusion
- In the conditional probability $P(A|B)$, the events (claims/propositions) $B$ and $A$ can be read as:
1) an assumption and the conditional conclusion that follows from it
2) a cause and its effect
3) evidence and the inference drawn from it
$\rightarrow P(A,B) = P(A|B)P(B)$
: the probability that both $A$ and $B$ occur is the probability that $B$ occurs, multiplied by the probability that $A$ occurs given that $B$ has occurred
---
#### Chain rule
- $P(X_1,...,X_N) = P(X_1)\prod^N_{i=2}P(X_i|X_1,...,X_{i-1})$
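For two variables the chain rule reduces to $P(X,Y) = P(X)P(Y|X)$. The short check below is added for illustration and uses plain NumPy with the same numbers as the `pxy` example further down:
```
import numpy as np

pxy_values = np.array([[3, 9], [7, 1]]) / 20   # joint P(X, Y)
px_values = pxy_values.sum(axis=1)             # marginal P(X)
py_given_x = pxy_values / px_values[:, None]   # conditional P(Y | X)

# chain rule: P(X, Y) = P(X) * P(Y | X)
print(np.allclose(px_values[:, None] * py_given_x, pxy_values))
```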
----
#### Random variables
- A variable that outputs a random number, e.g. a random box
----
#### JointProbabilityDistribution(variables, cardinality, values)
- variables: the names of the random variables, as a list of strings; even a single random variable must be passed as a list.
- cardinality: a list with the number of samples (mutually exclusive events) of each random variable.
- values: a list of the (joint) probability values for every sample (combination) of the random variables.
```
import numpy as np
from pgmpy.factors.discrete import JointProbabilityDistribution as JPD
px = JPD(['X'], [2], np.array([12, 8]) / 20)
print(px)
py = JPD(['Y'], [2], np.array([10, 10])/20)
print(py)
pxy = JPD(['X', 'Y'], [2, 2], np.array([3, 9, 7, 1])/20)
print(pxy)
pxy2 = JPD(['X', 'Y'], [2, 2], np.array([6, 6, 4, 4, ])/20)
print(pxy2)
```
----
#### marginal_distribution()
- Computes the marginal probability distribution of the random variable(s) passed as argument
- e.g. "find the marginal probability of $X$"
----
- Compute the marginal probabilities $P(A)$ and $P(A^\complement)$ from the joint probability
```
pmx = pxy.marginal_distribution(['X'], inplace=False)
print(pmx)
```
#### marginalize()
- Marginalizes out the random variable(s) passed as argument and returns the marginal distribution of the remaining random variables.
- e.g. "to obtain $X$, eliminate $Y$!" (i.e. sum over its mutually exclusive events)
----
- Compute $P(A)$ and $P(A^\complement)$ from the joint probability
```
pmx = pxy.marginalize(['Y'], inplace=False)
print(pmx)
```
- Compute $P(B)$ and $P(B^\complement)$ from the joint probability
```
pmy = pxy.marginal_distribution(['Y'], inplace=False)
print(pmy)
py = pxy.marginalize(['X'], inplace=False)
print(py)
```
#### conditional_distribution()
- Computes conditional probabilities given that some random variable takes a particular value (event)
---
- Compute the conditional probabilities $P(B|A)$ and $P(B^\complement|A)$ from the joint probability
```
py_on_x0 = pxy.conditional_distribution([('X', 0)], inplace=False)
print(py_on_x0)
```
- Compute the conditional probabilities $P(B|A^\complement)$ and $P(B^\complement|A^\complement)$ from the joint probability
```
py_on_x1 = pxy.conditional_distribution([('X', 1)], inplace=False)
print(py_on_x1)
```
- Compute the conditional probabilities $P(A|B)$ and $P(A^\complement|B)$ from the joint probability
```
px_on_y0 = pxy.conditional_distribution([('Y', 0)], inplace=False)
print(px_on_y0)
px_on_y1 = pxy.conditional_distribution([('Y', 1)], inplace=False)
print(px_on_y1)
```
#### check_independence()
- Checks whether two random variables are independent.
* Independence: $P(A,B) = P(A)P(B)$
- If the variables are independent, the full joint probability does not need to be stored.
```
pxy.check_independence(['X'], ['Y'])
```
- Multiplying JointProbabilityDistribution objects together computes the joint probability under the assumption that the two distributions are independent
```
# not independent
print(px*py)
print(pxy)
pxy2.check_independence(['X'], ['Y'])
```
# Object Detection with SSD
### Here we demonstrate detection on example images using SSD with PyTorch
```
import os
import sys
module_path = os.path.abspath(os.path.join('..'))
if module_path not in sys.path:
sys.path.append(module_path)
import torch
import torch.nn as nn
import torch.backends.cudnn as cudnn
from torch.autograd import Variable
import numpy as np
import cv2
if torch.cuda.is_available():
torch.set_default_tensor_type('torch.cuda.FloatTensor')
from ssd import build_ssd
```
## Build SSD300 in Test Phase
1. Build the architecture, specifying the size of the input image (300) and the number of object classes to score (21 for the VOC dataset)
2. Next we load pretrained weights on the VOC0712 trainval dataset
```
net = build_ssd('test', 300, 21) # initialize SSD
net.load_weights('../weights/ssd300_VOC_28000.pth')
```
## Load Image
### Here we just load a sample image from the VOC07 dataset
```
# image = cv2.imread('./data/example.jpg', cv2.IMREAD_COLOR) # uncomment if dataset not downloaded
%matplotlib inline
from matplotlib import pyplot as plt
from data import VOCDetection, VOC_ROOT, VOCAnnotationTransform
# here we specify year (07 or 12) and dataset ('test', 'val', 'train')
testset = VOCDetection(VOC_ROOT, [('2007', 'val')], None, VOCAnnotationTransform())
img_id = 60
image = testset.pull_image(img_id)
rgb_image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
# View the sampled input image before transform
plt.figure(figsize=(10,10))
plt.imshow(rgb_image)
plt.show()
```
## Pre-process the input.
#### Using the torchvision package, we can create a Compose of multiple built-in transform ops to apply
For SSD, at test time we use a custom BaseTransform callable to
resize our image to 300x300, subtract the dataset's mean rgb values,
and swap the color channels for input to SSD300.
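If you want to reuse these steps across images, they can be wrapped in a small helper. This is only a convenience sketch (the `preprocess` function is ours, not part of the repository); it performs the same resize, mean subtraction and channel swap as the cell below.
```
import cv2
import numpy as np
import torch

def preprocess(image_bgr, size=300, mean=(104.0, 117.0, 123.0)):
    """Resize, subtract the per-channel mean and return a CxHxW float tensor."""
    x = cv2.resize(image_bgr, (size, size)).astype(np.float32)
    x -= np.array(mean, dtype=np.float32)
    x = x[:, :, ::-1].copy()              # BGR -> RGB
    return torch.from_numpy(x).permute(2, 0, 1)
```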
```
x = cv2.resize(image, (300, 300)).astype(np.float32)
x -= (104.0, 117.0, 123.0)
x = x.astype(np.float32)
x = x[:, :, ::-1].copy()
plt.imshow(x)
x = torch.from_numpy(x).permute(2, 0, 1)
```
## SSD Forward Pass
### Now just wrap the image in a Variable so it is recognized by PyTorch autograd
```
xx = Variable(x.unsqueeze(0)) # wrap tensor in Variable
if torch.cuda.is_available():
xx = xx.cuda()
y = net(xx)
```
## Parse the Detections and View Results
Filter outputs with confidence scores lower than a threshold; here we choose 60%.
```
from data import VOC_CLASSES as labels
top_k=10
plt.figure(figsize=(10,10))
colors = plt.cm.hsv(np.linspace(0, 1, 21)).tolist()
plt.imshow(rgb_image) # plot the image for matplotlib
currentAxis = plt.gca()
detections = y.data
# scale each detection back up to the image
scale = torch.Tensor(rgb_image.shape[1::-1]).repeat(2)
for i in range(detections.size(1)):
    j = 0
    while j < detections.size(2) and detections[0,i,j,0] >= 0.6:
score = detections[0,i,j,0]
label_name = labels[i-1]
display_txt = '%s: %.2f'%(label_name, score)
pt = (detections[0,i,j,1:]*scale).cpu().numpy()
coords = (pt[0], pt[1]), pt[2]-pt[0]+1, pt[3]-pt[1]+1
color = colors[i]
currentAxis.add_patch(plt.Rectangle(*coords, fill=False, edgecolor=color, linewidth=2))
currentAxis.text(pt[0], pt[1], display_txt, bbox={'facecolor':color, 'alpha':0.5})
j+=1
```
# Data Attribute Recommendation - TechED 2020 INT260
Getting started with the Python SDK for the Data Attribute Recommendation service.
## Business Scenario
We will consider a business scenario involving product master data. The creation and maintenance of this product master data requires the careful manual selection of the correct categories for a given product from a pre-defined hierarchy of product categories.
In this workshop, we will explore how to automate this tedious manual task with the Data Attribute Recommendation service.
<video controls src="videos/dar_prediction_material_table.mp4"/>
This workshop will cover:
* Data Upload
* Model Training and Deployment
* Inference Requests
We will work through a basic example of how to achieve these tasks using the [Python SDK for Data Attribute Recommendation](https://github.com/SAP/data-attribute-recommendation-python-sdk).
*Note: if you are doing several runs of this notebook on a trial account, you may see errors stating 'The resource can no longer be used. Usage limit has been reached'. It can be beneficial to [clean up the service instance](#Cleaning-up-a-service-instance) to free up limited trial resources acquired by an earlier run of the notebook. [Some limits](https://help.sap.com/viewer/105bcfd88921418e8c29b24a7a402ec3/SHIP/en-US/c03b561eea1744c9b9892b416037b99a.html) cannot be reset this way.*
## Table of Contents
* [Exercise 01.1](#Exercise-01.1) - Installing the SDK and preparing the service key
* [Creating a service instance and key on BTP Trial](#Creating-a-service-instance-and-key)
* [Installing the SDK](#Installing-the-SDK)
* [Loading the service key into your Jupyter Notebook](#Loading-the-service-key-into-your-Jupyter-Notebook)
* [Exercise 01.2](#Exercise-01.2) - Uploading the data
* [Exercise 01.3](#Exercise-01.3) - Training the model
* [Exercise 01.4](#Exercise-01.4) - Deploying the Model and predicting labels
* [Resources](#Resources) - Additional reading
* [Cleaning up a service instance](#Cleaning-up-a-service-instance) - Clean up all resources on the service instance
* [Optional Exercises](#Optional-Exercises) - Optional exercises
## Requirements
See the [README in the Github repository for this workshop](https://github.com/SAP-samples/teched2020-INT260/blob/master/exercises/ex1-DAR/README.md).
# Exercise 01.1
*Back to [table of contents](#Table-of-Contents)*
In exercise 01.1, we will install the SDK and prepare the service key.
## Creating a service instance and key on BTP Trial
Please log in to your trial account: https://cockpit.eu10.hana.ondemand.com/trial/
In your global account screen, go to the "Boosters" tab:

*Boosters are only available on the Trial landscape. If you are using a production environment, please follow this tutorial to manually [create a service instance and a service key](https://developers.sap.com/tutorials/cp-aibus-dar-service-instance.html)*.
In the Boosters tab, enter "Data Attribute Recommendation" into the search box. Then, select the
service tile from the search results:

The resulting screen shows details of the booster pack. Here, click the "Start" button and wait a few seconds.

Once the booster is finished, click the "go to Service Key" link to obtain your service key.

Finally, download the key and save it to disk.

## Installing the SDK
The Data Attribute Recommendation SDK is available from the Python package repository. It can be installed with the standard `pip` tool:
```
! pip install data-attribute-recommendation-sdk
```
*Note: If you are not using a Jupyter notebook, but instead a regular Python development environment, we recommend using a Python virtual environment to set up your development environment. Please see [the dedicated tutorial to learn how to install the SDK inside a Python virtual environment](https://developers.sap.com/tutorials/cp-aibus-dar-sdk-setup.html).*
## Loading the service key into your Jupyter Notebook
Once you have downloaded the service key from the Cockpit, upload it to your notebook environment. The service key must be uploaded to the same directory where the `teched2020-INT260_Data_Attribute_Recommendation.ipynb` is stored.
We first navigate to the file browser in Jupyter. On the top of your Jupyter notebook, right-click on the Jupyter logo and open in a new tab.

**In the file browser, navigate to the directory where the `teched2020-INT260_Data_Attribute_Recommendation.ipynb` notebook file is stored. The service key must reside next to this file.**
In the Jupyter file browser, click the **Upload** button (1). In the file selection dialog that opens, select the `defaultKey_*.json` file you downloaded previously from the SAP Cloud Platform Cockpit. Rename the file to `key.json`.
Confirm the upload by clicking on the second **Upload** button (2).

The service key contains your credentials to access the service. Please treat this as carefully as you would treat any password. We keep the service key as a separate file outside this notebook to avoid leaking the secret credentials.
The service key is a JSON file. We will load this file once and use the credentials throughout this workshop.
```
# First, set up logging so we can see the actions performed by the SDK behind the scenes
import logging
import sys
logging.basicConfig(level=logging.INFO, stream=sys.stdout)
from pprint import pprint # for nicer output formatting
import json
import os
if not os.path.exists("key.json"):
msg = "key.json is not found. Please follow instructions above to create a service key of"
msg += " Data Attribute Recommendation. Then, upload it into the same directory where"
msg += " this notebook is saved."
print(msg)
raise ValueError(msg)
with open("key.json") as file_handle:
key = file_handle.read()
SERVICE_KEY = json.loads(key)
```
## Summary Exercise 01.1
In exercise 01.1, we have covered the following topics:
* How to install the Python SDK for Data Attribute Recommendation
* How to obtain a service key for the Data Attribute Recommendation service
# Exercise 01.2
*Back to [table of contents](#Table-of-Contents)*
*To perform this exercise, you need to execute the code in all previous exercises.*
In exercise 01.2, we will upload our demo dataset to the service.
## The Dataset
### Obtaining the Data
The dataset we use in this workshop is a CSV file containing product master data. The original data was released by BestBuy, a retail company, under an [open license](https://github.com/SAP-samples/data-attribute-recommendation-postman-tutorial-sample#data-and-license). This makes it ideal for first experiments with the Data Attribute Recommendation service.
The dataset can be downloaded directly from Github using the following command:
```
! wget -O bestBuy.csv "https://raw.githubusercontent.com/SAP-samples/data-attribute-recommendation-postman-tutorial-sample/master/Tutorial_Example_Dataset.csv"
# If you receive a "command not found" error (i.e. on Windows), try curl instead of wget:
# ! curl -o bestBuy.csv "https://raw.githubusercontent.com/SAP-samples/data-attribute-recommendation-postman-tutorial-sample/master/Tutorial_Example_Dataset.csv"
```
Let's inspect the data:
```
# if you are experiencing an import error here, run the following in a new cell:
# ! pip install pandas
import pandas as pd
df = pd.read_csv("bestBuy.csv")
df.head(5)
print()
print(f"Data has {df.shape[0]} rows and {df.shape[1]} columns.")
```
The CSV contains several products. For each product, the description, the manufacturer and the price are given. Additionally, three levels of the product hierarchy are given.
The first product, a set of AAA batteries, is located in the following place in the product hierarchy:
```
level1_category: Connected Home & Housewares
|
level2_category: Housewares
|
level3_category: Household Batteries
```
We will use the Data Attribute Recommendation service to predict the categories for a given product based on its **description**, **manufacturer** and **price**.
### Creating the DatasetSchema
We first have to describe the shape of our data by creating a DatasetSchema. This schema informs the service about the individual column types found in the CSV. We also specify which columns are the targets used for training; these columns will be predicted later. In our case, these are the three category columns.
The service currently supports three column types: **text**, **category** and **number**. For prediction, only **category** is currently supported.
A DatasetSchema for the BestBuy dataset looks as follows:
```json
{
"features": [
{"label": "manufacturer", "type": "CATEGORY"},
{"label": "description", "type": "TEXT"},
{"label": "price", "type": "NUMBER"}
],
"labels": [
{"label": "level1_category", "type": "CATEGORY"},
{"label": "level2_category", "type": "CATEGORY"},
{"label": "level3_category", "type": "CATEGORY"}
],
"name": "bestbuy-category-prediction",
}
```
We will now upload this DatasetSchema to the Data Attribute Recommendation service. The SDK provides the
[`DataManagerClient.create_dataset_schema()`](https://data-attribute-recommendation-python-sdk.readthedocs.io/en/latest/api.html#sap.aibus.dar.client.data_manager_client.DataManagerClient.create_dataset_schema) method for this purpose.
```
from sap.aibus.dar.client.data_manager_client import DataManagerClient
dataset_schema = {
"features": [
{"label": "manufacturer", "type": "CATEGORY"},
{"label": "description", "type": "TEXT"},
{"label": "price", "type": "NUMBER"}
],
"labels": [
{"label": "level1_category", "type": "CATEGORY"},
{"label": "level2_category", "type": "CATEGORY"},
{"label": "level3_category", "type": "CATEGORY"}
],
"name": "bestbuy-category-prediction",
}
data_manager = DataManagerClient.construct_from_service_key(SERVICE_KEY)
response = data_manager.create_dataset_schema(dataset_schema)
dataset_schema_id = response["id"]
print()
print("DatasetSchema created:")
pprint(response)
print()
print(f"DatasetSchema ID: {dataset_schema_id}")
```
The API responds with the newly created DatasetSchema resource. The service assigned an ID to the schema. We save this ID in a variable, as we will need it when we upload the data.
### Uploading the Data to the service
The [`DataManagerClient`](https://data-attribute-recommendation-python-sdk.readthedocs.io/en/latest/api.html#sap.aibus.dar.client.data_manager_client.DataManagerClient) class is also responsible for uploading data to the service. This data must conform to an existing DatasetSchema. After uploading the data, the service will validate the Dataset against the DatasetSchema in a background process. The data must be a CSV file, which can optionally be `gzip` compressed.
We will now upload our `bestBuy.csv` file, using the DatasetSchema which we created earlier.
Data upload is a two-step process. We first create the Dataset using [`DataManagerClient.create_dataset()`](https://data-attribute-recommendation-python-sdk.readthedocs.io/en/latest/api.html#sap.aibus.dar.client.data_manager_client.DataManagerClient.create_dataset). Then we can upload data to the Dataset using the [`DataManagerClient.upload_data_to_dataset()`](https://data-attribute-recommendation-python-sdk.readthedocs.io/en/latest/api.html#sap.aibus.dar.client.data_manager_client.DataManagerClient.upload_data_to_dataset) method.
```
dataset_resource = data_manager.create_dataset("my-bestbuy-dataset", dataset_schema_id)
dataset_id = dataset_resource["id"]
print()
print("Dataset created:")
pprint(dataset_resource)
print()
print(f"Dataset ID: {dataset_id}")
# Compress file first for a faster upload
! gzip -9 -c bestBuy.csv > bestBuy.csv.gz
```
Note that the data upload can take a few minutes. Please do not restart the process while the cell is still running.
```
# Open in binary mode.
with open('bestBuy.csv.gz', 'rb') as file_handle:
dataset_resource = data_manager.upload_data_to_dataset(dataset_id, file_handle)
print()
print("Dataset after data upload:")
print()
pprint(dataset_resource)
```
Note that the Dataset status changed from `NO_DATA` to `VALIDATING`.
Dataset validation is a background process. The status will eventually change from `VALIDATING` to `SUCCEEDED`.
The SDK provides the [`DataManagerClient.wait_for_dataset_validation()`](https://data-attribute-recommendation-python-sdk.readthedocs.io/en/latest/api.html#sap.aibus.dar.client.data_manager_client.DataManagerClient.wait_for_dataset_validation) method to poll for the Dataset validation.
```
dataset_resource = data_manager.wait_for_dataset_validation(dataset_id)
print()
print("Dataset after validation has finished:")
print()
pprint(dataset_resource)
```
If the status is `FAILED` instead of `SUCCEEDED`, then the `validationMessage` will contain details about the validation failure.
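As a small convenience (not part of the original workshop code), you can check this programmatically; the snippet assumes the resource dictionary exposes the `status` and `validationMessage` fields described above.
```
status = dataset_resource["status"]
if status == "FAILED":
    print("Dataset validation failed:", dataset_resource.get("validationMessage"))
else:
    print("Dataset validation status:", status)
```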
To better understand the Dataset lifecycle, refer to the [corresponding document on help.sap.com](https://help.sap.com/viewer/105bcfd88921418e8c29b24a7a402ec3/SHIP/en-US/a9b7429687a04e769dbc7955c6c44265.html).
## Summary Exercise 01.2
In exercise 01.2, we have covered the following topics:
* How to create a DatasetSchema
* How to upload a Dataset to the service
You can find optional exercises related to exercise 01.2 [below](#Optional-Exercises-for-01.2).
# Exercise 01.3
*Back to [table of contents](#Table-of-Contents)*
*To perform this exercise, you need to execute the code in all previous exercises.*
In exercise 01.3, we will train the model.
## Training the Model
The Dataset is now uploaded and has been validated successfully by the service.
To train a machine learning model, we first need to select the correct model template.
### Selecting the right ModelTemplate
The Data Attribute Recommendation service currently supports two different ModelTemplates:
| ID | Name | Description |
|--------------------------------------|---------------------------|---------------------------------------------------------------------------|
| d7810207-ca31-4d4d-9b5a-841a644fd81f | **Hierarchical template** | Recommended for the prediction of multiple classes that form a hierarchy. |
| 223abe0f-3b52-446f-9273-f3ca39619d2c | **Generic template** | Generic neural network for multi-label, multi-class classification. |
| 188df8b2-795a-48c1-8297-37f37b25ea00 | **AutoML template** | Finds the [best traditional machine learning model out of several traditional algorithms](https://blogs.sap.com/2021/04/28/how-does-automl-works-in-data-attribute-recommendation/). Single label only. |
We are building a model to predict product hierarchies. The **Hierarchical Template** is correct for this scenario. In this template, the first label in the DatasetSchema is considered the top-level category. Each subsequent label is considered to be further down in the hierarchy.
Coming back to our example DatasetSchema:
```json
{
"labels": [
{"label": "level1_category", "type": "CATEGORY"},
{"label": "level2_category", "type": "CATEGORY"},
{"label": "level3_category", "type": "CATEGORY"}
]
}
```
The first defined label is `level1_category`, which is given more weight during training than `level3_category`.
Refer to the [official documentation on ModelTemplates](https://help.sap.com/viewer/105bcfd88921418e8c29b24a7a402ec3/SHIP/en-US/1e76e8c636974a06967552c05d40e066.html) to learn more. Additional model templates may be added over time, so check back regularly.
## Starting the training
When working with models, we use the [`ModelManagerClient`](https://data-attribute-recommendation-python-sdk.readthedocs.io/en/latest/api.html#sap.aibus.dar.client.model_manager_client.ModelManagerClient) class.
To start the training, we need the IDs of the dataset and the desired model template. We also have to provide a name for the model.
The [`ModelManagerClient.create_job()`](https://data-attribute-recommendation-python-sdk.readthedocs.io/en/latest/api.html#sap.aibus.dar.client.model_manager_client.ModelManagerClient.create_job) method launches the training Job.
*Only one model of a given name can exist. If you receive a message stating 'The model name specified is already in use', you either have to remove the job and its associated model first or you have to change the `model_name` variable name below. You can also [clean up the entire service instance](#Cleaning-up-a-service-instance).*
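If you do run into that name conflict, one way to free the name is sketched below; it reuses the deletion methods that also appear in the [Cleanup](#Cleanup) section and assumes `model_manager`, `model_name` and `job_id` from the earlier run are still available.
```
# Uncomment to remove the artifacts of a previous run before re-training.
# Order matters: delete any deployment of the model first (see Cleanup),
# then the model, then the job.
# model_manager.delete_model_by_name(model_name)
# model_manager.delete_job_by_id(job_id)
```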
```
from sap.aibus.dar.client.model_manager_client import ModelManagerClient
from sap.aibus.dar.client.exceptions import DARHTTPException
model_manager = ModelManagerClient.construct_from_service_key(SERVICE_KEY)
model_template_id = "d7810207-ca31-4d4d-9b5a-841a644fd81f" # hierarchical template
model_name = "bestbuy-hierarchy-model"
job_resource = model_manager.create_job(model_name, dataset_id, model_template_id)
job_id = job_resource['id']
print()
print("Job resource:")
print()
pprint(job_resource)
print()
print(f"ID of submitted Job: {job_id}")
```
The job is now running in the background. Similar to the DatasetValidation, we have to poll the job until it succeeds.
The SDK provides the [`ModelManagerClient.wait_for_job()`](https://data-attribute-recommendation-python-sdk.readthedocs.io/en/latest/api.html#sap.aibus.dar.client.model_manager_client.ModelManagerClient.wait_for_job) method:
```
job_resource = model_manager.wait_for_job(job_id)
print()
print("Job resource after training is finished:")
pprint(job_resource)
```
To better understand the Training Job lifecycle, see the [corresponding document on help.sap.com](https://help.sap.com/viewer/105bcfd88921418e8c29b24a7a402ec3/SHIP/en-US/0fc40aa077ce4c708c1e5bfc875aa3be.html).
## Intermission
The model training will take between 5 and 10 minutes.
In the meantime, we can explore the available [resources](#Resources) for both the service and the SDK.
## Inspecting the Model
Once the training job is finished successfully, we can inspect the model using [`ModelManagerClient.read_model_by_name()`](https://data-attribute-recommendation-python-sdk.readthedocs.io/en/latest/api.html#sap.aibus.dar.client.model_manager_client.ModelManagerClient.read_model_by_name).
```
model_resource = model_manager.read_model_by_name(model_name)
print()
pprint(model_resource)
```
In the model resource, the `validationResult` key provides information about model performance. You can also use these metrics to compare performance of different [ModelTemplates](#Selecting-the-right-ModelTemplate) or different datasets.
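To look only at these metrics (a small convenience snippet, assuming the `validationResult` key mentioned above):
```
pprint(model_resource["validationResult"])
```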
## Summary Exercise 01.3
In exercise 01.3, we have covered the following topics:
* How to select the appropriate ModelTemplate
* How to train a Model from a previously uploaded Dataset
You can find optional exercises related to exercise 01.3 [below](#Optional-Exercises-for-01.3).
# Exercise 01.4
*Back to [table of contents](#Table-of-Contents)*
*To perform this exercise, you need to execute the code in all previous exercises.*
In exercise 01.4, we will deploy the model and predict labels for some unlabeled data.
## Deploying the Model
The training job has finished and the model is ready to be deployed. By deploying the model, we create a server process in the background on the Data Attribute Recommendation service which will serve inference requests.
In the SDK, the [`ModelManagerClient.create_deployment()`](https://data-attribute-recommendation-python-sdk.readthedocs.io/en/latest/api.html#module-sap.aibus.dar.client.model_manager_client) method lets us create a Deployment.
```
deployment_resource = model_manager.create_deployment(model_name)
deployment_id = deployment_resource["id"]
print()
print("Deployment resource:")
print()
pprint(deployment_resource)
print(f"Deployment ID: {deployment_id}")
```
*Note: if you are using a trial account and you see errors such as 'The resource can no longer be used. Usage limit has been reached', consider [cleaning up the service instance](#Cleaning-up-a-service-instance) to free up limited trial resources.*
Similar to the data upload and the training job, model deployment is an asynchronous process. We have to poll the API until the Deployment is in status `SUCCEEDED`. The SDK provides the [`ModelManagerClient.wait_for_deployment()`](https://data-attribute-recommendation-python-sdk.readthedocs.io/en/latest/api.html#sap.aibus.dar.client.model_manager_client.ModelManagerClient.wait_for_deployment) method for this purpose.
```
deployment_resource = model_manager.wait_for_deployment(deployment_id)
print()
print("Finished deployment resource:")
print()
pprint(deployment_resource)
```
Once the Deployment is in status `SUCCEEDED`, we can run inference requests.
To better understand the Deployment lifecycle, see the [corresponding document on help.sap.com](https://help.sap.com/viewer/105bcfd88921418e8c29b24a7a402ec3/SHIP/en-US/f473b5b19a3b469e94c40eb27623b4f0.html).
*For trial users: the deployment will be stopped after 8 hours. You can restart it by deleting the deployment and creating a new one for your model. The [`ModelManagerClient.ensure_deployment_exists()`](https://help.sap.com/viewer/105bcfd88921418e8c29b24a7a402ec3/SHIP/en-US/c03b561eea1744c9b9892b416037b99a.html) method will delete and re-create automatically. Then, you need to poll until the deployment is succeeded using [`ModelManagerClient.wait_for_deployment()`](https://data-attribute-recommendation-python-sdk.readthedocs.io/en/latest/api.html#sap.aibus.dar.client.model_manager_client.ModelManagerClient.wait_for_deployment) as above.*
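A sketch of that restart flow is shown below; it assumes `ensure_deployment_exists()` accepts the model name and returns a deployment resource, as the description above suggests.
```
deployment_resource = model_manager.ensure_deployment_exists(model_name)
deployment_id = deployment_resource["id"]
deployment_resource = model_manager.wait_for_deployment(deployment_id)
pprint(deployment_resource)
```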
## Executing Inference requests
With a single inference request, we can send up to 50 objects to the service to predict the labels. The data sent to the service must match the `features` section of the DatasetSchema created earlier. The `labels` defined inside the DatasetSchema will be predicted for each object and returned as a response to the request.
In the SDK, the [`InferenceClient.create_inference_request()`](https://data-attribute-recommendation-python-sdk.readthedocs.io/en/latest/api.html#sap.aibus.dar.client.inference_client.InferenceClient.create_inference_request) method handles submission of inference requests.
```
from sap.aibus.dar.client.inference_client import InferenceClient
inference = InferenceClient.construct_from_service_key(SERVICE_KEY)
objects_to_be_classified = [
{
"features": [
{"name": "manufacturer", "value": "Energizer"},
{"name": "description", "value": "Alkaline batteries; 1.5V"},
{"name": "price", "value": "5.99"},
],
},
]
inference_response = inference.create_inference_request(model_name, objects_to_be_classified)
print()
print("Inference request processed. Response:")
print()
pprint(inference_response)
```
*Note: For trial accounts, you only have a limited number of objects which you can classify.*
You can also try to come up with your own example:
```
my_own_items = [
{
"features": [
{"name": "manufacturer", "value": "EDIT THIS"},
{"name": "description", "value": "EDIT THIS"},
{"name": "price", "value": "0.00"},
],
},
]
inference_response = inference.create_inference_request(model_name, my_own_items)
print()
print("Inference request processed. Response:")
print()
pprint(inference_response)
```
You can also classify multiple objects at once. For each object, the `top_n` parameter determines how many predictions are returned.
```
objects_to_be_classified = [
{
"objectId": "optional-identifier-1",
"features": [
{"name": "manufacturer", "value": "Energizer"},
{"name": "description", "value": "Alkaline batteries; 1.5V"},
{"name": "price", "value": "5.99"},
],
},
{
"objectId": "optional-identifier-2",
"features": [
{"name": "manufacturer", "value": "Eidos"},
{"name": "description", "value": "Unravel a grim conspiracy at the brink of Revolution"},
{"name": "price", "value": "19.99"},
],
},
{
"objectId": "optional-identifier-3",
"features": [
{"name": "manufacturer", "value": "Cadac"},
{"name": "description", "value": "CADAC Grill Plate for Safari Chef Grills: 12\""
+ "cooking surface; designed for use with Safari Chef grills;"
+ "105 sq. in. cooking surface; PTFE nonstick coating;"
+ " 2 grill surfaces"
},
{"name": "price", "value": "39.99"},
],
}
]
inference_response = inference.create_inference_request(model_name, objects_to_be_classified, top_n=3)
print()
print("Inference request processed. Response:")
print()
pprint(inference_response)
```
We can see that the service now returns the `n-best` predictions for each label as indicated by the `top_n` parameter.
In some cases, the predicted category has the special value `nan`. In the `bestBuy.csv` data set, not all records have the full set of three categories. Some records only have a top-level category. The model learns this fact from the data and will occasionally suggest that a record should not have a category.
```
# Inspect all video games with just a top-level category entry
video_games = df[df['level1_category'] == 'Video Games']
video_games.loc[df['level2_category'].isna() & df['level3_category'].isna()].head(5)
```
To learn how to execute inference calls without the SDK just using the underlying RESTful API, see [Inference without the SDK](#Inference-without-the-SDK).
## Summary Exercise 01.4
In exercise 01.4, we have covered the following topics:
* How to deploy a previously trained model
* How to execute inference requests against a deployed model
You can find optional exercises related to exercise 01.4 [below](#Optional-Exercises-for-01.4).
# Wrapping up
In this workshop, we looked into the following topics:
* Installation of the Python SDK for Data Attribute Recommendation
* Modelling data with a DatasetSchema
* Uploading data into a Dataset
* Training a model
* Predicting labels for unlabelled data
Using these tools, we are able to solve the problem of missing Master Data attributes starting from just a CSV file containing training data.
Feel free to revisit the workshop materials at any time. The [resources](#Resources) section below contains additional reading.
If you would like to explore the additional capabilities of the SDK, visit the [optional exercises](#Optional-Exercises) below.
## Cleanup
During the course of the workshop, we have created several resources on the Data Attribute Recommendation Service:
* DatasetSchema
* Dataset
* Job
* Model
* Deployment
The SDK provides several methods to delete these resources. Note that there are dependencies between objects: you cannot delete a Dataset without deleting the Model beforehand.
You will need to set `CLEANUP_SESSION = True` below to execute the cleanup.
```
# Clean up all resources created earlier
CLEANUP_SESSION = False
def cleanup_session():
model_manager.delete_deployment_by_id(deployment_id) # this can take a few seconds
model_manager.delete_model_by_name(model_name)
model_manager.delete_job_by_id(job_id)
data_manager.delete_dataset_by_id(dataset_id)
data_manager.delete_dataset_schema_by_id(dataset_schema_id)
print("DONE cleaning up!")
if CLEANUP_SESSION:
print("Cleaning up resources generated in this session.")
cleanup_session()
else:
print("Not cleaning up. Set 'CLEANUP_SESSION = True' above and run again!")
```
## Resources
*Back to [table of contents](#Table-of-Contents)*
### SDK Resources
* [SDK source code on Github](https://github.com/SAP/data-attribute-recommendation-python-sdk)
* [SDK documentation](https://data-attribute-recommendation-python-sdk.readthedocs.io/en/latest/)
* [How to obtain support](https://github.com/SAP/data-attribute-recommendation-python-sdk/blob/master/README.md#how-to-obtain-support)
* [Tutorials: Classify Data Records with the SDK for Data Attribute Recommendation](https://developers.sap.com/group.cp-aibus-data-attribute-sdk.html)
### Data Attribute Recommendation
* [SAP Help Portal](https://help.sap.com/viewer/product/Data_Attribute_Recommendation/SHIP/en-US)
* [API Reference](https://help.sap.com/viewer/105bcfd88921418e8c29b24a7a402ec3/SHIP/en-US/b45cf9b24fd042d082c16191aa938c8d.html)
* [Tutorials using Postman - interact with the service RESTful API directly](https://developers.sap.com/mission.cp-aibus-data-attribute.html)
* [Trial Account Limits](https://help.sap.com/viewer/105bcfd88921418e8c29b24a7a402ec3/SHIP/en-US/c03b561eea1744c9b9892b416037b99a.html)
* [Metering and Pricing](https://help.sap.com/viewer/105bcfd88921418e8c29b24a7a402ec3/SHIP/en-US/1e093326a2764c298759fcb92c5b0500.html)
## Addendum
### Inference without the SDK
*Back to [table of contents](#Table-of-Contents)*
The Data Attribute Service exposes a RESTful API. The SDK we use in this workshop uses this API to interact with the DAR service.
For custom integration, you can implement your own client for the API. The tutorial "[Use Machine Learning to Classify Data Records]" is a great way to explore the Data Attribute Recommendation API with the Postman REST client. Beyond the tutorial, the [API Reference] is a comprehensive documentation of the RESTful interface.
[Use Machine Learning to Classify Data Records]: https://developers.sap.com/mission.cp-aibus-data-attribute.html
[API Reference]: https://help.sap.com/viewer/105bcfd88921418e8c29b24a7a402ec3/SHIP/en-US/b45cf9b24fd042d082c16191aa938c8d.html
To demonstrate the underlying API, the next example uses the `curl` command line tool to perform an inference request against the Inference API.
The example uses the `jq` command to extract the credentials from the service. The authentication token is retrieved from the `uaa_url` and then used for the inference request.
```
# If the following example gives you errors that the jq or curl commands cannot be found,
# you may be able to install them from conda by uncommenting one of the lines below:
#%conda install -q jq
#%conda install -q curl
%%bash -s "$model_name" # Pass the python model_name variable as the first argument to shell script
model_name=$1
echo "Model: $model_name"
key=$(cat key.json)
url=$(echo $key | jq -r .url)
uaa_url=$(echo $key | jq -r .uaa.url)
clientid=$(echo $key | jq -r .uaa.clientid)
clientsecret=$(echo $key | jq -r .uaa.clientsecret)
echo "Service URL: $url"
token_url=${uaa_url}/oauth/token?grant_type=client_credentials
echo "Obtaining token with clientid $clientid from $token_url"
bearer_token=$(curl \
--silent --show-error \
--user $clientid:$clientsecret \
$token_url \
| jq -r .access_token
)
inference_url=${url}/inference/api/v3/models/${model_name}/versions/1
echo "Running inference request against endpoint $inference_url"
echo ""
# We pass the token in the Authorization header.
# The payload for the inference request is passed as
# the body of the POST request below.
# The output of the curl command is piped through `jq`
# for pretty-printing
curl \
--silent --show-error \
--header "Authorization: Bearer ${bearer_token}" \
--header "Content-Type: application/json" \
-XPOST \
${inference_url} \
-d '{
"objects": [
{
"features": [
{
"name": "manufacturer",
"value": "Energizer"
},
{
"name": "description",
"value": "Alkaline batteries; 1.5V"
},
{
"name": "price",
"value": "5.99"
}
]
}
]
}' | jq
```
### Cleaning up a service instance
*Back to [table of contents](#Table-of-Contents)*
To clean all data on the service instance, you can run the following snippet. The code is self-contained and does not require you to execute any of the cells above. However, you will need to have the `key.json` containing a service key in place.
You will need to set `CLEANUP_EVERYTHING = True` below to execute the cleanup.
**NOTE: This will delete all data on the service instance!**
```
CLEANUP_EVERYTHING = False
def cleanup_everything():
import logging
import sys
logging.basicConfig(level=logging.INFO, stream=sys.stdout)
import json
import os
if not os.path.exists("key.json"):
msg = "key.json is not found. Please follow instructions above to create a service key of"
msg += " Data Attribute Recommendation. Then, upload it into the same directory where"
msg += " this notebook is saved."
print(msg)
raise ValueError(msg)
with open("key.json") as file_handle:
key = file_handle.read()
SERVICE_KEY = json.loads(key)
from sap.aibus.dar.client.model_manager_client import ModelManagerClient
model_manager = ModelManagerClient.construct_from_service_key(SERVICE_KEY)
for deployment in model_manager.read_deployment_collection()["deployments"]:
model_manager.delete_deployment_by_id(deployment["id"])
for model in model_manager.read_model_collection()["models"]:
model_manager.delete_model_by_name(model["name"])
for job in model_manager.read_job_collection()["jobs"]:
model_manager.delete_job_by_id(job["id"])
from sap.aibus.dar.client.data_manager_client import DataManagerClient
data_manager = DataManagerClient.construct_from_service_key(SERVICE_KEY)
for dataset in data_manager.read_dataset_collection()["datasets"]:
data_manager.delete_dataset_by_id(dataset["id"])
for dataset_schema in data_manager.read_dataset_schema_collection()["datasetSchemas"]:
data_manager.delete_dataset_schema_by_id(dataset_schema["id"])
print("Cleanup done!")
if CLEANUP_EVERYTHING:
print("Cleaning up all resources in this service instance.")
cleanup_everything()
else:
print("Not cleaning up. Set 'CLEANUP_EVERYTHING = True' above and run again.")
```
### Optional Exercises
*Back to [table of contents](#Table-of-Contents)*
To work with the optional exercises, create a new cell in the Jupyter notebook by clicking the `+` button in the menu above or by using the `b` shortcut on your keyboard. You can then enter your code in the new cell and execute it.
#### Optional Exercises for 01.2
##### DatasetSchemas
Use the [`DataManagerClient.read_dataset_schema_by_id()`](https://data-attribute-recommendation-python-sdk.readthedocs.io/en/latest/api.html#sap.aibus.dar.client.data_manager_client.DataManagerClient.read_dataset_schema_by_id) and the [`DataManagerClient.read_dataset_schema_collection()`](https://data-attribute-recommendation-python-sdk.readthedocs.io/en/latest/api.html#sap.aibus.dar.client.data_manager_client.DataManagerClient.read_dataset_schema_collection) methods to list the newly created and all DatasetSchemas, respectively.
##### Datasets
Use the [`DataManagerClient.read_dataset_by_id()`](https://data-attribute-recommendation-python-sdk.readthedocs.io/en/latest/api.html#sap.aibus.dar.client.data_manager_client.DataManagerClient.read_dataset_by_id) and the [`DataManagerClient.read_dataset_collection()`](https://data-attribute-recommendation-python-sdk.readthedocs.io/en/latest/api.html#sap.aibus.dar.client.data_manager_client.DataManagerClient.read_dataset_collection) methods to inspect the newly created dataset.
Instead of using two separate methods to upload data and wait for validation to finish, you can also use [`DataManagerClient.upload_data_and_validate()`](https://data-attribute-recommendation-python-sdk.readthedocs.io/en/latest/api.html#sap.aibus.dar.client.data_manager_client.DataManagerClient.upload_data_and_validate).
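For example (a sketch, assuming the method accepts the dataset ID and an open file handle, analogous to `upload_data_to_dataset()`):
```
with open('bestBuy.csv.gz', 'rb') as file_handle:
    dataset_resource = data_manager.upload_data_and_validate(dataset_id, file_handle)
```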
#### Optional Exercises for 01.3
##### ModelTemplates
Use the [`ModelManagerClient.read_model_template_collection()`](https://data-attribute-recommendation-python-sdk.readthedocs.io/en/latest/api.html#sap.aibus.dar.client.model_manager_client.ModelManagerClient.read_model_template_collection) to list all existing model templates.
##### Jobs
Use [`ModelManagerClient.read_job_by_id()`](https://data-attribute-recommendation-python-sdk.readthedocs.io/en/latest/api.html#sap.aibus.dar.client.model_manager_client.ModelManagerClient.read_job_by_id) and [`ModelManagerClient.read_job_collection()`](https://data-attribute-recommendation-python-sdk.readthedocs.io/en/latest/api.html#sap.aibus.dar.client.model_manager_client.ModelManagerClient.read_job_collection) to inspect the job we just created.
The entire process of uploading the data and starting the training is also available as a single method call in [`ModelCreator.create()`](https://data-attribute-recommendation-python-sdk.readthedocs.io/en/latest/api.html#sap.aibus.dar.client.workflow.model.ModelCreator.create).
#### Optional Exercises for 01.4
##### Deployments
Use [`ModelManagerClient.read_deployment_by_id()`](https://data-attribute-recommendation-python-sdk.readthedocs.io/en/latest/api.html#sap.aibus.dar.client.model_manager_client.ModelManagerClient.read_deployment_by_id) and [`ModelManagerClient.read_deployment_collection()`](https://data-attribute-recommendation-python-sdk.readthedocs.io/en/latest/api.html#sap.aibus.dar.client.model_manager_client.ModelManagerClient.read_deployment_collection) to inspect the Deployment.
Use the [`ModelManagerClient.lookup_deployment_id_by_model_name()`](https://data-attribute-recommendation-python-sdk.readthedocs.io/en/latest/api.html#sap.aibus.dar.client.model_manager_client.ModelManagerClient.lookup_deployment_id_by_model_name) method to find the deployment ID for a given model name.
##### Inference
Use the [`InferenceClient.do_bulk_inference()`](https://data-attribute-recommendation-python-sdk.readthedocs.io/en/latest/api.html#sap.aibus.dar.client.inference_client.InferenceClient.do_bulk_inference) method to process more than fifty objects at a time. Note how the data format returned changes.
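A minimal sketch (assuming the method takes the model name and a list of objects shaped like the inference examples above):
```
all_predictions = inference.do_bulk_inference(model_name, objects_to_be_classified)
pprint(all_predictions)
```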
# Summary
The aim of this interactive notebook is to demonstrate the relationships between the physico-chemical properties of vegetation and the solar spectrum.
To do so we will use simulation models, in particular radiative transfer models at both the individual-leaf level and the canopy level.
# Instructions
Read the whole text carefully and follow its instructions.
Once you have read each section of text, run the following code cell (marked as `In []`) by clicking the `Run` icon or pressing ALT + ENTER. A graphical interface will appear with which you can carry out the assigned tasks.
As an example, run the next cell to import all the libraries needed for the notebook to work correctly. Once it has run, a thank-you message should appear.
```
%matplotlib inline
from ipywidgets import interactive, fixed
from IPython.display import display
from functions import prosail_and_spectra as fn
```
# Leaf spectrum
The spectral properties of a leaf (its transmittance, its reflectance and its absorptance) depend on its pigment concentration, its water content, its specific weight and the internal structure of its tissues.
We will use the ProspectD model, a simplification of reality that simulates the spectrum from the concentration of chlorophylls (`Cab`), carotenoids (`Car`) and anthocyanins (`Ant`), as well as the weight of water per unit area (`Cw`) and the weight of the remaining dry matter (`Cm`), which encompasses celluloses, lignins (the main contributors to leaf biomass) and other protein components. It also includes a semi-empirical parameter representing other pigments responsible for the colour of senescent and diseased leaves. In addition, to simulate leaves with different cellular structures, it includes a final parameter (`N`) that emulates the different layers and cellular tissues of the leaf.
*(Figure: schematic of the ProspectD leaf model, which represents the leaf as a stack of layers with identical spectral properties.)*
> If you want to know more about the ProspectD model, see this [publication](./lecturas_adicionales/ProspectD_model.pdf).
>
> If you want more details about the calculation and the code of the model, look [here](https://github.com/hectornieto/pypro4sail/blob/b111891e0a2c01b8b3fa5ff41790687d31297e5f/pypro4sail/prospect.py#L46).
Run the next cell and you will see a typical leaf spectrum. The plot shows the reflectance (on the y axis), the transmittance (on the secondary y axis, with inverted values) and the absorptance (as the space between the reflectance and transmittance curves), since $\rho + \tau + \alpha = 1$.
Pay attention to how, and in which regions, the spectrum changes depending on the parameter you modify.
* Vary the chlorophyll content.
* Vary the water content.
* Vary the dry matter.
* Vary the brown pigments from a value of 0 (healthy leaf) to higher values (diseased or dry leaf).
```
w_rho_leaf = interactive(fn.update_prospect_spectrum, N_leaf=fn.w_nleaf, Cab=fn.w_cab,
Car=fn.w_car, Ant=fn.w_ant, Cbrown=fn.w_cbrown, Cw=fn.w_cw, Cm=fn.w_cm)
display(w_rho_leaf)
```
Note the following:
* The chlorophyll concentration `Cab` mainly affects the visible region (RGB) and the *red edge* (R-E), with more absorption in the red and blue regions and more reflection in the green. This is why most leaves look green.
* The water content `Cw` mainly affects absorption in the shortwave infrared (SWIR), with absorption peaks around 1460 and 2100 nm.
* The dry matter `Cm` mainly affects absorption in the near infrared (NIR).
* Other pigments affect the visible spectrum to a lesser extent. For example, the anthocyanins `Ant`, which usually appear during senescence, shift the green reflection peak towards the red, especially when the chlorophyll concentration decreases at the same time.
* The `N` parameter affects the ratio between reflectance and transmittance. The more *layers* a leaf has, the more multiple-scattering events occur and the more it reflects.
> You can also see this phenomenon in the double- or triple-glazed windows used as insulation, for example in shop windows. Unless you stand right in front of the window and close to it, it looks more like a mirror than a window.
# Soil spectrum
The spectrum of the canopy or vegetated surface depends not only on the spectrum and properties of the leaves, but also on the structure of the canopy itself and on the soil. In particular, in open or sparse canopies, such as during the early phenological stages, the spectral behaviour of the soil can strongly influence the spectral signal captured by remote sensing sensors.
The soil spectrum depends on several factors, such as its mineralogical composition, organic matter, texture and density, as well as its surface moisture.
Run the next cell and look at the different spectral characteristics of several soil types.
```
w_rho_soil = interactive(fn.update_soil_spectrum, soil_name=fn.w_soil)
display(w_rho_soil)
```
Note how different a soil spectrum can be from that of a leaf. This is key when classifying cover types by remote sensing, as well as when quantifying the vigour/density of a crop.
Note that more saline (`aridisol.salorthid`) or gypsic (`aridisol.gypsiorthd`) soils have a higher reflectance, especially in the visible (RGB); in other words, they are whiter than other soils.
# Canopy spectrum
Finally, by integrating the spectral signature of a leaf and of the underlying soil we can obtain the spectrum of a plant canopy.
The spectrum of the vegetated surface also depends on the canopy structure, mainly on the amount of leaves per unit ground area (defined as the Leaf Area Index) and on how those leaves are oriented with respect to the vertical. In addition, since the incident and reflected light interacts between the leaf volume and the soil, the positions of the sun and of the sensor influence the spectral signal we obtain.
For this part we will combine the ProspectD radiative transfer model, which simulates the spectrum of a leaf, with another transfer model at canopy level (4SAIL). The latter treats the vegetated surface as a horizontally and vertically homogeneous layer, so caution is advised when applying it to heterogeneous tree canopies.

> If you want to know more about the 4SAIL model, see this [publication](./lecturas_adicionales/4SAIL_model.pdf)
>
> If you want more details about the calculation and the code of the model, look [here](https://github.com/hectornieto/pypro4sail/blob/b111891e0a2c01b8b3fa5ff41790687d31297e5f/pypro4sail/four_sail.py#L245)
Run the next cell and see how the [leaf](#Leaf-spectrum) and [soil](#Soil-spectrum) spectra generated earlier are integrated to obtain a spectrum of the vegetated surface.
> You can modify the leaf and soil spectra, and this plot will update automatically.
```
w_rho_canopy = interactive(fn.update_4sail_spectrum,
lai=fn.w_lai, hotspot=fn.w_hotspot, leaf_angle=fn.w_leaf_angle,
sza=fn.w_sza, vza=fn.w_vza, psi=fn.w_psi, skyl=fn.w_skyl,
leaf_spectrum=fixed(w_rho_leaf), soil_spectrum=fixed(w_rho_soil))
display(w_rho_canopy)
```
Remember from the [net radiation practical](./ES_radiacion_neta.ipynb) that a vegetated surface has certain anisotropic properties, which means that it reflects differently depending on the illumination and viewing geometry.
See how the spectrum changes when you vary the view zenith angle (VZA), the solar zenith angle (SZA) and the relative azimuth angle (PSI) between the sun and the observer.
Vary the LAI and set it to zero (no vegetation). Check that the resulting spectrum is exactly the soil spectrum. Now increase the LAI slightly and you will see the spectrum change, with reflectance decreasing in the red and blue (due to the leaf chlorophyll) and increasing in the *red edge* and the NIR.
Also remember from the [net radiation practical](./ES_radiacion_neta.ipynb) the effect of the angular arrangement of the leaves. With a nadir view (VZA=0), vary the typical leaf angle (`Leaf Angle`) from a predominantly horizontal value (0º) to a predominantly vertical one (90º).
# Parameter sensitivity
In this task you will see how the spectral behaviour of vegetation changes as its physico-chemical parameters vary, as well as how sensitive it is to the observation and illumination conditions.
To do this we will carry out a sensitivity analysis, varying a single parameter at a time while the remaining parameters stay constant. You can change the individual values of the other parameters (the previous plots will also update). Then select which parameter you want to analyse and the maximum and minimum values of the range you want it to cover.
```
w_sensitivity = interactive(fn.prosail_sensitivity,
N_leaf=fn.w_nleaf, Cab=fn.w_cab, Car=fn.w_car, Ant=fn.w_ant, Cbrown=fn.w_cbrown,
Cw=fn.w_cw, Cm=fn.w_cm, lai=fn.w_lai, hotspot=fn.w_hotspot, leaf_angle=fn.w_leaf_angle,
sza=fn.w_sza, vza=fn.w_vza, psi=fn.w_psi, skyl=fn.w_skyl,
soil_name=fn.w_soil, var=fn.w_param, value_range=fn.w_range)
display(w_sensitivity)
```
Start with the sensitivity of the spectrum to chlorophyll concentration. You will see that the variation occurs mainly in the green and the red. Note also that in the *red-edge*, the transition zone between the red and the NIR, the signal is "shifted"; this phenomenon is key and is the reason why newer sensors (Sentinel, new UAV cameras) include this region to help estimate chlorophyll and, therefore, photosynthetic activity.
Evaluate the sensitivity of the spectrum to the other pigments (`Car` or `Ant`). You will see that the spectral response to these pigments is weaker, which means they are harder to estimate from remote sensing. In contrast, the spectral variation with brown pigments is quite strong; as a reminder, these pigments represent the colour changes that occur in diseased and dead leaves.
> This implies that it is relatively feasible to detect and quantify health problems in vegetation.
Now look at the sensitivity to LAI when its range is small (e.g. from 0 to 2). You will see that the spectrum changes significantly as LAI increases. Now look at the sensitivity when LAI spans higher values (e.g. from 2 to 4); the variation in the spectrum is much smaller. It is often said that at high LAI values the spectrum tends to "saturate", so the signal becomes less sensitive.
> It is easier to estimate LAI with a small margin of error in crops with low leaf density or in early phenological stages than in very dense crops or vegetation.
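To make this saturation idea concrete, the toy calculation below uses a simple Beer–Lambert-style attenuation curve with purely illustrative coefficients; it is not a PROSAIL computation, just a sketch of why the index flattens at high LAI.
```
import numpy as np

# Toy saturation curve: VI(LAI) = VI_inf - (VI_inf - VI_soil) * exp(-k * LAI)
# vi_soil, vi_inf and k are illustrative values, not PROSAIL outputs
vi_soil, vi_inf, k = 0.15, 0.90, 0.7
for lai in [0, 1, 2, 3, 4, 6]:
    vi = vi_inf - (vi_inf - vi_soil) * np.exp(-k * lai)
    print(f"LAI={lai}: VI={vi:.2f}")
# The increase from LAI 0 -> 2 is much larger than from LAI 2 -> 4,
# which is the "saturation" discussed above.
```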
Ahora mantén el valor fijo de LAI en un valor alto (p.ej 3) y haz variar el ángulo de observación cenital entre 0º (nadir) y una obsrvación oblicua (p.ej 35º). Verás que a pesar de haber un LAI alto, y que a priori hemos visto que ya es menos sensible, hay mayores variaciones espectrales al variar la geometría de observación.
> Gracias a la anisotropía de la vegetación, las variaciones espectrales con respecto a la geometría de observación e iluminación pueden ayudar a resolver el LAI en condiciones de alta densidad.
Ahora mira el peso específico de la hoja, o la cantidad de materia seca (`Cm`). Verás que según el peso específico de la hora se producen variaciones importantes en el NIR y SWIR.
> La biomasa foliar puede calcularse a partir del producto entre el `LAI` y `Cm`, por lo que es viable estimar la biomasa foliar de un cultivo. Esta informaición puede ser útil por ejemplo para estimar el rendimiento final de algunos cultivos, como pueden ser los cereales.
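As a quick unit check (with illustrative numbers only, not data from the simulator), foliar biomass per unit ground area is simply LAI (m² leaf per m² ground) times Cm (g dry matter per cm² leaf), converted to consistent units:
```
# Illustrative values: LAI = 3 m2/m2, Cm = 0.005 g/cm2
lai = 3.0          # m2 leaf per m2 ground
cm = 0.005         # g dry matter per cm2 leaf
cm2_per_m2 = 1e4   # cm2 in one m2

biomass_g_m2 = lai * cm * cm2_per_m2      # = 150 g/m2
biomass_t_ha = biomass_g_m2 * 1e4 / 1e6   # = 1.5 t/ha
print(biomass_g_m2, biomass_t_ha)
```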
The `hotspot` parameter is a semi-empirical parameter related to the size of the leaves relative to the height of the canopy. It affects how leaves shade other leaves within the canopy, so its strongest effect is observed when the observer (sensor) is in exactly the same position as the sun. To see this, set similar values for VZA and SZA, and set the relative azimuth angle PSI to 0º. Now vary the hotspot. When the observer looks at the sunlit side of the vegetation, the relative size of the leaves plays an important role, since the larger the leaves, the larger the directly illuminated crown volume.

# The signal of a sensor
So far we have looked at the detailed spectral behaviour of vegetation. However, the sensors on board satellites, aircraft and drones do not measure the whole spectrum continuously; instead they sample it around specific bands, strategically selected to capture the most relevant biophysical aspects.
The spectral response function is the way a specific sensor integrates the spectrum in order to provide the information in each of its bands. Every sensor, and every one of its bands, has its own spectral response function.
In this task we will look at the spectral responses of the sensors we will use most often: Landsat, Sentinel-2 and Sentinel-3. We will also look at the spectral behaviour of a typical camera used on drones.
We start from the simulations generated earlier. Select the sensor you want to simulate to see how each sensor would "see" those same spectra.
```
w_rho_sensor = interactive(fn.sensor_sensitivity,
sensor=fn.w_sensor, spectra=fixed(w_sensitivity))
display(w_rho_sensor)
```
Run the chlorophyll sensitivity analysis again and compare the spectral response that Landsat, Sentinel-2 and a UAV camera would give.
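To illustrate what a spectral response function does, the sketch below computes a band reflectance as the SRF-weighted average of a continuous spectrum. Both the spectrum and the Gaussian band are made up for illustration; they are not the actual response functions used by `fn.sensor_sensitivity`.
```
import numpy as np

wl = np.arange(400, 2501)                           # wavelengths (nm)
rho = 0.05 + 0.4 / (1 + np.exp(-(wl - 720) / 20))   # made-up vegetation-like spectrum

# Hypothetical Gaussian spectral response function centred on a red band
centre, width = 665.0, 15.0
srf = np.exp(-0.5 * ((wl - centre) / width) ** 2)

# Band reflectance = SRF-weighted average of the continuous spectrum
band_reflectance = np.sum(rho * srf) / np.sum(srf)
print(band_reflectance)
```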
# Deriving vegetation parameters
So far we have seen how the surface spectrum varies with the different biophysical parameters.
However, our final objective is the opposite: starting from a spectrum, or from certain spectral bands, we want to estimate one or several biophysical variables of interest. In particular, for computing evapotranspiration and water use efficiency, we may be interested in estimating the leaf area index and/or the fraction of absorbed PAR radiation, as well as chlorophylls or other pigments.
One of the typical methods is to develop empirical relationships between the bands (or vegetation indices) and data sampled in the field. This can give the most reliable answer for our study plot but, as you have seen above, the spectral signal depends on many other factors, which may mean that a relationship calibrated with a few local samples cannot be extrapolated or applied to other crops or regions.
Another alternative is to build synthetic databases from simulations. That is what we are going to do in this task.
We are going to run 5000 simulations, varying the parameter values randomly within a range of values that can be expected in our study areas.
For example, if you work with perennial crops you may want to keep an LAI range with minimum values noticeably above zero, whereas if you work with annual crops the value 0 is needed to reflect the development of the crop from planting through emergence and maturity.
Since there are a large number of parameters and it is very likely that we do not know the plausible range for most crops, do not worry: leave the default values and focus on the parameters you are most confident about.
You can also choose one or several soil types, depending on the pedology of your site.
> You could even upload a typical soil spectrum from your area to the [./input/soil_spectral_library](./input/soil_spectral_library) folder. Just make sure the text file has two columns, the first with the wavelengths from 400 to 2500 and the second with the corresponding reflectance. To update the soil spectral library, you would also have to run the [first cell](#Instrucciones) again.
Finally, select the sensor for which you want to generate the signal.
Once you have configured your simulation environment, click the `Generar Simulaciones` button. The system will take a while, but after a few minutes it will return a series of plots.
> You may get a warning message; do not worry, everything should still work normally.
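Before running the full widget below, this is a minimal sketch of how such a random database of parameter combinations can be drawn. The ranges here are assumptions for illustration only; the actual simulations are generated by `fn.build_random_simulations`.
```
import numpy as np
import pandas as pd

n_sim = 5000
rng = np.random.default_rng(42)

# Illustrative ranges only; adjust them to what is plausible for your crops
ranges = {"Cab": (20, 80), "LAI": (0, 4), "leaf_angle": (30, 70)}
samples = pd.DataFrame(
    {name: rng.uniform(low, high, n_sim) for name, (low, high) in ranges.items()}
)
print(samples.describe())
```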
```
w_rho_sensor = interactive(fn.build_random_simulations, {"manual": True, "manual_name": "Generar simulaciones"},
n_sim=fixed(5000), n_leaf_range=fn.w_range_nleaf,
cab_range=fn.w_range_cab, car_range=fn.w_range_car,
ant_range=fn.w_range_ant, cbrown_range=fn.w_range_cbrown,
cw_range=fn.w_range_cw, cm_range=fn.w_range_cm,
lai_range=fn.w_range_lai, hotspot_range=fn.w_range_hotspot,
leaf_angle_range=fn.w_range_leaf_angle,
sza=fn.w_sza, vza=fn.w_vza, psi=fn.w_psi,
skyl=fn.w_skyl, soil_names=fn.w_soils, sensor=fn.w_sensor)
display(w_rho_sensor)
```
The plot shows 4 examples of relationships between 3 typical vegetation indices and 4 biophysical variables.
* NDVI: Normalized Difference Vegetation Index. It is the most widely used vegetation index. It is generally related to LAI, foliar biomass and/or the fraction of intercepted/absorbed radiation.
$$NDVI = \frac{\rho_{NIR} - \rho_{R}}{\rho_{NIR} + \rho_{R}}$$
* NDRE: Normalized Difference Red-Edge. It is a vegetation index that uses the red-edge region, so it cannot be computed for every sensor. It is generally related to chlorophyll.
$$NDRE = \frac{\rho_{NIR} - \rho_{R-E}}{\rho_{NIR} + \rho_{R-E}}$$
* NDWI: Normalized Difference Water Index. It is a vegetation index that uses the SWIR region, so it cannot be computed for every sensor. It is generally related to the water content of the vegetation.
$$NDWI = \frac{\rho_{NIR} - \rho_{SWIR}}{\rho_{NIR} + \rho_{SWIR}}$$
The simulations have been saved to a prosail_simulations.csv file in the [./output](./output/prosail_simulations.csv) folder. Download this file, compute different vegetation indices and try to develop statistical relationships and models between the bands or vegetation indices and the biophysical parameters. You can use any software you are used to working with (Excel, R, SPSS, ...).
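If you prefer to stay in Python, the sketch below shows one way the indices could be computed from the saved file. The band and LAI column names used here (`'B4'`, `'B8'`, `'B5'`, `'B11'`, `'lai'`) are hypothetical; check the actual column names in prosail_simulations.csv before using it.
```
import pandas as pd

sims = pd.read_csv("./output/prosail_simulations.csv")

# Hypothetical column names; inspect sims.columns for the real ones
red, nir, red_edge, swir = sims["B4"], sims["B8"], sims["B5"], sims["B11"]

sims["NDVI"] = (nir - red) / (nir + red)
sims["NDRE"] = (nir - red_edge) / (nir + red_edge)
sims["NDWI"] = (nir - swir) / (nir + swir)

# Example: correlation of each index with LAI (column name assumed)
print(sims[["NDVI", "NDRE", "NDWI"]].corrwith(sims["lai"]))
```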
You can run as many simulations as you consider necessary, for example changing the sensor or modifying the plausible ranges to cover different vegetation functional types. Just bear in mind that every time a new simulation is generated the csv file is overwritten. **So download it, or make a copy in your virtual folder, before running new simulations.**
# Conclusions
In this practical we have seen how the vegetation spectrum responds to the biophysical variables of the surface.
* LAI is probably the variable with the greatest influence on the spectral response of vegetation.
* Leaf chlorophyll concentration mainly affects the visible and *red-edge* regions.
* Water content and leaf specific weight mainly affect the spectrum from the NIR onwards.
* The viewing and illumination geometry, as well as the spectral response of the soil, also influence the signal. This makes it difficult to apply a universal relationship when estimating a biophysical parameter.
* Radiative transfer models can help estimate these parameters, although ideally field data are needed for validation and/or statistical calibration.
* Sensors sample part of the spectrum around specific spectral bands. Therefore, an empirical relationship developed for one sensor may not be applicable or valid for another sensor.
# Detecting sound sources in YouTube videos
## First load all dependencies and set work and data paths
```
# set plotting parameters
%matplotlib inline
import matplotlib.pyplot as plt
# change notebook settings for wider screen
from IPython.core.display import display, HTML
display(HTML("<style>.container { width:100% !important; }</style>"))
# For embedding YouTube videos in Ipython Notebook
from IPython.display import YouTubeVideo
# and setting the time of the video in seconds
from datetime import timedelta
import numpy as np
import os
import sys
import urllib.request
import pandas as pd
sys.path.append(os.path.join('src', 'audioset_demos'))
from __future__ import print_function
# signal processing library
from scipy import signal
from scipy.io import wavfile
import wave
import six
import tensorflow as tf
import h5py
# Audio IO and fast plotting
import pyaudio
from pyqtgraph.Qt import QtCore, QtGui
import pyqtgraph as pg
# Multiprocessing and threading
import multiprocessing
# Dependencies for creating deep VGGish embeddings
from src.audioset_demos import vggish_input
import vggish_input
import vggish_params
import vggish_postprocess
import vggish_slim
pca_params = 'vggish_pca_params.npz'
model_checkpoint = 'vggish_model.ckpt'
# Our YouTube video downloader based on youtube-dl module
from src.audioset_demos import download_youtube_wav as dl_yt
# third-party sounds processing and visualization library
import librosa
import librosa.display
# Set user
usr = 'maxvo'
MAXINT16 = np.iinfo(np.int16).max
print(MAXINT16)
FOCUS_CLASSES_ID = [0, 137, 62, 63, 500, 37]
#FOCUS_CLASSES_ID = [0, 137, 37, 40, 62, 63, 203, 208, 359, 412, 500]
class_labels = pd.read_csv(os.path.join('src', 'audioset_demos', 'class_labels_indices.csv'))
CLASS_NAMES = class_labels.loc[:, 'display_name'].tolist()
FOCUS_CLASS_NAME_FRAME = class_labels.loc[FOCUS_CLASSES_ID, 'display_name']
FOCUS_CLASS_NAME = FOCUS_CLASS_NAME_FRAME.tolist()
print("Chosen classes for experiments:")
print(FOCUS_CLASS_NAME_FRAME)
# Set current working directory
src_dir = os.getcwd()
# Set raw wav-file data directories for placing downloaded audio
raw_dir = os.path.join(src_dir, 'data' ,'audioset_demos', 'raw')
short_raw_dir = os.path.join(src_dir, 'data', 'audioset_demos', 'short_raw')
if not os.path.exists(short_raw_dir):
os.makedirs(short_raw_dir)
if not os.path.exists(raw_dir):
os.makedirs(raw_dir)
audioset_data_path = os.path.join('data', 'audioset_demos', 'audioset', 'packed_features')
```
## Download model parameters and PCA embedding
```
if not os.path.isfile(os.path.join('src', 'audioset_demos', 'vggish_model.ckpt')):
urllib.request.urlretrieve(
"https://storage.googleapis.com/audioset/vggish_model.ckpt",
filename=os.path.join('src', 'audioset_demos', 'vggish_model.ckpt')
)
if not os.path.isfile(os.path.join('src', 'audioset_demos', 'vggish_pca_params.npz')):
urllib.request.urlretrieve(
"https://storage.googleapis.com/audioset/vggish_pca_params.npz",
filename=os.path.join('src', 'audioset_demos', 'vggish_pca_params.npz')
)
if not os.path.isfile(os.path.join('data', 'audioset_demos', 'features.tar.gz')):
urllib.request.urlretrieve(
"https://storage.googleapis.com/eu_audioset/youtube_corpus/v1/features/features.tar.gz",
filename=os.path.join('data', 'audioset_demos', 'features.tar.gz')
)
import tarfile
# features.tar.gz is a gzipped tar archive, so decompressing it with gzip alone
# would only produce a single .tar file; extract its contents instead.
# Assumed layout: the archive unpacks into packed_features/, matching audioset_data_path above.
with tarfile.open(os.path.join('data', 'audioset_demos', 'features.tar.gz'), 'r:gz') as tar:
    tar.extractall(path=os.path.join('data', 'audioset_demos', 'audioset'))
def save_data(hdf5_path, x, video_id_list, y=None):
with h5py.File(hdf5_path, 'w') as hf:
hf.create_dataset('x', data=x)
hf.create_dataset('y', data=y)
hf.create_dataset('video_id_list', data=video_id_list, dtype='S11')
def load_data(hdf5_path):
    with h5py.File(hdf5_path, 'r') as hf:
        x = hf['x'][:]
        # 'y' may be missing for unlabelled data, so check for the key
        # instead of comparing the dataset object to None
        if 'y' in hf:
            y = hf['y'][:]
        else:
            y = None
        video_id_list = hf['video_id_list'][:].tolist()
    return x, y, video_id_list
def time_str_to_sec(time_str='00:00:00'):
time_str_list = time_str.split(':')
seconds = int(
timedelta(
hours=int(time_str_list[0]),
minutes=int(time_str_list[1]),
seconds=int(time_str_list[2])
).total_seconds()
)
return seconds
class miniRecorder:
def __init__(self, seconds=4, sampling_rate=16000):
self.FORMAT = pyaudio.paInt16 #paFloat32 #paInt16
self.CHANNELS = 1 # Must be Mono
self.RATE = sampling_rate # sampling rate (Hz), 22050 was used for this application
self.FRAMESIZE = 4200 # buffer size, number of data points to read at a time
self.RECORD_SECONDS = seconds + 1 # how long should the recording (approx) be
self.NOFRAMES = int((self.RATE * self.RECORD_SECONDS) / self.FRAMESIZE) # number of frames needed
def record(self):
# instantiate pyaudio
p = pyaudio.PyAudio()
# open stream
stream = p.open(format=self.FORMAT,
channels=self.CHANNELS,
rate=self.RATE,
input=True,
frames_per_buffer=self.FRAMESIZE)
# discard the first part of the recording
discard = stream.read(self.FRAMESIZE)
print('Recording...')
data = stream.read(self.NOFRAMES * self.FRAMESIZE)
decoded = np.frombuffer(data, dtype=np.int16) #np.float32)
print('Finished...')
stream.stop_stream()
stream.close()
p.terminate()
# Remove first second to avoid "click" sound from starting recording
self.sound_clip = decoded[self.RATE:]
class Worker(QtCore.QRunnable):
'''
Worker thread
Inherits from QRunnable to handler worker thread setup, signals and wrap-up.
:param callback: The function callback to run on this worker thread. Supplied args and
kwargs will be passed through to the runner.
:type callback: function
:param args: Arguments to pass to the callback function
:param kwargs: Keywords to pass to the callback function
'''
def __init__(self, fn, *args, **kwargs):
super(Worker, self).__init__()
# Store constructor arguments (re-used for processing)
self.fn = fn
self.args = args
self.kwargs = kwargs
@QtCore.pyqtSlot()
def run(self):
'''
Initialise the runner function with passed args, kwargs.
'''
self.fn(*self.args, **self.kwargs)
class AudioFile:
def __init__(self, file, chunk):
""" Init audio stream """
self.chunk = chunk
self.data = ''
self.wf = wave.open(file, 'rb')
self.p = pyaudio.PyAudio()
self.stream = self.p.open(
format = self.p.get_format_from_width(self.wf.getsampwidth()),
channels = self.wf.getnchannels(),
rate = self.wf.getframerate(),
output = True
)
def play(self):
""" Play entire file """
self.data = self.wf.readframes(self.chunk)
while self.data:
self.stream.write(self.data)
self.data = self.wf.readframes(self.chunk)
self.close()
def close(self):
""" Graceful shutdown """
self.stream.close()
self.p.terminate()
def read(self, chunk, exception_on_overflow=False):
return self.data
class App(QtGui.QMainWindow):
def __init__(self,
predictor,
n_top_classes=10,
plot_classes=FOCUS_CLASSES_ID,
parent=None):
super(App, self).__init__(parent)
### Predictor model ###
self.predictor = predictor
self.n_classes = predictor.n_classes
self.n_top_classes = n_top_classes
self.plot_classes = plot_classes
self.n_plot_classes = len(self.plot_classes)
### Start/stop control variable
self.continue_recording = False
self._timerId = None
### Settings ###
self.rate = 16000 # sampling rate
self.chunk = 1000 # reading chunk sizes,
#self.rate = 22050 # sampling rate
#self.chunk = 2450 # reading chunk sizes, make it a divisor of sampling rate
#self.rate = 44100 # sampling rate
#self.chunk = 882 # reading chunk sizes, make it a divisor of sampling rate
self.nperseg = 400 # samples pr segment for spectrogram, scipy default is 256
# self.nperseg = 490 # samples pr segment for spectrogram, scipy default is 256
self.noverlap = 0 # overlap between spectrogram windows, scipt default is nperseg // 8
self.tape_length = 20 # length of running tape
self.plot_length = 10 * self.rate
self.samples_passed = 0
self.pred_length = 10
self.pred_samples = self.rate * self.pred_length
self.start_tape() # initialize the tape
self.eps = np.finfo(float).eps
# Interval between predictions in number of samples
self.pred_intv = (self.tape_length // 4) * self.rate
self.pred_step = 10 * self.chunk
self.full_tape = False
#### Create Gui Elements ###########
self.mainbox = QtGui.QWidget()
self.setCentralWidget(self.mainbox)
self.mainbox.setLayout(QtGui.QVBoxLayout())
self.canvas = pg.GraphicsLayoutWidget()
self.mainbox.layout().addWidget(self.canvas)
self.label = QtGui.QLabel()
self.mainbox.layout().addWidget(self.label)
# Thread pool for prediction worker coordination
self.threadpool = QtCore.QThreadPool()
# self.threadpool_plot = QtCore.QThreadPool()
print("Multithreading with maximum %d threads" % self.threadpool.maxThreadCount())
# Play, record and predict button in toolbar
'''
self.playTimer = QtCore.QTimer()
self.playTimer.setInterval(500)
self.playTimer.timeout.connect(self.playTick)
self.toolbar = self.addToolBar("Play")
self.playScansAction = QtGui.QAction(QtGui.QIcon("control_play_blue.png"), "play scans", self)
self.playScansAction.triggered.connect(self.playScansPressed)
self.playScansAction.setCheckable(True)
self.toolbar.addAction(self.playScansAction)
'''
# Buttons and user input
btn_brow_1 = QtGui.QPushButton('Start/Stop Recording', self)
btn_brow_1.setGeometry(300, 15, 250, 25)
#btn_brow_4.clicked.connect(support.main(fname_points, self.fname_stl_indir, self.fname_stl_outdir))
# Action: Start or stop recording
btn_brow_1.clicked.connect(lambda: self.press_record())
btn_brow_2 = QtGui.QPushButton('Predict', self)
btn_brow_2.setGeometry(20, 15, 250, 25)
# Action: predict on present tape roll
btn_brow_2.clicked.connect(
lambda: self.start_predictions(
sound_clip=self.tape,
full_tape=False
)
)
self.le1 = QtGui.QLineEdit(self)
self.le1.setGeometry(600, 15, 250, 21)
self.yt_video_id = str(self.le1.text())
self.statusBar().showMessage("Ready")
# self.toolbar = self.addToolBar('Exit')
# self.toolbar.addAction(exitAction)
self.setGeometry(300, 300, 1400, 1200)
self.setWindowTitle('Live Audio Event Detector')
# self.show()
# line plot
self.plot = self.canvas.addPlot()
self.p1 = self.plot.plot(pen='r')
self.plot.setXRange(0, self.plot_length)
self.plot.setYRange(-0.5, 0.5)
self.plot.hideAxis('left')
self.plot.hideAxis('bottom')
self.canvas.nextRow()
# spectrogram
self.view = self.canvas.addViewBox()
self.view.setAspectLocked(False)
self.view.setRange(QtCore.QRectF(0,0, self.spec.shape[1], 100))
# image plot
self.img = pg.ImageItem() #(border='w')
self.view.addItem(self.img)
# bipolar colormap
pos = np.array([0., 1., 0.5, 0.25, 0.75])
color = np.array([[0,255,255,255], [255,255,0,255], [0,0,0,255], (0, 0, 255, 255), (255, 0, 0, 255)], dtype=np.ubyte)
cmap = pg.ColorMap(pos, color)
lut = cmap.getLookupTable(0.0, 1.0, 256)
self.img.setLookupTable(lut)
self.img.setLevels([-15, -5])
self.canvas.nextRow()
# create bar chart
#self.view2 = self.canvas.addViewBox()
# dummy data
#self.x = np.arange(self.n_top_classes)
#self.y1 = np.linspace(0, self.n_classes, num=self.n_top_classes)
#self.bg1 = pg.BarGraphItem(x=self.x, height=self.y1, width=0.6, brush='r')
#self.view2.addItem(self.bg1)
# Prediction line plot
self.plot2 = self.canvas.addPlot()
self.plot2.addLegend()
self.plot_list = [None]*self.n_plot_classes
for i in range(self.n_plot_classes):
self.plot_list[i] = self.plot2.plot(
pen=pg.intColor(i),
name=CLASS_NAMES[self.plot_classes[i]]
)
self.plot2.setXRange(0, self.plot_length)
self.plot2.setYRange(0.0, 1.0)
self.plot2.hideAxis('left')
self.plot2.hideAxis('bottom')
# self.canvas.nextRow()
#### Start #####################
# self.p = pyaudio.PyAudio()
# self.start_stream()
# self._update()
def playScansPressed(self):
if self.playScansAction.isChecked():
self.playTimer.start()
else:
self.playTimer.stop()
def playTick(self):
self._update()
def start_stream(self):
if not self.yt_video_id:
self.stream = self.p.open(
format=pyaudio.paFloat32,
channels=1,
rate=self.rate,
input=True,
frames_per_buffer=self.chunk
)
else:
self.stream = AudioFile(self.yt_video_id, self.chunk)
self.stream.play()
def close_stream(self):
self.stream.stop_stream()
self.stream.close()
self.p.terminate()
# self.exit_pool()
def read_stream(self):
self.raw = self.stream.read(self.chunk, exception_on_overflow=False)
data = np.frombuffer(self.raw, dtype=np.float32)
return self.raw, data
def start_tape(self):
self.tape = np.zeros(self.tape_length * self.rate)
# empty spectrogram tape
self.f, self.t, self.Sxx = signal.spectrogram(
self.tape[-self.plot_length:],
self.rate,
nperseg=self.nperseg,
noverlap=self.noverlap,
detrend=False,
return_onesided=True,
mode='magnitude'
)
self.spec = np.zeros(self.Sxx.shape)
self.pred = np.zeros((self.n_plot_classes, self.plot_length))
def tape_add(self):
if self.continue_recording:
raw, audio = self.read_stream()
self.tape[:-self.chunk] = self.tape[self.chunk:]
self.tape[-self.chunk:] = audio
self.samples_passed += self.chunk
# spectrogram on whole tape
# self.f, self.t, self.Sxx = signal.spectrogram(self.tape, self.rate)
# self.spec = self.Sxx
# spectrogram on last added part of tape
self.f, self.t, self.Sxx = signal.spectrogram(self.tape[-self.chunk:],
self.rate,
nperseg=self.nperseg,
noverlap=self.noverlap)
spec_chunk = self.Sxx.shape[1]
self.spec[:, :-spec_chunk] = self.spec[:, spec_chunk:]
# Extend spectrogram after converting to dB scale
self.spec[:, -spec_chunk:] = np.log10(abs(self.Sxx) + self.eps)
self.pred[:, :-self.chunk] = self.pred[:, self.chunk:]
'''
if (self.samples_passed % self.pred_intv) == 0:
sound_clip = self.tape # (MAXINT16 * self.tape).astype('int16') / 32768.0
if self.full_tape:
# predictions on full tape
pred_chunk = self.predictor.predict(
sound_clip=sound_clip[-self.pred_intv:],
sample_rate=self.rate
)[0][self.plot_classes]
self.pred[:, -self.pred_intv:] = np.asarray(
(self.pred_intv) * [pred_chunk]).transpose()
else:
# prediction, on some snip of the last part of the signal
# 1 s seems to be the shortest time frame with reliable predictions
self.start_predictions(sound_clip)
'''
def start_predictions(self, sound_clip=None, full_tape=False):
#self.samples_passed_at_predict = self.samples_passed
if sound_clip is None:
sound_clip = self.tape
if full_tape:
worker = Worker(self.provide_prediction, *(), **{
"sound_clip": sound_clip,
"pred_start": -self.pred_samples,
"pred_stop": None,
"pred_step": self.pred_samples
}
)
self.threadpool.start(worker)
else:
for chunk in range(0, self.pred_intv, self.pred_step):
pred_start = - self.pred_intv - self.pred_samples + chunk
pred_stop = - self.pred_intv + chunk
worker = Worker(self.provide_prediction, *(), **{
"sound_clip": sound_clip,
"pred_start": pred_start,
"pred_stop": pred_stop,
"pred_step": self.pred_step
}
)
self.threadpool.start(worker)
def provide_prediction(self, sound_clip, pred_start, pred_stop, pred_step):
#samples_passed_since_predict = self.samples_passed - self.samples_passed_at_predict
#pred_stop -= samples_passed_since_predict
pred_chunk = self.predictor.predict(
sound_clip=sound_clip[pred_start:pred_stop],
sample_rate=self.rate
)[0][self.plot_classes]
#samples_passed_since_predict = self.samples_passed - self.samples_passed_at_predict - samples_passed_since_predict
#pred_stop -= samples_passed_since_predict
if pred_stop is not None:
pred_stop_step = pred_stop - pred_step
else:
pred_stop_step = None
self.pred[:, pred_stop_step:pred_stop] = np.asarray(
(pred_step) * [pred_chunk]
).transpose()
def exit_pool(self):
"""
Exit all QRunnables and delete QThreadPool
"""
# When trying to quit, the application takes a long time to stop
self.threadpool.globalInstance().waitForDone()
self.threadpool.deleteLater()
sys.exit(0)
def press_record(self):
self.yt_video_id = str(self.le1.text())
# Switch between continue recording or stopping it
# Start or avoid starting recording dependent on last press
if self.continue_recording:
self.continue_recording = False
#if self._timerId is not None:
# self.killTimer(self._timerId)
self.close_stream()
else:
self.continue_recording = True
self.p = pyaudio.PyAudio()
self.start_stream()
self._update()
def _update(self):
try:
if self.continue_recording:
self.tape_add()
# self.img.setImage(self.spec.T)
#kwargs = {
# "image": self.spec.T,
# "autoLevels": False,
#
#worker = Worker(self.img.setImage, *(), **kwargs)
#self.threadpool_plot.start(worker)
self.img.setImage(self.spec.T, autoLevels=False)
#worker = Worker(
# self.p1.setData,
# *(),
# **{'y': self.tape[-self.plot_length:]}
#)
#self.threadpool_plot.start(worker)
self.p1.setData(self.tape[-self.plot_length:])
#pred_var = np.var(self.pred, axis=-1)
#pred_mean = np.mean(self.pred, axis=-1)
#class_cand = np.where( (pred_mean > 0.001)*(pred_var > 0.01) )
# n_classes_incl = min(self.n_top_classes, class_cand[0].shape[0])
# print(n_classes_incl)
for i in range(self.n_plot_classes):
#worker = Worker(
# self.plot_list[i].setData,
# *(),
# **{'y': self.pred[i,:]}
#)
#self.threadpool_plot.start(worker)
self.plot_list[i].setData(self.pred[i,:]) # self.plot_classes[i],:])
#self.bg1.setOpts(
# height=self.y1
#)
#self.bg1.setOpts(
# height=np.sort(
# self.pred[:, -1]
# )[-1:-(self.n_top_classes+1):-1]
#)
#print(np.max(self.tape), np.min(self.tape))
# self.label.setText('Class: {0:0.3f}'.format(self.pred[-1]))
QtCore.QTimer.singleShot(1, self._update)
except KeyboardInterrupt:
self.close_stream()
from AudioSetClassifier import AudioSetClassifier
# model_type='decision_level_single_attention',
# balance_type='balance_in_batch',
# at_iteration=50000
#ASC = AudioSetClassifier(
# model_type='decision_level_max_pooling', #single_attention',
# balance_type='balance_in_batch',
# iters=50000
#)
ASC = AudioSetClassifier()
app=0 #This is the solution
app = QtGui.QApplication(sys.argv)
MainApp = App(predictor=ASC)
MainApp.show()
sys.exit(app.exec_())
minirec = miniRecorder(seconds=10, sampling_rate=16000)
minirec.record()
minirec_pred = ASC.predict(sound_clip=minirec.sound_clip / 32768.0, sample_rate=16000)
print(minirec_pred[:,[0, 37, 62, 63]])
max_prob_classes = np.argsort(minirec_pred, axis=-1)[:, ::-1]
max_prob = np.sort(minirec_pred, axis=-1)[:,::-1]
print(max_prob.shape)
example = pd.DataFrame(class_labels['display_name'][max_prob_classes[0,:10]])
example.loc[:, 'prob'] = pd.Series(max_prob[0, :10], index=example.index)
print(example)
example.plot.bar(x='display_name', y='prob', rot=90)
plt.show()
print()
```
## Parameters for how to plot audio
```
# Sample rate
# this has to be at least twice of max frequency which we've entered
# but you can play around with different sample rates and see how this
# affects the results;
# since we generated this audio, the sample rate is the bitrate
sample_rate = vggish_params.SAMPLE_RATE
# size of audio FFT window relative to sample_rate
n_window = 1024
# overlap between adjacent FFT windows
n_overlap = 360
# number of mel frequency bands to generate
n_mels = 64
# max duration of short video clips
duration = 10
# note frequencies https://pages.mtu.edu/~suits/notefreqs.html
freq1 = 512.
freq2 = 1024.
# fmin and fmax for librosa filters in Hz - used for visualization purposes only
fmax = max(freq1, freq2)*8 + 1000.
fmin = 0.
# stylistic change to the notebook
fontsize = 14
plt.rcParams['font.family'] = 'serif'
plt.rcParams['font.serif'] = 'Ubuntu'
plt.rcParams['font.monospace'] = 'Ubuntu Mono'
plt.rcParams['font.size'] = fontsize
plt.rcParams['axes.labelsize'] = fontsize
plt.rcParams['axes.labelweight'] = 'bold'
plt.rcParams['axes.titlesize'] = fontsize
plt.rcParams['xtick.labelsize'] = fontsize
plt.rcParams['ytick.labelsize'] = fontsize
plt.rcParams['legend.fontsize'] = fontsize
plt.rcParams['figure.titlesize'] = fontsize
```
## Choosing video IDs and start times before download
```
video_ids = [
'BaW_jenozKc',
'E6sS2d-NeTE',
'xV0eTva6SKQ',
'2Szah76TMgo',
'g38kRk6YAA0',
'OkkkPAE9KvE',
'N1zUp9aPFG4'
]
video_start_time_str = [
'00:00:00',
'00:00:10',
'00:00:05',
'00:00:02',
'00:03:10',
'00:00:10',
'00:00:06'
]
video_start_time = list(map(time_str_to_sec, video_start_time_str))
```
## Download, save and cut video audio
```
video_titles = []
maxv = np.iinfo(np.int16).max
for i, vid in enumerate(video_ids):
# Download and store video under data/raw/
video_title = dl_yt.download_youtube_wav(
video_id=vid,
raw_dir=raw_dir,
short_raw_dir=short_raw_dir,
start_sec=video_start_time[i],
duration=duration,
sample_rate=sample_rate
)
video_titles += [video_title]
print()
'''
audio_path = os.path.join(raw_dir, vid) + '.wav'
short_audio_path = os.path.join(short_raw_dir, vid) + '.wav'
# Load and downsample audio to 16000
# audio is a 1D time series of the sound
# can also use (audio, fs) = soundfile.read(audio_path)
(audio, fs) = librosa.load(
audio_path,
sr = sample_rate,
offset = video_start_time[i],
duration = duration
)
# Store downsampled 10sec clip under data/short_raw/
wavfile.write(
filename=short_audio_path,
rate=sample_rate,
data=(audio * maxv).astype(np.int16)
)
'''
# Usage example for pyaudio
i = 6
a = AudioFile(
os.path.join(short_raw_dir, video_ids[i]) + '.wav',
chunk = 1000
)
a.play()
a.close()
```
## Retrieve VGGish PCA embeddings
```
video_vggish_emb = []
# Restore VGGish model trained on YouTube8M dataset
# Retrieve PCA-embeddings of bottleneck features
with tf.Graph().as_default(), tf.Session() as sess:
# Define the model in inference mode, load the checkpoint, and
# locate input and output tensors.
vggish_slim.define_vggish_slim(training=False)
vggish_slim.load_vggish_slim_checkpoint(sess, model_checkpoint)
features_tensor = sess.graph.get_tensor_by_name(
vggish_params.INPUT_TENSOR_NAME)
embedding_tensor = sess.graph.get_tensor_by_name(
vggish_params.OUTPUT_TENSOR_NAME)
for i, vid in enumerate(video_ids):
audio_path = os.path.join(short_raw_dir, vid) + '.wav'
examples_batch = vggish_input.wavfile_to_examples(audio_path)
print(examples_batch.shape)
# Prepare a postprocessor to munge the model embeddings.
pproc = vggish_postprocess.Postprocessor(pca_params)
# Run inference and postprocessing.
[embedding_batch] = sess.run([embedding_tensor],
feed_dict={features_tensor: examples_batch})
print(embedding_batch.shape)
postprocessed_batch = pproc.postprocess(embedding_batch)
print(postprocessed_batch.shape)
video_vggish_emb.extend([postprocessed_batch])
print(len(video_vggish_emb))
```
## Plot audio, transformations and embeddings
### Function for visualising audio
```
def plot_audio(audio, emb):
audio_sec = audio.shape[0]/sample_rate
# Make a new figure
plt.figure(figsize=(18, 16), dpi= 60, facecolor='w', edgecolor='k')
plt.subplot(511)
# Display the spectrogram on a mel scale
librosa.display.waveplot(audio, int(sample_rate), max_sr = int(sample_rate))
plt.title('Raw audio waveform @ %d Hz' % sample_rate, fontsize = fontsize)
plt.xlabel("Time (s)")
plt.ylabel("Amplitude")
# Define filters and windows
melW =librosa.filters.mel(sr=sample_rate, n_fft=n_window, n_mels=n_mels, fmin=fmin, fmax=fmax)
ham_win = np.hamming(n_window)
# Compute fft to spectrogram
[f, t, x] = signal.spectral.spectrogram(
x=audio,
window=ham_win,
nperseg=n_window,
noverlap=n_overlap,
detrend=False,
return_onesided=True,
mode='magnitude')
# Apply filters and log transformation
x_filtered = np.dot(x.T, melW.T)
x_logmel = np.log(x_filtered + 1e-8)
x_logmel = x_logmel.astype(np.float32)
# Display frequency power spectrogram
plt.subplot(512)
x_coords = np.linspace(0, audio_sec, x.shape[0])
librosa.display.specshow(
x.T,
sr=sample_rate,
x_axis='time',
y_axis='hz',
x_coords=x_coords
)
plt.xlabel("Time (s)")
plt.title("FFT spectrogram (dB)", fontsize = fontsize)
# optional colorbar plot
plt.colorbar(format='%+02.0f dB')
# Display log-mel freq. power spectrogram
plt.subplot(513)
x_coords = np.linspace(0, audio_sec, x_logmel.shape[0])
librosa.display.specshow(
x_logmel.T,
sr=sample_rate,
x_axis='time',
y_axis='mel',
x_coords=x_coords
)
plt.xlabel("Time (s)")
plt.title("Mel power spectrogram used in DCASE 2017 (dB)", fontsize = fontsize)
# optional colorbar plot
plt.colorbar(format='%+02.0f dB')
# Display embeddings
plt.subplot(514)
x_coords = np.linspace(0, audio_sec, emb.shape[0])
librosa.display.specshow(
emb.T,
sr=sample_rate,
x_axis='time',
y_axis=None,
x_coords=x_coords
)
plt.xlabel("Time (s)")
plt.colorbar()
plt.subplot(515)
plt.scatter(
x=emb[:, 0],
y=emb[:, 1],
)
plt.xlabel("PC_1")
plt.ylabel("PC_2")
# Make the figure layout compact
plt.tight_layout()
plt.show()
```
### Visualise all clips of audio chosen
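No code was provided for this step, so the cell below is a minimal sketch, reusing the `plot_audio` helper and the `video_ids`, `raw_dir`, `video_start_time`, `sample_rate`, `duration` and `video_vggish_emb` variables defined above, of how every downloaded clip could be visualised in one go.
```
for i, vid in enumerate(video_ids):
    audio_path = os.path.join(raw_dir, vid) + '.wav'
    # Reload each clip at the model sample rate and cut the same 10 s window
    (audio, fs) = librosa.load(
        audio_path,
        sr=sample_rate,
        offset=video_start_time[i],
        duration=duration
    )
    plot_audio(audio, video_vggish_emb[i])
```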
### Visualise one clip of audio and embed YouTube video for comparison
```
i = 4
vid = video_ids[i]
audio_path = os.path.join(raw_dir, vid) + '.wav'
# audio is a 1D time series of the sound
# can also use (audio, fs) = soundfile.read(audio_path)
(audio, fs) = librosa.load(
audio_path,
sr = sample_rate,
offset = video_start_time[i],
duration = duration
)
plot_audio(audio, video_vggish_emb[i])
start=int(
timedelta(
hours=0,
minutes=0,
seconds=video_start_time[i]
).total_seconds()
)
YouTubeVideo(
vid,
start=start,
end=start+duration,
autoplay=0,
theme="light",
color="red"
)
# Plot emb with scatter
# Check first couple of PCs,
# for both train and test data, to see if the test is lacking variance
```
## Evaluate trained audio detection model
```
import audio_event_detection_model as AEDM
import utilities
from sklearn import metrics
model = AEDM.CRNN_audio_event_detector()
```
### Evaluating model on audio downloaded
```
(x_user_inp, y_user_inp) = utilities.transform_data(
np.array(video_vggish_emb)
)
predictions = model.predict(
x=x_user_inp
)
```
### Evaluating model on training data
```
(x_tr, y_tr, vid_tr) = load_data(os.path.join(audioset_data_path, 'bal_train.h5'))
(x_tr, y_tr) = utilities.transform_data(x_tr, y_tr)
pred_tr = model.predict(x=x_tr)
print(pred_tr.max())
print(metrics.accuracy_score(y_tr, (pred_tr > 0.5).astype(np.float32)))
print(metrics.roc_auc_score(y_tr, pred_tr))
print(np.mean(metrics.roc_auc_score(y_tr, pred_tr, average=None)))
stats = utilities.calculate_stats(pred_tr, y_tr)
mAUC = np.mean([stat['auc'] for stat in stats])
max_prob_classes = np.argsort(predictions, axis=-1)[:, ::-1]
max_prob = np.sort(predictions, axis=-1)[:,::-1]
print(mAUC)
print(max_prob.max())
print(max_prob[:,:10])
print(predictions.shape)
print(max_prob_classes[:,:10])
from numpy import genfromtxt
import pandas as pd
class_labels = pd.read_csv('class_labels_indices.csv')
print(class_labels['display_name'][max_prob_classes[5,:10]])
for i, vid in enumerate(video_ids):  # iterate over all video ids (indexing [0] would loop over the characters of one id)
print(video_titles[i])
print()
example = pd.DataFrame(class_labels['display_name'][max_prob_classes[i,:10]])
example.loc[:, 'prob'] = pd.Series(max_prob[i, :10], index=example.index)
print(example)
example.plot.bar(x='display_name', y='prob', rot=90)
plt.show()
print()
```
## Investigating model predictions on downloaded audio clips
```
i = 0
vid = video_ids[i]
print(video_titles[i])
print()
YouTubeVideo(
vid,
start=start,
end=start+duration,
autoplay=0,
theme="light",
color="red"
)
example = pd.DataFrame(class_labels['display_name'][max_prob_classes[i,:10]])
example.loc[:, 'prob'] = pd.Series(max_prob[i, :10], index=example.index)
print(example)
example.plot.bar(x='display_name', y='prob', rot=90)
plt.show()
print()
#eval_metrics = model.evaluate(x=x_tr, y=y_tr)
#for i, metric_name in enumerate(model.metrics_names):
# print("{}: {:1.4f}".format(metric_name, eval_metrics[i]))
#qtapp = App(model)
from AudioSetClassifier import AudioSetClassifier
import time
ASC = AudioSetClassifier()
sound_clip = os.path.join(short_raw_dir, video_ids[1]) + '.wav'
t0 = time.time()
test_pred = ASC.predict(sound_clip=sound_clip)
t1 = time.time()
print('Time spent on 1 forward pass prediction:', t1-t0)
print(test_pred.shape)
for i, vid in enumerate(video_ids):
print(video_titles[i])
print()
sound_clip = os.path.join(short_raw_dir, vid) + '.wav'
predictions = ASC.predict(sound_clip=sound_clip)
max_prob_classes = np.argsort(predictions, axis=-1)[:, ::-1]
max_prob = np.sort(predictions, axis=-1)[:,::-1]
print(max_prob.shape)
example = pd.DataFrame(class_labels['display_name'][max_prob_classes[0,:10]])
example.loc[:, 'prob'] = pd.Series(max_prob[0, :10], index=example.index)
print(example)
example.plot.bar(x='display_name', y='prob', rot=90)
plt.show()
print()
import sys
app=0 #This is the solution
app = QtGui.QApplication(sys.argv)
MainApp = App(predictor=ASC)
MainApp.show()
sys.exit(app.exec_())
#from PyQt4 import QtGui, QtCore
class SimpleWindow(QtGui.QWidget):
def __init__(self, parent=None):
QtGui.QWidget.__init__(self, parent)
self.setGeometry(300, 300, 200, 80)
self.setWindowTitle('Hello World')
quit = QtGui.QPushButton('Close', self)
quit.setGeometry(10, 10, 60, 35)
self.connect(quit, QtCore.SIGNAL('clicked()'),
self, QtCore.SLOT('close()'))
if __name__ == '__main__':
app = QtCore.QCoreApplication.instance()
if app is None:
app = QtGui.QApplication([])
sw = SimpleWindow()
sw.show()
try:
from IPython.lib.guisupport import start_event_loop_qt4
start_event_loop_qt4(app)
except ImportError:
app.exec_()
```
1. Understand attention
2. Understand filters
3. Understand Multi-label, hierarchical, knowledge graphs
4. Understand class imbalance
5. CCA on VGGish vs. ResNet audioset emb. to check if there's a linear connection.
6. Train linear layer to convert VGGish emb. to ResNet-50 emb.
Plot in GUI:
1. Exclude all non-active classes
2. Draw class names on curves going up
3. Remove histogram
4. Make faster
```
video_vggish_emb = []
test_wav_path = os.path.join(src_dir, 'data', 'wav_file')
wav_files = os.listdir(test_wav_path)
example_names = []
# Restore VGGish model trained on YouTube8M dataset
# Retrieve PCA-embeddings of bottleneck features
with tf.Graph().as_default(), tf.Session() as sess:
# Define the model in inference mode, load the checkpoint, and
# locate input and output tensors.
vggish_slim.define_vggish_slim(training=False)
vggish_slim.load_vggish_slim_checkpoint(sess, model_checkpoint)
features_tensor = sess.graph.get_tensor_by_name(
vggish_params.INPUT_TENSOR_NAME)
embedding_tensor = sess.graph.get_tensor_by_name(
vggish_params.OUTPUT_TENSOR_NAME)
# Prepare a postprocessor to munge the model embeddings.
pproc = vggish_postprocess.Postprocessor(pca_params)
for i, vid in enumerate(wav_files):
audio_path = os.path.join(test_wav_path, vid)
print(vid)
examples_batch = vggish_input.wavfile_to_examples(audio_path)
print(examples_batch.shape)
# Run inference and postprocessing.
[embedding_batch] = sess.run([embedding_tensor],
feed_dict={features_tensor: examples_batch})
print(embedding_batch.shape)
postprocessed_batch = pproc.postprocess(embedding_batch)
batch_shape = postprocessed_batch.shape
print(batch_shape)
if batch_shape[0] > 10:
postprocessed_batch = postprocessed_batch[:10]
elif batch_shape[0] < 10:
zero_pad = np.zeros((10, 128))
zero_pad[:batch_shape[0]] = postprocessed_batch
postprocessed_batch = zero_pad
print(postprocessed_batch.shape)
if postprocessed_batch.shape[0] == 10:
video_vggish_emb.extend([postprocessed_batch])
example_names.extend([vid])
print(len(video_vggish_emb))
import audio_event_detection_model as AEDM
import utilities
model = AEDM.CRNN_audio_event_detector()
(x_user_inp, y_user_inp) = utilities.transform_data(
np.array(video_vggish_emb)
)
predictions_AEDM = model.predict(
x=x_user_inp
)
predictions_ASC = np.zeros([len(wav_files), 527])
for i, vid in enumerate(wav_files):
audio_path = os.path.join(test_wav_path, vid)
predictions_ASC[i] = ASC.predict(sound_clip=audio_path)
qkong_res = '''2018Q1Q10Q17Q12Q59Q512440Q-5889Q.fea_lab ['Speech'] [0.8013877]
12_4_train ambience.fea_lab ['Vehicle', 'Rail transport', 'Train', 'Railroad car, train wagon'] [0.38702238, 0.6618184, 0.7742054, 0.5886036]
19_3_forest winter.fea_lab ['Animal'] [0.16109303]
2018Q1Q10Q17Q58Q49Q512348Q-5732Q.fea_lab ['Speech'] [0.78335935]
15_1_whistle.fea_lab ['Whistling'] [0.34013063] ['music']
2018Q1Q10Q13Q52Q8Q512440Q-5889Q.fea_lab ['Speech'] [0.7389336]
09_2_my guitar.fea_lab ['Music', 'Musical instrument', 'Plucked string instrument', 'Guitar'] [0.84308875, 0.48860216, 0.43791085, 0.47915566]
2018Q1Q10Q13Q29Q46Q512440Q-5889Q.fea_lab ['Vehicle'] [0.18344605]
05_2_DFA.fea_lab ['Music', 'Musical instrument', 'Plucked string instrument', 'Guitar'] [0.93665695, 0.57123834, 0.53891456, 0.63112855]
'''
q_kong_res = {
'2018Q1Q10Q17Q12Q59Q512440Q-5889Q.wav': (['Speech'], [0.8013877]),
'12_4_train ambience.wav': (['Vehicle', 'Rail transport', 'Train', 'Railroad car, train wagon'], [0.38702238, 0.6618184, 0.7742054, 0.5886036]),
'19_3_forest winter.wav': (['Animal'], [0.16109303]),
'2018Q1Q10Q17Q58Q49Q512348Q-5732Q.wav': (['Speech'], [0.78335935]),
'15_1_whistle.wav': (['Whistling'], [0.34013063], ['music']),
'2018Q1Q10Q13Q52Q8Q512440Q-5889Q.wav': (['Speech'], [0.7389336]),
'09_2_my guitar.wav': (['Music', 'Musical instrument', 'Plucked string instrument', 'Guitar'], [0.84308875, 0.48860216, 0.43791085, 0.47915566]),
'2018Q1Q10Q13Q29Q46Q512440Q-5889Q.wav': (['Vehicle'], [0.18344605]),
'05_2_DFA.wav': (['Music', 'Musical instrument', 'Plucked string instrument', 'Guitar'], [0.93665695, 0.57123834, 0.53891456, 0.63112855])
}
#test_examples_res = qkong_res.split('\n')
#print(test_examples_res)
#rint()
#split_fun = lambda x: x.split(' [')
#test_examples_res = list(map(split_fun, test_examples_res))#
# print(test_examples_res)
max_prob_classes_AEDM = np.argsort(predictions_AEDM, axis=-1)[:, ::-1]
max_prob_AEDM = np.sort(predictions_AEDM, axis=-1)[:,::-1]
max_prob_classes_ASC = np.argsort(predictions_ASC, axis=-1)[:, ::-1]
max_prob_ASC = np.sort(predictions_ASC, axis=-1)[:,::-1]
for i in range(len(wav_files)):
print(wav_files[i])
print(max_prob_classes_AEDM[i,:10])
print(max_prob_AEDM[i,:10])
print()
print(max_prob_classes_ASC[i,:10])
print(max_prob_ASC[i,:10])
print()
print()
```
2018Q1Q10Q17Q12Q59Q512440Q-5889Q.wav
2018Q1Q10Q13Q52Q8Q512440Q-5889Q.wav
2018Q1Q10Q13Q29Q46Q512440Q-5889Q.wav
2018Q1Q10Q17Q58Q49Q512348Q-5732Q.wav
```
for i, vid in enumerate(example_names):
print(vid)
print()
example = pd.DataFrame(class_labels['display_name'][max_prob_classes_AEDM[i,:10]])
example.loc[:, 'top_10_AEDM_pred'] = pd.Series(max_prob_AEDM[i, :10], index=example.index)
example.loc[:, 'index_ASC'] = pd.Series(max_prob_classes_ASC[i,:10], index=example.index)
example.loc[:, 'display_name_ASC'] = pd.Series(
class_labels['display_name'][max_prob_classes_ASC[i,:10]],
index=example.index_ASC
)
example.loc[:, 'top_10_ASC_pred'] = pd.Series(max_prob_ASC[i, :10], index=example.index)
print(example)
example.plot.bar(x='display_name', y=['top_10_AEDM_pred', 'top_10_ASC_pred'] , rot=90)
plt.show()
print()
ex_lab = q_kong_res[vid][0]
ex_pred = q_kong_res[vid][1]
example = pd.DataFrame(class_labels[class_labels['display_name'].isin(ex_lab)])
example.loc[:, 'AEDM_pred'] = pd.Series(
predictions_AEDM[i, example.index.tolist()],
index=example.index
)
example.loc[:, 'ASC_pred'] = pd.Series(
predictions_ASC[i, example.index.tolist()],
index=example.index
)
example.loc[:, 'qkong_pred'] = pd.Series(
ex_pred,
index=example.index
)
print(example)
print()
example.plot.bar(x='display_name', y=['AEDM_pred', 'ASC_pred', 'qkong_pred'], rot=90)
plt.show()
```
## Audio set data collection pipeline
### Download, cut and convert the audio of listed urls
```
colnames = '# YTID, start_seconds, end_seconds, positive_labels'.split(', ')
print(colnames)
bal_train_csv = pd.read_csv('balanced_train_segments.csv', sep=', ', header=2) #usecols=colnames)
bal_train_csv.rename(columns={colnames[0]: colnames[0][-4:]}, inplace=True)
print(bal_train_csv.columns.values)
print(bal_train_csv.loc[:10, colnames[3]])
print(bal_train_csv.YTID.tolist()[:10])
bal_train_csv['pos_lab_list'] = bal_train_csv.positive_labels.apply(lambda x: x[1:-1].split(','))
colnames.append('pos_lab_list')  # append the new column name (extend would add its individual characters)
print('Pos_lab_list')
print(bal_train_csv.loc[:10, 'pos_lab_list'])
sample_rate = 16000
audioset_short_raw_dir = os.path.join(src_dir, 'data', 'audioset_short_raw')
if not os.path.exists(audioset_short_raw_dir):
os.makedirs(audioset_short_raw_dir)
audioset_raw_dir = os.path.join(src_dir, 'data', 'audioset_raw')
if not os.path.exists(audioset_raw_dir):
os.makedirs(audioset_raw_dir)
audioset_embed_path = os.path.join(src_dir, 'data', 'audioset_embed')
if not os.path.exists(audioset_embed_path):
os.makedirs(audioset_embed_path)
audioset_video_titles = []
audioset_video_ids = bal_train_csv.YTID.tolist()
audioset_video_ids_bin = bal_train_csv.YTID.astype('|S11').tolist()
video_start_time = bal_train_csv.start_seconds.tolist()
video_end_time = bal_train_csv.end_seconds.tolist()
# Provide class dictionary for conversion from mid to either index [0] or display_name [1]
class_dict = class_labels.set_index('mid').T.to_dict('list')
print(class_dict['/m/09x0r'])
print(
list(
map(
lambda x: class_dict[x][0],
bal_train_csv.loc[0, 'pos_lab_list']
)
)
)
bal_train_csv['pos_lab_ind_list'] = bal_train_csv.pos_lab_list.apply(
lambda x: [class_dict[y][0] for y in x]
)
class_vec = np.zeros([1, 527])
class_vec[:, bal_train_csv.loc[0, 'pos_lab_ind_list']] = 1
print(class_vec)
print(bal_train_csv.dtypes)
#print(bal_train_csv.loc[:10, colnames[4]])
video_ids_incl = []
video_ids_incl_bin = []
video_ids_excl = []
vggish_embeds = []
labels = []
print(video_ids_incl)
video_ids_incl = video_ids_incl[:-1]
print(video_ids_incl)
video_ids_checked = video_ids_incl + video_ids_excl
video_ids = [vid for vid in audioset_video_ids if vid not in video_ids_checked]
for i, vid in enumerate(video_ids):
print('{}.'.format(i))
# Download and store video under data/audioset_short_raw/
if (vid + '.wav') not in os.listdir(audioset_short_raw_dir):
video_title = dl_yt.download_youtube_wav(
video_id=vid,
raw_dir=None,
short_raw_dir=audioset_short_raw_dir,
start_sec=video_start_time[i],
duration=video_end_time[i]-video_start_time[i],
sample_rate=sample_rate
)
audioset_video_titles += [video_title]
wav_available = video_title is not None
else:
print(vid, 'already downloaded, so we skip this download.')
wav_available = True
if wav_available:
video_ids_incl += [vid]
video_ids_incl_bin += [audioset_video_ids_bin[i]]
vggish_embeds.extend(
ASC.embed(
os.path.join(
audioset_short_raw_dir,
vid
) + '.wav'
)
)
class_vec = np.zeros([1, 527])
class_vec[:, bal_train_csv.loc[i, 'pos_lab_ind_list']] = 1
labels.extend(class_vec)
else:
video_ids_excl += [vid]
print()
jobs = []
for i, vid in enumerate(video_ids):
# Download and store video under data/audioset_short_raw/
if (vid + '.wav') not in os.listdir(audioset_short_raw_dir):
args = (
vid,
None,
audioset_short_raw_dir,
video_start_time[i],
video_end_time[i]-video_start_time[i],
sample_rate
)
process = multiprocessing.Process(
target=dl_yt.download_youtube_wav,
args=args
)
jobs.append(process)
# Start the processes (i.e. calculate the random number lists)
for j in jobs:
j.start()
# Ensure all of the processes have finished
for j in jobs:
j.join()
save_data(
hdf5_path=os.path.join(audioset_embed_path, 'bal_train.h5'),
x=np.array(vggish_embeds),
video_id_list=np.array(video_ids_incl_bin),
y=np.array(labels)
)
x, y, vid_list = load_data(os.path.join(audioset_embed_path, 'bal_train.h5'))
print(vid_list)
x_train, y_train, video_id_train = load_data(os.path.join(audioset_embed_path, 'bal_train.h5'))
print(video_id_train)
x_train, y_train, video_id_train = load_data(
os.path.join(
'data',
'audioset',
'packed_features',
'bal_train.h5'
)
)
print(video_id_train[:100])
from retrieve_audioset import retrieve_embeddings
retrieve_embeddings(
data_path=os.path.join('data', 'audioset')
)
```
```
# Filter tensorflow version warnings
import os
# https://stackoverflow.com/questions/40426502/is-there-a-way-to-suppress-the-messages-tensorflow-prints/40426709
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' # or any {'0', '1', '2'}
import warnings
# https://stackoverflow.com/questions/15777951/how-to-suppress-pandas-future-warning
warnings.simplefilter(action='ignore', category=FutureWarning)
warnings.simplefilter(action='ignore', category=Warning)
import tensorflow as tf
tf.get_logger().setLevel('INFO')
tf.autograph.set_verbosity(0)
import logging
tf.get_logger().setLevel(logging.ERROR)
import gym
from stable_baselines.common.policies import CnnPolicy #, MlpPolicy, CnnLstmPolicy
from stable_baselines.common.vec_env import DummyVecEnv
from stable_baselines import PPO2
from stable_baselines.common.evaluation import evaluate_policy as test
from stable_baselines.common.callbacks import EvalCallback, StopTrainingOnRewardThreshold
```
```
## Choose one agent, see Docu for description
#agent='CarRacing-v0'
#agent='CarRacing-v1'
agent='CarRacing-v3'
# Stop training when the model reaches the reward threshold
callback_on_best = StopTrainingOnRewardThreshold(reward_threshold = 170, verbose=1)
seed = 2000
## SIMULATION param
## Changing these makes world models incompatible!!
game_color = 2
indicators = True
fpst = 4
skip = 3
actions = [[0, 0, 0], [-0.4, 0, 0], [0.4, 0, 0], [0, 0.6, 0], [0, 0, 0.8]] #this is ACT
obst_loc = [6, -12, 25, -50, 75, -37, 62, -87, 95, -29] #track percentage, negative for obstacle to the left-hand side
## Loading drive_pretrained model
import pickle
root = 'ppo_cnn_gym-mod_'
file = root+'c{:d}_f{:d}_s{:d}_{}_a{:d}'.format(game_color,fpst,skip,indicators,len(actions))
model = PPO2.load(file)
## This model param
use = 6 # number of times to use same track [1,100]
ept = 10 # different starting points on same track [1,20]
patience = 1.0
track_complexity = 12
#REWARD2 = [-0.05, 0.1, 0.0, 0.0, 2.0, 0.0, 100, -20, -100, -50, -5, -100]
if agent=='CarRacing-v3':
env = gym.make(agent, seed=seed,
game_color=game_color,
indicators=indicators,
frames_per_state=fpst,
skip_frames=skip,
# discre=actions, #passing custom actions
use_track = use,
episodes_per_track = ept,
tr_complexity = track_complexity,
tr_width = 45,
patience = patience,
off_track = patience,
end_on_contact = True, #learning to avoid obstacles the-hard-way
oily_patch = False,
num_obstacles = 5, #some obstacles
obst_location = obst_loc, #passing fixed obstacle location
# f_reward = REWARD2, #passing a custom reward function
verbose = 2 )
else:
env = gym.make(agent)
env = DummyVecEnv([lambda: env])
## Training on obstacles
model.set_env(env)
batch_size = 256
updates = 700
model.learn(total_timesteps = updates*batch_size, log_interval=1) #, callback=eval_callback)
#Save last updated model
file = root+'c{:d}_f{:d}_s{:d}_{}_a{:d}__u{:d}_e{:d}_p{}_bs{:d}'.format(
game_color,fpst,skip,indicators,len(actions),use,ept,patience,batch_size)
model.save(file, cloudpickle=True)
param_list=model.get_parameter_list()
env.close()
## This model param #2
use = 6 # number of times to use same track [1,100]
ept = 10 # different starting points on same track [1,20]
patience = 1.0
track_complexity = 12
#REWARD2 = [-0.05, 0.1, 0.0, 0.0, 2.0, 0.0, 100, -20, -100, -50, -5, -100]
seed = 25000
if agent=='CarRacing-v3':
env2 = gym.make(agent, seed=seed,
game_color=game_color,
indicators=indicators,
frames_per_state=fpst,
skip_frames=skip,
# discre=actions, #passing custom actions
use_track = use,
episodes_per_track = ept,
tr_complexity = track_complexity,
tr_width = 45,
patience = patience,
off_track = patience,
end_on_contact = False, # CHANGED
oily_patch = False,
num_obstacles = 5, #some obstacles
obst_location = 0, #using random obstacle location
# f_reward = REWARD2, #passing a custom reward function
verbose = 3 )
else:
env2 = gym.make(agent)
env2 = DummyVecEnv([lambda: env2])
## Training on obstacles
model.set_env(env2)
#batch_size = 384
updates = 1500
## Separate evaluation env
test_freq = 100 #policy updates until evaluation
test_episodes_per_track = 5 #number of starting points on test_track
eval_log = './evals/'
env_test = gym.make(agent, seed=int(3.14*seed),
game_color=game_color,
indicators=indicators,
frames_per_state=fpst,
skip_frames=skip,
# discre=actions, #passing custom actions
use_track = 1, #change test track after 1 ept round
episodes_per_track = test_episodes_per_track,
tr_complexity = 12, #test on a medium complexity track
tr_width = 45,
patience = 2.0,
off_track = 2.0,
end_on_contact = False,
oily_patch = False,
num_obstacles = 5,
obst_location = obst_loc) #passing fixed obstacle location
env_test = DummyVecEnv([lambda: env_test])
eval_callback = EvalCallback(env_test, callback_on_new_best=callback_on_best, #None,
n_eval_episodes=test_episodes_per_track*3, eval_freq=test_freq*batch_size,
best_model_save_path=eval_log, log_path=eval_log, deterministic=True,
render = False)
model.learn(total_timesteps = updates*batch_size, log_interval=1, callback=eval_callback)
#Save last updated model
#file = root+'c{:d}_f{:d}_s{:d}_{}_a{:d}__u{:d}_e{:d}_p{}_bs{:d}'.format(
# game_color,fpst,skip,indicators,len(actions),use,ept,patience,batch_size)
model.save(file+'_II', cloudpickle=True)
param_list=model.get_parameter_list()
env2.close()
env_test.close()
## Enjoy last trained policy
if agent=='CarRacing-v3': #create an independent test environment, almost everything in std/random definition
env3 = gym.make(agent, seed=None,
game_color=game_color,
indicators = True,
frames_per_state=fpst,
skip_frames=skip,
# discre=actions,
use_track = 2,
episodes_per_track = 1,
patience = 5.0,
off_track = 3.0 )
else:
env3 = gym.make(agent)
env3 = DummyVecEnv([lambda: env3])
obs = env3.reset()
print(obs.shape)
done = False
pasos = 0
_states=None
while not done: # and pasos<1500:
action, _states = model.predict(obs, deterministic=True)
obs, reward, done, info = env3.step(action)
env3.render()
pasos+=1
env3.close()
print()
print(reward, done, pasos) #, info)
## Enjoy best eval_policy
obs = env3.reset()
print(obs.shape)
## Load bestmodel from eval
#if not isinstance(model_test, PPO2):
model_test = PPO2.load(eval_log+'best_model', env3)
done = False
pasos = 0
_states=None
while not done: # and pasos<1500:
action, _states = model_test.predict(obs, deterministic=True)
obs, reward, done, info = env3.step(action)
env3.render()
pasos+=1
env3.close()
print()
print(reward, done, pasos)
print(action, _states)
model_test.save(file+'_evalbest', cloudpickle=True)
env2.close()
env3.close()
env_test.close()
print(action, _states)
obs.shape
```
| github_jupyter |
## Connect to Chicago Data Portal API - Business Licenses Data
```
#Import dependencies
import pandas as pd
import requests
import json
# Google developer API key
from config2 import API_chi_key
# Build API URL
# API calls = 8000 (based on zipcode and issued search results)
# Filters: 'application type' Issued
target_URL = f"https://data.cityofchicago.org/resource/xqx5-8hwx.json?$$app_token={API_chi_key}&$limit=8000&application_type=ISSUE&zip_code="
# Create list of zipcodes we are examining based
# on three different businesses of interest
zipcodes = ["60610","60607","60606","60661",
"60614","60622","60647","60654"]
# Create a request to get json data on business licences
responses = []
for zipcode in zipcodes:
license_response = requests.get(target_URL + zipcode).json()
responses.append(license_response)
len(responses)
# Create separate variables for the 8 responses for zipcodes
# Data loaded in nested groups based on zipcodes, so
# needed to make them separate
zip_60610 = responses[0]
zip_60607 = responses[1]
zip_60606 = responses[2]
zip_60661 = responses[3]
zip_60614 = responses[4]
zip_60622 = responses[5]
zip_60647 = responses[6]
zip_60654 = responses[7]
# Load the 60610 json response into a pandas DataFrame
zip_60610_data = pd.DataFrame(zip_60610)
# Create list of the json object variables
# excluding zip_60610 bc that will start as a DF
zip_data = [zip_60607, zip_60606, zip_60661, zip_60614,
zip_60622, zip_60647, zip_60654]
# Create a new DF to save compiled business data into
all_7_zipcodes = zip_60610_data
# Append json objects to all_7_zipcode DF
# Print length of all_7_zipcode to check adding correctly
for zipcodes_df in zip_data:
all_7_zipcodes = all_7_zipcodes.append(zipcodes_df)
len(all_7_zipcodes)
# Get list of headers of all_7_zipcodes
list(all_7_zipcodes)
# Select certain columns to show
core_info_busi_licences = all_7_zipcodes[['legal_name', 'doing_business_as_name',
'zip_code', 'license_description',
'business_activity', 'application_type',
'license_start_date', 'latitude', 'longitude']]
# Get an idea of the number of null values in each column
core_info_busi_licences.isna().sum()
# Add separate column for just the start year
# Will use later when selecting the year businesses were created
core_info_busi_licences['start_year'] = core_info_busi_licences['license_start_date']
# Edit 'start_year' to just include year from date information
core_info_busi_licences['start_year'] = core_info_busi_licences['start_year'].str[0:4]
# Explore what kinds of businesses are missing "latitude" and "longitude"
# Also, the 'business_activity' licenses have null values (limited Business Licences?)
core_info_busi_licences[core_info_busi_licences.isnull().any(axis=1)]
# Get rid of NaN values in 'latitude' and 'license_start_date'
core_info_busi_licences.dropna(subset=['latitude'], inplace=True)
core_info_busi_licences.dropna(subset=['license_start_date'], inplace=True)
core_info_busi_licences['application_type'].unique()
# Cast 'start_year' column as an integer
core_info_busi_licences['start_year'] = core_info_busi_licences['start_year'].astype('int64')
# Confirm that NaN values for 'latitude' and 'license_start_date'
# were dropped
core_info_busi_licences.isna().sum()
# Record number of businesses licenses pulled
len(core_info_busi_licences)
```
## Connect to MySQL database
```
# Python SQL toolkit and Object Relational Mapper
import sqlalchemy
from sqlalchemy.ext.automap import automap_base
from sqlalchemy import create_engine
from config2 import mysql_password
# Declare a Base using `automap_base()`
Base = automap_base()
# Create engine for the local `real_tech_db` MySQL database
# engine = create_engine("sqlite://", echo=False)
engine = create_engine(f'mysql://root:{mysql_password}@localhost:3306/real_tech_db')
# Copy 'core_info_busi_licenses' db to MySql database
core_info_busi_licences.to_sql('business_licenses',
con=engine,
if_exists='replace',
index_label=True)
```
| github_jupyter |
<a href="https://colab.research.google.com/github/NeuromatchAcademy/course-content/blob/master/tutorials/W3D1_BayesianDecisions/W3D1_Tutorial1.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# Tutorial 1: Bayes with a binary hidden state
**Week 3, Day 1: Bayesian Decisions**
**By Neuromatch Academy**
__Content creators:__ [insert your name here]
__Content reviewers:__
# Tutorial Objectives
This is the first in a series of two core tutorials on Bayesian statistics. In these tutorials, we will explore the fundamental concepts of the Bayesian approach from two perspectives. This tutorial works through an example of Bayesian inference and decision making using a binary hidden state. The second main tutorial extends these concepts to a continuous hidden state. In the following days, each of these basic ideas will be extended: first through time, as we consider what happens when we infer a hidden state from multiple observations and when the hidden state changes across time. On the third day, we will introduce how to use inference and decisions to select actions for optimal control. For this tutorial, you will be introduced to our binary state fishing problem!
This notebook will introduce the fundamental building blocks for Bayesian statistics:
1. How do we use probability distributions to represent hidden states?
2. How does marginalization work and how can we use it?
3. How do we combine new information with our prior knowledge?
4. How do we combine the possible loss (or gain) for making a decision with our probabilistic knowledge?
```
#@title Video 1: Introduction to Bayesian Statistics
from IPython.display import YouTubeVideo
video = YouTubeVideo(id='JiEIn9QsrFg', width=854, height=480, fs=1)
print("Video available at https://youtube.com/watch?v=" + video.id)
video
```
## Setup
Please execute the cells below to initialize the notebook environment.
```
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import patches
from matplotlib import transforms
from matplotlib import gridspec
from scipy.optimize import fsolve
from collections import namedtuple
#@title Figure Settings
import ipywidgets as widgets # interactive display
from ipywidgets import GridspecLayout
from IPython.display import clear_output
%config InlineBackend.figure_format = 'retina'
plt.style.use("https://raw.githubusercontent.com/NeuromatchAcademy/course-content/master/nma.mplstyle")
import warnings
warnings.filterwarnings("ignore")
# @title Plotting Functions
def plot_joint_probs(P, ):
assert np.all(P >= 0), "probabilities should be >= 0"
# normalize if not
P = P / np.sum(P)
marginal_y = np.sum(P,axis=1)
marginal_x = np.sum(P,axis=0)
# definitions for the axes
left, width = 0.1, 0.65
bottom, height = 0.1, 0.65
spacing = 0.005
# start with a square Figure
fig = plt.figure(figsize=(5, 5))
joint_prob = [left, bottom, width, height]
rect_histx = [left, bottom + height + spacing, width, 0.2]
rect_histy = [left + width + spacing, bottom, 0.2, height]
rect_x_cmap = plt.cm.Blues
rect_y_cmap = plt.cm.Reds
# Show joint probs and marginals
ax = fig.add_axes(joint_prob)
ax_x = fig.add_axes(rect_histx, sharex=ax)
ax_y = fig.add_axes(rect_histy, sharey=ax)
# Show joint probs and marginals
ax.matshow(P,vmin=0., vmax=1., cmap='Greys')
ax_x.bar(0, marginal_x[0], facecolor=rect_x_cmap(marginal_x[0]))
ax_x.bar(1, marginal_x[1], facecolor=rect_x_cmap(marginal_x[1]))
ax_y.barh(0, marginal_y[0], facecolor=rect_y_cmap(marginal_y[0]))
ax_y.barh(1, marginal_y[1], facecolor=rect_y_cmap(marginal_y[1]))
# set limits
ax_x.set_ylim([0,1])
ax_y.set_xlim([0,1])
# show values
ind = np.arange(2)
x,y = np.meshgrid(ind,ind)
for i,j in zip(x.flatten(), y.flatten()):
c = f"{P[i,j]:.2f}"
ax.text(j,i, c, va='center', ha='center', color='black')
for i in ind:
v = marginal_x[i]
c = f"{v:.2f}"
ax_x.text(i, v +0.1, c, va='center', ha='center', color='black')
v = marginal_y[i]
c = f"{v:.2f}"
ax_y.text(v+0.2, i, c, va='center', ha='center', color='black')
# set up labels
ax.xaxis.tick_bottom()
ax.yaxis.tick_left()
ax.set_xticks([0,1])
ax.set_yticks([0,1])
ax.set_xticklabels(['Silver','Gold'])
ax.set_yticklabels(['Small', 'Large'])
ax.set_xlabel('color')
ax.set_ylabel('size')
ax_x.axis('off')
ax_y.axis('off')
return fig
# test
# P = np.random.rand(2,2)
# P = np.asarray([[0.9, 0.8], [0.4, 0.1]])
# P = P / np.sum(P)
# fig = plot_joint_probs(P)
# plt.show(fig)
# plt.close(fig)
# fig = plot_prior_likelihood(0.5, 0.3)
# plt.show(fig)
# plt.close(fig)
def plot_prior_likelihood_posterior(prior, likelihood, posterior):
# definitions for the axes
left, width = 0.05, 0.3
bottom, height = 0.05, 0.9
padding = 0.1
small_width = 0.1
left_space = left + small_width + padding
added_space = padding + width
fig = plt.figure(figsize=(10, 4))
rect_prior = [left, bottom, small_width, height]
rect_likelihood = [left_space , bottom , width, height]
rect_posterior = [left_space + added_space, bottom , width, height]
ax_prior = fig.add_axes(rect_prior)
ax_likelihood = fig.add_axes(rect_likelihood, sharey=ax_prior)
ax_posterior = fig.add_axes(rect_posterior, sharey = ax_prior)
rect_colormap = plt.cm.Blues
# Show posterior probs and marginals
ax_prior.barh(0, prior[0], facecolor = rect_colormap(prior[0, 0]))
ax_prior.barh(1, prior[1], facecolor = rect_colormap(prior[1, 0]))
ax_likelihood.matshow(likelihood, vmin=0., vmax=1., cmap='Reds')
ax_posterior.matshow(posterior, vmin=0., vmax=1., cmap='Greens')
# Probabilities plot details
ax_prior.set(xlim = [1, 0], yticks = [0, 1], yticklabels = ['left', 'right'],
ylabel = 'state (s)', title = "Prior p(s)")
ax_prior.axis('off')
# Likelihood plot details
ax_likelihood.set(xticks = [0, 1], xticklabels = ['fish', 'no fish'],
yticks = [0, 1], yticklabels = ['left', 'right'],
ylabel = 'state (s)', xlabel = 'measurement (m)',
title = 'Likelihood p(m (right) | s)')
ax_likelihood.xaxis.set_ticks_position('bottom')
ax_likelihood.spines['left'].set_visible(False)
ax_likelihood.spines['bottom'].set_visible(False)
# Posterior plot details
ax_posterior.set(xticks = [0, 1], xticklabels = ['fish', 'no fish'],
yticks = [0, 1], yticklabels = ['left', 'right'],
ylabel = 'state (s)', xlabel = 'measurement (m)',
title = 'Posterior p(s | m)')
ax_posterior.xaxis.set_ticks_position('bottom')
ax_posterior.spines['left'].set_visible(False)
ax_posterior.spines['bottom'].set_visible(False)
# show values
ind = np.arange(2)
x,y = np.meshgrid(ind,ind)
for i,j in zip(x.flatten(), y.flatten()):
c = f"{posterior[i,j]:.2f}"
ax_posterior.text(j,i, c, va='center', ha='center', color='black')
for i,j in zip(x.flatten(), y.flatten()):
c = f"{likelihood[i,j]:.2f}"
ax_likelihood.text(j,i, c, va='center', ha='center', color='black')
for i in ind:
v = prior[i, 0]
c = f"{v:.2f}"
ax_prior.text(v+0.2, i, c, va='center', ha='center', color='black')
def plot_prior_likelihood(ps, p_a_s1, p_a_s0, measurement):
likelihood = np.asarray([[p_a_s1, 1-p_a_s1],[p_a_s0, 1-p_a_s0]])
assert 0.0 <= ps <= 1.0
prior = np.asarray([ps, 1 - ps])
if measurement:
posterior = likelihood[:, 0] * prior
else:
posterior = (likelihood[:, 1] * prior).reshape(-1)
posterior /= np.sum(posterior)
# definitions for the axes
left, width = 0.05, 0.3
bottom, height = 0.05, 0.9
padding = 0.1
small_width = 0.22
left_space = left + small_width + padding
small_padding = 0.05
fig = plt.figure(figsize=(10, 4))
rect_prior = [left, bottom, small_width, height]
rect_likelihood = [left_space , bottom , width, height]
rect_posterior = [left_space + width + small_padding, bottom , small_width, height]
ax_prior = fig.add_axes(rect_prior)
ax_likelihood = fig.add_axes(rect_likelihood, sharey=ax_prior)
ax_posterior = fig.add_axes(rect_posterior, sharey=ax_prior)
prior_colormap = plt.cm.Blues
posterior_colormap = plt.cm.Greens
# Show posterior probs and marginals
ax_prior.barh(0, prior[0], facecolor = prior_colormap(prior[0]))
ax_prior.barh(1, prior[1], facecolor = prior_colormap(prior[1]))
ax_likelihood.matshow(likelihood, vmin=0., vmax=1., cmap='Reds')
# ax_posterior.matshow(posterior, vmin=0., vmax=1., cmap='')
ax_posterior.barh(0, posterior[0], facecolor = posterior_colormap(posterior[0]))
ax_posterior.barh(1, posterior[1], facecolor = posterior_colormap(posterior[1]))
# Probabilities plot details
ax_prior.set(xlim = [1, 0], yticks = [0, 1], yticklabels = ['left', 'right'],
ylabel = 'state (s)', title = "Prior p(s)")
ax_prior.axis('off')
# Likelihood plot details
ax_likelihood.set(xticks = [0, 1], xticklabels = ['fish', 'no fish'],
yticks = [0, 1], yticklabels = ['left', 'right'],
ylabel = 'state (s)', xlabel = 'measurement (m)',
title = 'Likelihood p(m | s)')
ax_likelihood.xaxis.set_ticks_position('bottom')
ax_likelihood.spines['left'].set_visible(False)
ax_likelihood.spines['bottom'].set_visible(False)
# Posterior plot details
ax_posterior.set(xlim = [0, 1], yticks = [0, 1], yticklabels = ['left', 'right'],
ylabel = 'state (s)', title = "Posterior p(s | m)")
ax_posterior.axis('off')
# ax_posterior.set(xticks = [0, 1], xticklabels = ['fish', 'no fish'],
# yticks = [0, 1], yticklabels = ['left', 'right'],
# ylabel = 'state (s)', xlabel = 'measurement (m)',
# title = 'Posterior p(s | m)')
# ax_posterior.xaxis.set_ticks_position('bottom')
# ax_posterior.spines['left'].set_visible(False)
# ax_posterior.spines['bottom'].set_visible(False)
# show values
ind = np.arange(2)
x,y = np.meshgrid(ind,ind)
# for i,j in zip(x.flatten(), y.flatten()):
# c = f"{posterior[i,j]:.2f}"
# ax_posterior.text(j,i, c, va='center', ha='center', color='black')
for i in ind:
v = posterior[i]
c = f"{v:.2f}"
ax_posterior.text(v+0.2, i, c, va='center', ha='center', color='black')
for i,j in zip(x.flatten(), y.flatten()):
c = f"{likelihood[i,j]:.2f}"
ax_likelihood.text(j,i, c, va='center', ha='center', color='black')
for i in ind:
v = prior[i]
c = f"{v:.2f}"
ax_prior.text(v+0.2, i, c, va='center', ha='center', color='black')
return fig
# fig = plot_prior_likelihood(0.5, 0.3)
# plt.show(fig)
# plt.close(fig)
from matplotlib import colors
def plot_utility(ps):
prior = np.asarray([ps, 1 - ps])
utility = np.array([[2, -3], [-2, 1]])
expected = prior @ utility
# definitions for the axes
left, width = 0.05, 0.16
bottom, height = 0.05, 0.9
padding = 0.04
small_width = 0.1
left_space = left + small_width + padding
added_space = padding + width
fig = plt.figure(figsize=(17, 3))
rect_prior = [left, bottom, small_width, height]
rect_utility = [left + added_space , bottom , width, height]
rect_expected = [left + 2* added_space, bottom , width, height]
ax_prior = fig.add_axes(rect_prior)
ax_utility = fig.add_axes(rect_utility, sharey=ax_prior)
ax_expected = fig.add_axes(rect_expected)
rect_colormap = plt.cm.Blues
# Data of plots
ax_prior.barh(0, prior[0], facecolor = rect_colormap(prior[0]))
ax_prior.barh(1, prior[1], facecolor = rect_colormap(prior[1]))
ax_utility.matshow(utility, cmap='cool')
norm = colors.Normalize(vmin=-3, vmax=3)
ax_expected.bar(0, expected[0], facecolor = rect_colormap(norm(expected[0])))
ax_expected.bar(1, expected[1], facecolor = rect_colormap(norm(expected[1])))
# Probabilities plot details
ax_prior.set(xlim = [1, 0], yticks = [0, 1], yticklabels = ['left', 'right'],
ylabel = 'state (s)', title = "Probability of state")
ax_prior.axis('off')
# Utility plot details
ax_utility.set(xticks = [0, 1], xticklabels = ['left', 'right'],
yticks = [0, 1], yticklabels = ['left', 'right'],
ylabel = 'state (s)', xlabel = 'action (a)',
title = 'Utility')
ax_utility.xaxis.set_ticks_position('bottom')
ax_utility.spines['left'].set_visible(False)
ax_utility.spines['bottom'].set_visible(False)
# Expected utility plot details
ax_expected.set(title = 'Expected utility', ylim = [-3, 3],
xticks = [0, 1], xticklabels = ['left', 'right'],
xlabel = 'action (a)',
yticks = [])
ax_expected.xaxis.set_ticks_position('bottom')
ax_expected.spines['left'].set_visible(False)
ax_expected.spines['bottom'].set_visible(False)
# show values
ind = np.arange(2)
x,y = np.meshgrid(ind,ind)
for i,j in zip(x.flatten(), y.flatten()):
c = f"{utility[i,j]:.2f}"
ax_utility.text(j,i, c, va='center', ha='center', color='black')
for i in ind:
v = prior[i]
c = f"{v:.2f}"
ax_prior.text(v+0.2, i, c, va='center', ha='center', color='black')
for i in ind:
v = expected[i]
c = f"{v:.2f}"
ax_expected.text(i, 2.5, c, va='center', ha='center', color='black')
return fig
def plot_prior_likelihood_utility(ps, p_a_s1, p_a_s0,measurement):
assert 0.0 <= ps <= 1.0
assert 0.0 <= p_a_s1 <= 1.0
assert 0.0 <= p_a_s0 <= 1.0
prior = np.asarray([ps, 1 - ps])
likelihood = np.asarray([[p_a_s1, 1-p_a_s1],[p_a_s0, 1-p_a_s0]])
utility = np.array([[2.0, -3.0], [-2.0, 1.0]])
# expected = np.zeros_like(utility)
if measurement:
posterior = likelihood[:, 0] * prior
else:
posterior = (likelihood[:, 1] * prior).reshape(-1)
posterior /= np.sum(posterior)
# expected[:, 0] = utility[:, 0] * posterior
# expected[:, 1] = utility[:, 1] * posterior
expected = posterior @ utility
# definitions for the axes
left, width = 0.05, 0.15
bottom, height = 0.05, 0.9
padding = 0.05
small_width = 0.1
large_padding = 0.07
left_space = left + small_width + large_padding
fig = plt.figure(figsize=(17, 4))
rect_prior = [left, bottom+0.05, small_width, height-0.1]
rect_likelihood = [left_space, bottom , width, height]
rect_posterior = [left_space + padding + width - 0.02, bottom+0.05 , small_width, height-0.1]
rect_utility = [left_space + padding + width + padding + small_width, bottom , width, height]
rect_expected = [left_space + padding + width + padding + small_width + padding + width, bottom+0.05 , width, height-0.1]
ax_likelihood = fig.add_axes(rect_likelihood)
ax_prior = fig.add_axes(rect_prior, sharey=ax_likelihood)
ax_posterior = fig.add_axes(rect_posterior, sharey=ax_likelihood)
ax_utility = fig.add_axes(rect_utility, sharey=ax_posterior)
ax_expected = fig.add_axes(rect_expected)
prior_colormap = plt.cm.Blues
posterior_colormap = plt.cm.Greens
expected_colormap = plt.cm.Wistia
# Show posterior probs and marginals
ax_prior.barh(0, prior[0], facecolor = prior_colormap(prior[0]))
ax_prior.barh(1, prior[1], facecolor = prior_colormap(prior[1]))
ax_likelihood.matshow(likelihood, vmin=0., vmax=1., cmap='Reds')
ax_posterior.barh(0, posterior[0], facecolor = posterior_colormap(posterior[0]))
ax_posterior.barh(1, posterior[1], facecolor = posterior_colormap(posterior[1]))
ax_utility.matshow(utility, vmin=0., vmax=1., cmap='cool')
# ax_expected.matshow(expected, vmin=0., vmax=1., cmap='Wistia')
ax_expected.bar(0, expected[0], facecolor = expected_colormap(expected[0]))
ax_expected.bar(1, expected[1], facecolor = expected_colormap(expected[1]))
# Probabilities plot details
ax_prior.set(xlim = [1, 0], yticks = [0, 1], yticklabels = ['left', 'right'],
ylabel = 'state (s)', title = "Prior p(s)")
ax_prior.axis('off')
# Likelihood plot details
ax_likelihood.set(xticks = [0, 1], xticklabels = ['fish', 'no fish'],
yticks = [0, 1], yticklabels = ['left', 'right'],
ylabel = 'state (s)', xlabel = 'measurement (m)',
title = 'Likelihood p(m | s)')
ax_likelihood.xaxis.set_ticks_position('bottom')
ax_likelihood.spines['left'].set_visible(False)
ax_likelihood.spines['bottom'].set_visible(False)
# Posterior plot details
ax_posterior.set(xlim = [0, 1], yticks = [0, 1], yticklabels = ['left', 'right'],
ylabel = 'state (s)', title = "Posterior p(s | m)")
ax_posterior.axis('off')
# Utility plot details
ax_utility.set(xticks = [0, 1], xticklabels = ['left', 'right'],
xlabel = 'action (a)',
title = 'Utility')
ax_utility.xaxis.set_ticks_position('bottom')
ax_utility.spines['left'].set_visible(False)
ax_utility.spines['bottom'].set_visible(False)
# Expected Utility plot details
ax_expected.set(ylim = [-2, 2], xticks = [0, 1], xticklabels = ['left', 'right'],
xlabel = 'action (a)', title = 'Expected utility', yticks=[])
# ax_expected.axis('off')
ax_expected.spines['left'].set_visible(False)
# ax_expected.set(xticks = [0, 1], xticklabels = ['left', 'right'],
# xlabel = 'action (a)',
# title = 'Expected utility')
# ax_expected.xaxis.set_ticks_position('bottom')
# ax_expected.spines['left'].set_visible(False)
# ax_expected.spines['bottom'].set_visible(False)
# show values
ind = np.arange(2)
x,y = np.meshgrid(ind,ind)
for i in ind:
v = posterior[i]
c = f"{v:.2f}"
ax_posterior.text(v+0.2, i, c, va='center', ha='center', color='black')
for i,j in zip(x.flatten(), y.flatten()):
c = f"{likelihood[i,j]:.2f}"
ax_likelihood.text(j,i, c, va='center', ha='center', color='black')
for i,j in zip(x.flatten(), y.flatten()):
c = f"{utility[i,j]:.2f}"
ax_utility.text(j,i, c, va='center', ha='center', color='black')
# for i,j in zip(x.flatten(), y.flatten()):
# c = f"{expected[i,j]:.2f}"
# ax_expected.text(j,i, c, va='center', ha='center', color='black')
for i in ind:
v = prior[i]
c = f"{v:.2f}"
ax_prior.text(v+0.2, i, c, va='center', ha='center', color='black')
for i in ind:
v = expected[i]
c = f"{v:.2f}"
ax_expected.text(i, v, c, va='center', ha='center', color='black')
# # show values
# ind = np.arange(2)
# x,y = np.meshgrid(ind,ind)
# for i,j in zip(x.flatten(), y.flatten()):
# c = f"{P[i,j]:.2f}"
# ax.text(j,i, c, va='center', ha='center', color='white')
# for i in ind:
# v = marginal_x[i]
# c = f"{v:.2f}"
# ax_x.text(i, v +0.2, c, va='center', ha='center', color='black')
# v = marginal_y[i]
# c = f"{v:.2f}"
# ax_y.text(v+0.2, i, c, va='center', ha='center', color='black')
return fig
# @title Helper Functions
def compute_marginal(px, py, cor):
# calculate 2x2 joint probabilities given marginals p(x=1), p(y=1) and correlation
p11 = px*py + cor*np.sqrt(px*py*(1-px)*(1-py))
p01 = px - p11
p10 = py - p11
p00 = 1.0 - p11 - p01 - p10
return np.asarray([[p00, p01], [p10, p11]])
# test
# print(compute_marginal(0.4, 0.6, -0.8))
def compute_cor_range(px,py):
# Calculate the allowed range of correlation values given marginals p(x=1) and p(y=1)
def p11(corr):
return px*py + corr*np.sqrt(px*py*(1-px)*(1-py))
def p01(corr):
return px - p11(corr)
def p10(corr):
return py - p11(corr)
def p00(corr):
return 1.0 - p11(corr) - p01(corr) - p10(corr)
Cmax = min(fsolve(p01, 0.0), fsolve(p10, 0.0))
Cmin = max(fsolve(p11, 0.0), fsolve(p00, 0.0))
return Cmin, Cmax
```
---
# Section 1: Gone Fishin'
```
#@title Video 2: Gone Fishin'
from IPython.display import YouTubeVideo
video = YouTubeVideo(id='McALsTzb494', width=854, height=480, fs=1)
print("Video available at https://youtube.com/watch?v=" + video.id)
video
```
You were just introduced to the **binary hidden state problem** we are going to explore. You need to decide which side to fish on. We know fish like to school together. On different days the school of fish is either on the left or right side, but we don’t know what the case is today. We will represent our knowledge probabilistically, asking how to make a decision (where to decide the fish are or where to fish) and what to expect in terms of gains or losses. In the next two sections we will consider just the probability of where the fish might be and what you gain or lose by choosing where to fish.
Remember, you can either think of yourself as a scientist conducting an experiment or as a brain trying to make a decision. The Bayesian approach is the same!
---
# Section 2: Deciding where to fish
```
#@title Video 3: Utility
from IPython.display import YouTubeVideo
video = YouTubeVideo(id='xvIVZrqF_5s', width=854, height=480, fs=1)
print("Video available at https://youtube.com/watch?v=" + video.id)
video
```
You know the probability that the school of fish is on the left side of the dock today, $P(s = left)$. You also know the probability that it is on the right, $P(s = right)$, because these two probabilities must add up to 1. You need to decide where to fish. It may seem obvious: you could just fish on the side where the probability of the fish being there is higher! Unfortunately, decisions and actions are always a little more complicated. Deciding where to fish may be influenced by more than just the probability of the school of fish being there, as we saw with the potential issues of submarines and sunburn.
We quantify these factors numerically using **utility**, which describes the consequences of your actions: how much value you gain (or if negative, lose) given the state of the world ($s$) and the action you take ($a$). In our example, our utility can be summarized as:
| Utility: U(s,a) | a = left | a = right |
| ----------------- |----------|----------|
| s = left | 2 | -3 |
| s = right | -2 | 1 |
To use utility to choose an action, we calculate the **expected utility** of that action by weighing these utilities with the probability of that state occurring. This allows us to choose actions by taking probabilities of events into account: we don't care if the outcome of an action-state pair is a loss if the probability of that state is very low. We can formalize this as:
$$\text{Expected utility of action a} = \sum_{s}U(s,a)P(s) $$
In other words, the expected utility of an action a is the sum over possible states of the utility of that action and state times the probability of that state.
## Interactive Demo 2: Exploring the decision
Let's start to get a sense of how all this works.
Take a look at the interactive demo below. You can change the probability that the school of fish is on the left side ($p(s = left)$) using the slider. You will see the utility matrix and the corresponding expected utility of each action.
First, make sure you understand how the expected utility of each action is being computed from the probabilities and the utility values. In the initial state, the probability of the fish being on the left is 0.9 and on the right is 0.1. The expected utility of fishing on the left is then $U(s = left,a = left)p(s = left) + U(s = right,a = left)p(s = right) = 2(0.9) + (-2)(0.1) = 1.6$.
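The same calculation takes only a couple of lines of code. Here is a minimal sketch (assuming NumPy and the utility table above) that reproduces the 1.6 value, along with the expected utility of fishing on the right:
```
import numpy as np

prior = np.array([0.9, 0.1])        # p(s = left), p(s = right)
utility = np.array([[2.0, -3.0],    # rows: state (left, right)
                    [-2.0, 1.0]])   # columns: action (left, right)
expected_utility = prior @ utility  # one value per action
print(expected_utility)             # [ 1.6 -2.6]
```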
For each of these scenarios, think and discuss first. Then use the demo to try out each and see if your action would have been correct (that is, if the expected value of that action is the highest).
1. You just arrived at the dock for the first time and have no sense of where the fish might be. So you guess that the probability of the school being on the left side is 0.5 (so the probability on the right side is also 0.5). Which side would you choose to fish on given our utility values?
2. You think that the probability of the school being on the left side is very low (0.1) and correspondingly high on the right side (0.9). Which side would you choose to fish on given our utility values?
3. What would you choose if the probability of the school being on the left side is slightly lower than on the right side (0.4 vs 0.6)?
```
# @markdown Execute this cell to use the widget
ps_widget = widgets.FloatSlider(0.9, description='p(s = left)', min=0.0, max=1.0, step=0.01)
@widgets.interact(
ps = ps_widget,
)
def make_utility_plot(ps):
fig = plot_utility(ps)
plt.show(fig)
plt.close(fig)
return None
# to_remove explanation
# 1) With equal probabilities, the expected utility is higher on the left side,
# since that is the side without submarines, so you would choose to fish there.
# 2) If the probability that the fish is on the right side is high, you would
# choose to fish there. The high probability of fish being on the right far outweighs
# the slightly higher utilities from fishing on the left (as you are unlikely to gain these)
# 3) If the probability that the fish is on the right side is just slightly higher
#. than on the left, you would choose the left side as the expected utility is still
#. higher on the left. Note that in this situation, you are not simply choosing the
#. side with the higher probability - the utility really matters here for the decision
```
In this section, you have seen that both the utility of various state and action pairs and our knowledge of the probability of each state affects your decision. Importantly, we want our knowledge of the probability of each state to be as accurate as possible!
So how do we know these probabilities? We may have prior knowledge from years of fishing at the same dock. Over those years, we may have learned that the fish are more likely to be on the left side for example. We want to make sure this knowledge is as accurate as possible though. To do this, we want to collect more data, or take some more measurements! For the next few sections, we will focus on making our knowledge of the probability as accurate as possible, before coming back to using utility to make decisions.
---
# Section 3: Likelihood of the fish being on either side
```
#@title Video 4: Likelihood
from IPython.display import YouTubeVideo
video = YouTubeVideo(id='l4m0JzMWGio', width=854, height=480, fs=1)
print("Video available at https://youtube.com/watch?v=" + video.id)
video
```
First, we'll think about what it means to take a measurement (also often called an observation or just data) and what it tells you about what the hidden state may be. Specifically, we'll be looking at the **likelihood**, which is the probability of your measurement ($m$) given the hidden state ($s$): $P(m | s)$. Remember that in this case, the hidden state is which side of the dock the school of fish is on.
We will watch someone fish (for let's say 10 minutes) and our measurement is whether they catch a fish or not. We know something about what catching a fish means for the likelihood of the fish being on one side or the other.
## Think! 3: Guessing the location of the fish
Let's say we go to a different dock from the one in the video. Here, there are different probabilities of catching fish given the state of the world. In this case, if they fish on the side of the dock where the fish are, they have a 70% chance of catching a fish. Otherwise, they catch a fish with only 20% probability.
The fisherperson is fishing on the left side.
1) Figure out each of the following:
- probability of catching a fish given that the school of fish is on the left side, $P(m = catch\text{ } fish | s = left )$
- probability of not catching a fish given that the school of fish is on the left side, $P(m = no \text{ } fish | s = left)$
- probability of catching a fish given that the school of fish is on the right side, $P(m = catch \text{ } fish | s = right)$
- probability of not catching a fish given that the school of fish is on the right side, $P(m = no \text{ } fish | s = right)$
2) If the fisherperson catches a fish, which side would you guess the school is on? Why?
3) If the fisherperson does not catch a fish, which side would you guess the school is on? Why?
```
#to_remove explanation
# 1) The fisherperson is on the left side so:
# - P(m = catch fish | s = left) = 0.7 because they have a 70% chance of catching
# a fish when on the same side as the school
# - P(m = no fish | s = left) = 0.3 because the probability of catching a fish
# and not catching a fish for a given state must add up to 1 as these
# are the only options: 1 - 0.7 = 0.3
# - P(m = catch fish | s = right) = 0.2
# - P(m = no fish | s = right) = 0.8
# 2) If the fisherperson catches a fish, you would guess the school of fish is on the
# left side. This is because the probability of catching a fish given that the
# school is on the left side (0.7) is higher than the probability given that
# the school is on the right side (0.2).
# 3) If the fisherperson does not catch a fish, you would guess the school of fish is on the
# right side. This is because the probability of not catching a fish given that the
# school is on the right side (0.8) is higher than the probability given that
# the school is on the left side (0.3).
```
In the previous exercise, you guessed where the school of fish was based on the measurement you took (watching someone fish). You did this by choosing the state (side of the school) that maximized the probability of the measurement. In other words, you estimated the state by maximizing the likelihood: you chose the state with the highest probability of the measurement given the state, $P(m|s)$. This is called maximum likelihood estimation (MLE) and you've encountered it before during this course, in W1D3!
What if you had been going to this dock for years and you knew that the fish were almost always on the left side? This would probably affect how you make your estimate: you would rely less on the single new measurement and more on your prior knowledge. This is the idea behind Bayesian inference, as we will see later in this tutorial!
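As a minimal sketch of that maximum likelihood estimate (assuming NumPy and the 70%/20% catch probabilities above), you simply pick the state whose likelihood of the observed measurement is largest:
```
import numpy as np

# rows: state (left, right); columns: measurement (catch fish, no fish)
likelihood = np.array([[0.7, 0.3],
                       [0.2, 0.8]])
states = ['left', 'right']

m = 0  # observed measurement: 0 = catch fish, 1 = no fish
mle_state = states[np.argmax(likelihood[:, m])]
print(mle_state)  # 'left': the state that makes the observed catch most likely
```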
---
# Section 4: Correlation and marginalization
```
#@title Video 5: Correlation and marginalization
from IPython.display import YouTubeVideo
video = YouTubeVideo(id='vsDjtWi-BVo', width=854, height=480, fs=1)
print("Video available at https://youtube.com/watch?v=" + video.id)
video
```
In this section, we are going to take a step back for a bit and think more generally about the amount of information shared between two random variables. We want to know how much information you gain when you observe one variable (take a measurement) if you know something about another. We will see that the fundamental concept is the same if we think about two attributes, for example the size and color of the fish, or the prior information and the likelihood.
## Math Exercise 4: Computing marginal likelihoods
To understand the information between two variables, let's first consider the size and color of the fish.
| P(X, Y) | Y = silver | Y = gold |
| ----------------- |----------|----------|
| X = small | 0.4 | 0.2 |
| X = large | 0.1 | 0.3 |
The table above shows us the **joint probabilities**: the probability of both specific attributes occurring together. For example, the probability of a fish being small and silver, $P(X = small, Y = silver)$, is 0.4.
We want to know the probability of a fish being small regardless of color. Since the fish are either silver or gold, this would be the probability of a fish being small and silver plus the probability of a fish being small and gold. This is an example of marginalizing, or summing out, the variable we are not interested in across the rows or columns. In math speak: $P(X = small) = \sum_y{P(X = small, Y)}$. This gives us a **marginal probability**, the probability of a variable outcome (in this case size), regardless of the other variables (in this case color).
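Here is a minimal sketch of that marginalization (assuming NumPy and the joint table above); summing the joint table along one axis gives the marginal distribution of the other variable, which you can use to check your answers below:
```
import numpy as np

# rows: size (small, large); columns: color (silver, gold)
P = np.array([[0.4, 0.2],
              [0.1, 0.3]])
p_size = P.sum(axis=1)   # marginal over color: [P(small), P(large)]
p_color = P.sum(axis=0)  # marginal over size:  [P(silver), P(gold)]
print(p_size, p_color)
```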
Please complete the following math problems to further practice thinking through probabilities:
1. Calculate the probability of a fish being silver.
2. Calculate the probability of a fish being small, large, silver, or gold.
3. Calculate the probability of a fish being small OR gold. (Hint: $P(A\ \textrm{or}\ B) = P(A) + P(B) - P(A\ \textrm{and}\ B)$)
```
# to_remove explanation
# 1) The probability of a fish being silver is the joint probability of it being
#. small and silver plus the joint probability of it being large and silver:
#
#. P(Y = silver) = P(X = small, Y = silver) + P(X = large, Y = silver)
#. = 0.4 + 0.1
#. = 0.5
# 2) This is all the possibilities as in this scenario, our fish can only be small
#. or large, silver or gold. So the probability is 1 - the fish has to be at
#. least one of these.
#. 3) First we compute the marginal probabilities
#. P(X = small) = P(X = small, Y = silver) + P(X = small, Y = gold) = 0.6
#. P(Y = gold) = P(X = small, Y = gold) + P(X = large, Y = gold) = 0.5
#. We already know the joint probability: P(X = small, Y = gold) = 0.2
#. We can now use the given formula:
#. P( X = small or Y = gold) = P(X = small) + P(Y = gold) - P(X = small, Y = gold)
#. = 0.6 + 0.5 - 0.2
#. = 0.9
```
## Think! 4: Covarying probability distributions
The relationship between the marginal probabilities and the joint probabilities is determined by the correlation between the two random variables - a normalized measure of how much the variables covary. We can also think of this as gaining some information about one of the variables when we observe a measurement from the other. We will think about this more formally in Tutorial 2.
Here, we want to think about how the correlation between size and color of these fish changes how much information we gain about one attribute based on the other. See Bonus Section 1 for the formula for correlation.
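As a small numeric sketch of this idea (reusing the `compute_marginal` helper defined in the Helper Functions cell above), you can build the joint table for a few correlation values and see how much observing the color changes your belief about the size:
```
# Assumes p(color = golden) = p(size = large) = 0.5, as in the widget defaults
for cor in [0.0, 0.4, 0.8]:
    P = compute_marginal(0.5, 0.5, cor)           # joint table P[size, color]
    p_large_given_gold = P[1, 1] / P[:, 1].sum()  # P(size = large | color = golden)
    print(f"rho = {cor:.1f}: P(large | golden) = {p_large_given_gold:.2f}")
# With rho = 0, color tells you nothing (0.50); as rho grows, it tells you more
```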
Use the widget below and answer the following questions:
1. When the correlation is zero, $\rho = 0$, what does the distribution of size tell you about color?
2. Set $\rho$ to something small. As you change the probability of golden fish, what happens to the ratio of size probabilities? Set $\rho$ larger (can be negative). Can you explain the pattern of changes in the probabilities of size as you change the probability of golden fish?
3. Set the probability of golden fish and of large fish to around 65%. As the correlation goes towards 1, how often will you see silver large fish?
4. What is increasing the (absolute) correlation telling you about how likely you are to see one of the properties if you see a fish with the other?
```
# @markdown Execute this cell to enable the widget
style = {'description_width': 'initial'}
gs = GridspecLayout(2,2)
cor_widget = widgets.FloatSlider(0.0, description='ρ', min=-1, max=1, step=0.01)
px_widget = widgets.FloatSlider(0.5, description='p(color=golden)', min=0.01, max=0.99, step=0.01, style=style)
py_widget = widgets.FloatSlider(0.5, description='p(size=large)', min=0.01, max=0.99, step=0.01, style=style)
gs[0,0] = cor_widget
gs[0,1] = px_widget
gs[1,0] = py_widget
@widgets.interact(
px=px_widget,
py=py_widget,
cor=cor_widget,
)
def make_corr_plot(px, py, cor):
Cmin, Cmax = compute_cor_range(px, py) #allow correlation values
cor_widget.min, cor_widget.max = Cmin+0.01, Cmax-0.01
if cor_widget.value > Cmax:
cor_widget.value = Cmax
if cor_widget.value < Cmin:
cor_widget.value = Cmin
cor = cor_widget.value
P = compute_marginal(px,py,cor)
# print(P)
fig = plot_joint_probs(P)
plt.show(fig)
plt.close(fig)
return None
# gs[1,1] = make_corr_plot()
# to_remove explanation
#' 1. When the correlation is zero, the two properties are completely independent.
#' This means you don't gain any information about one variable from observing the other.
#' Importantly, the marginal distribution of one variable is therefore independent of the other.
#' 2. The correlation controls the distribution of probability across the joint probability table.
#' The higher the correlation, the more the probabilities are restricted by the fact that the rows
#' and columns need to sum to the fixed marginals! While the marginal probabilities show the relative weighting, the
#' absolute probabilities for one quality will become more dependent on the other as the correlation
#' goes to 1 or -1.
#' 3. The correlation controls how much probability mass is located on the diagonals. As the
#' correlation goes to 1 (or -1), the probability of seeing one of the two off-diagonal pairings goes
#' towards zero!
#' 4. If we think about what information we gain by observing one quality, the intuition from (3) tells
#' us that we know more (have more information) about the other quality as the correlation increases.
```
We have just seen how two random variables can be more or less independent. The more correlated they are, the less independent they are, and the more information they share. We also learned that we can marginalize to determine the marginal likelihood of a hidden state or to find the marginal probability distribution of two random variables. We are now going to complete our journey towards being fully Bayesian!
---
# Section 5: Bayes' Rule and the Posterior
Marginalization is going to be used to combine our prior knowledge, which we call the **prior**, and our new information from a measurement, the **likelihood**. Only in this case, the information we gain about the hidden state we are interested in, where the fish are, is based on the relationship between the probabilities of the measurement and our prior.
We can now calculate the full posterior distribution for the hidden state ($s$) using Bayes' Rule. As we've seen, the posterior is proportional to the prior times the likelihood. This means that the posterior probability of the hidden state ($s$) given a measurement ($m$) is proportional to the likelihood of the measurement given the state times the prior probability of that state (its marginal probability):
$$ P(s | m) \propto P(m | s) P(s) $$
We say proportional to instead of equal because we need to normalize to produce a full probability distribution:
$$ P(s | m) = \frac{P(m | s) P(s)}{P(m)} $$
Normalizing by this $P(m)$ means that our posterior is a complete probability distribution that sums or integrates to 1 appropriately. We can now use this new, complete probability distribution for any future inference or decisions we like! In fact, as we will see tomorrow, we can use it as a new prior! Finally, we often call this probability distribution our beliefs over the hidden states, to emphasize that it is our subjective knowledge about the hidden state.
For many complicated cases, like those we might be using to model behavioral or brain inferences, the normalization term can be intractable or extremely complex to calculate. We can be careful to choose probability distributions where we can analytically calculate the posterior probability or where numerical approximation is reliable. Better yet, we sometimes don't need to bother with this normalization! The normalization term, $P(m)$, is the probability of the measurement. This does not depend on state, so it is essentially a constant we can often ignore. We can compare the unnormalized posterior distribution values for different states because how they relate to each other is unchanged when divided by the same constant. We will see how to do this to compare evidence for different hypotheses tomorrow. (It's also used to compare the likelihood of models fit using maximum likelihood estimation, as you did in W1D5.)
In this relatively simple example, we can compute the marginal probability $P(m)$ easily by using:
$$P(m) = \sum_s P(m | s) P(s)$$
We can then normalize so that we deal with the full posterior distribution.
## Math Exercise 5: Calculating a posterior probability
Our prior is $p(s = left) = 0.3$ and $p(s = right) = 0.7$. In the video, we learned that the chance of catching a fish given they fish on the same side as the school was 50%. Otherwise, it was 10%. We observe a person fishing on the left side. Our likelihood is:
| Likelihood: p(m \| s) | m = catch fish | m = no fish |
| ----------------- |----------|----------|
| s = left | 0.5 | 0.5 |
| s = right | 0.1 | 0.9 |
Calculate the posterior probability (on paper) that:
1. The school is on the left if the fisherperson catches a fish: $p(s = left | m = catch fish)$ (hint: normalize by computing $p(m = catch fish)$)
2. The school is on the right if the fisherperson does not catch a fish: $p(s = right | m = no fish)$
```
# to_remove explanation
# 1. Using Bayes rule, we know that P(s = left | m = catch fish) = P(m = catch fish | s = left)P(s = left) / P(m = catch fish)
#. Let's first compute P(m = catch fish):
#. P(m = catch fish) = P(m = catch fish | s = left)P(s = left) + P(m = catch fish | s = right)P(s = right)
# = 0.5 * 0.3 + .1*.7
# = 0.22
#. Now we can plug in all parts of Bayes rule:
# P(s = left | m = catch fish) = P(m = catch fish | s = left)P(s = left) / P(m = catch fish)
# = 0.5*0.3/0.22
# = 0.68
# 2. Using Bayes rule, we know that P(s = right | m = no fish) = P(m = no fish | s = right)P(s = right) / P(m = no fish)
#. Let's first compute P(m = no fish):
#. P(m = no fish) = P(m = no fish | s = left)P(s = left) + P(m = no fish | s = right)P(s = right)
# = 0.5 * 0.3 + .9*.7
# = 0.78
#. Now we can plug in all parts of Bayes rule:
# P(s = right | m = no fish) = P(m = no fish | s = right)P(s = right) / P(m = no fish)
# = 0.9*0.7/0.78
# = 0.81
```
## Coding Exercise 5: Computing Posteriors
Let's implement the math above so we can compute posteriors for different priors and likelihoods.
As before, our prior is $p(s = left) = 0.3$ and $p(s = right) = 0.7$. In the video, we learned that the chance of catching a fish given they fish on the same side as the school was 50%. Otherwise, it was 10%. We observe a person fishing on the left side. Our likelihood is:
| Likelihood: p(m \| s) | m = catch fish | m = no fish |
| ----------------- |----------|----------|
| s = left | 0.5 | 0.5 |
| s = right | 0.1 | 0.9 |
We want our full posterior to take the same 2 by 2 form. Make sure the outputs match your math answers!
```
def compute_posterior(likelihood, prior):
""" Use Bayes' Rule to compute posterior from likelihood and prior
Args:
likelihood (ndarray): i x j array with likelihood probabilities where i is
number of state options, j is number of measurement options
prior (ndarray): i x 1 array with prior probability of each state
Returns:
ndarray: i x j array with posterior probabilities where i is
number of state options, j is number of measurement options
"""
#################################################
## TODO for students ##
# Fill out function and remove
raise NotImplementedError("Student exercise: implement compute_posterior")
#################################################
# Compute unnormalized posterior (likelihood times prior)
posterior = ... # first row is s = left, second row is s = right
# Compute p(m)
p_m = np.sum(posterior, axis = 0)
# Normalize posterior (divide elements by p_m)
posterior /= ...
return posterior
# Make prior
prior = np.array([0.3, 0.7]).reshape((2, 1)) # first row is s = left, second row is s = right
# Make likelihood
likelihood = np.array([[0.5, 0.5], [0.1, 0.9]]) # first row is s = left, second row is s = right
# Compute posterior
posterior = compute_posterior(likelihood, prior)
# Visualize
with plt.xkcd():
plot_prior_likelihood_posterior(prior, likelihood, posterior)
# to_remove solution
def compute_posterior(likelihood, prior):
""" Use Bayes' Rule to compute posterior from likelihood and prior
Args:
likelihood (ndarray): i x j array with likelihood probabilities where i is
number of state options, j is number of measurement options
prior (ndarray): i x 1 array with prior probability of each state
Returns:
ndarray: i x j array with posterior probabilities where i is
number of state options, j is number of measurement options
"""
# Compute unnormalized posterior (likelihood times prior)
posterior = likelihood * prior # first row is s = left, second row is s = right
# Compute p(m)
p_m = np.sum(posterior, axis = 0)
# Normalize posterior (divide elements by p_m)
posterior /= p_m
return posterior
# Make prior
prior = np.array([0.3, 0.7]).reshape((2, 1)) # first row is s = left, second row is s = right
# Make likelihood
likelihood = np.array([[0.5, 0.5], [0.1, 0.9]]) # first row is s = left, second row is s = right
# Compute posterior
posterior = compute_posterior(likelihood, prior)
# Visualize
with plt.xkcd():
plot_prior_likelihood_posterior(prior, likelihood, posterior)
```
## Interactive Demo 5: What affects the posterior?
Now that we understand the implementation of *Bayes' rule*, let's vary the parameters of the prior and likelihood to see how they affect the posterior.
In the demo below, you can change the prior by playing with the slider for $p( s = left)$. You can also change the likelihood by changing the probability of catching a fish given that the school is on the left and the probability of catching a fish given that the school is on the right. The fisherperson you are observing is fishing on the left.
1. Keeping the likelihood constant, when does the prior have the strongest influence over the posterior? Meaning, when does the posterior look most like the prior no matter whether a fish was caught or not?
2. Keeping the likelihood constant, when does the prior exert the weakest influence? Meaning, when does the posterior look least like the prior and depend most on whether a fish was caught or not?
3. Set the prior probability of the state = left to 0.6 and play with the likelihood. When does the likelihood exert the most influence over the posterior?
```
# @markdown Execute this cell to enable the widget
style = {'description_width': 'initial'}
ps_widget = widgets.FloatSlider(0.3, description='p(s = left)',
min=0.01, max=0.99, step=0.01)
p_a_s1_widget = widgets.FloatSlider(0.5, description='p(fish | s = left)',
min=0.01, max=0.99, step=0.01, style=style)
p_a_s0_widget = widgets.FloatSlider(0.1, description='p(fish | s = right)',
min=0.01, max=0.99, step=0.01, style=style)
observed_widget = widgets.Checkbox(value=False, description='Observed fish (m)',
disabled=False, indent=False, layout={'width': 'max-content'})
@widgets.interact(
ps=ps_widget,
p_a_s1=p_a_s1_widget,
p_a_s0=p_a_s0_widget,
m_right=observed_widget
)
def make_prior_likelihood_plot(ps,p_a_s1,p_a_s0,m_right):
fig = plot_prior_likelihood(ps,p_a_s1,p_a_s0,m_right)
plt.show(fig)
plt.close(fig)
return None
# to_remove explanation
# 1). The prior exerts a strong influence over the posterior when it is very informative: when
#. the probability of the school being on one side or the other is close to 1. If the prior that the fish are
#. on the left side is very high (like 0.9), the posterior probability of the state being left is
#. high regardless of the measurement.
# 2). The prior does not exert a strong influence when it is not informative: when the probabilities
#. of the school being on the left vs right are similar (both are 0.5 for example). In this case,
#. the posterior is more driven by the collected data (the measurement) and more closely resembles
#. the likelihood.
#. 3) Similarly to the prior, the likelihood exerts the most influence when it is informative: when catching
#. a fish tells you a lot of information about which state is likely. For example, if the probability of
#. catching a fish when the school is on the left is 0 (p(fish | s = left) = 0) and the probability of
#. catching a fish when the school is on the right is 1 (p(fish | s = right) = 1), the
#. prior does not affect the posterior at all. The measurement tells you the hidden state completely.
```
# Section 6: Making Bayesian fishing decisions
We will explore how to consider the expected utility of an action based on our belief (the posterior distribution) about where we think the fish are. Now we have all the components of a Bayesian decision: our prior information, the likelihood given a measurement, the posterior distribution (belief) and our utility (the gains and losses). This allows us to consider the relationship between the true value of the hidden state, $s$, and what we *expect* to get if we take action, $a$, based on our belief!
Let's use the following widget to think about the relationship between these probability distributions and the utility function.
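Before playing with the widget, here is a minimal sketch of the full chain (assuming NumPy, the posterior you computed in Exercise 5 after a caught fish, and the utility table from Section 2):
```
import numpy as np

posterior = np.array([0.68, 0.32])      # p(s = left | m = catch fish), p(s = right | m = catch fish)
utility = np.array([[2.0, -3.0],        # rows: state (left, right)
                    [-2.0, 1.0]])       # columns: action (left, right)
expected_utility = posterior @ utility  # one value per action
print(expected_utility)                 # approximately [ 0.72 -1.72]: fish on the left
```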
## Think! 6: What is more important, the probabilities or the utilities?
We are now going to put everything we've learned together to gain some intuitions for how each of the elements that goes into a Bayesian decision comes together. Remember, the common assumption in neuroscience, psychology, economics, ecology, etc. is that we (humans and animals) are trying to maximize our expected utility.
1. Can you find a situation where the expected utility is the same for both actions?
2. What is more important for determining the expected utility: the prior or a new measurement (the likelihood)?
3. Why is this a normative model?
4. Can you think of ways in which this model would need to be extended to describe human or animal behavior?
```
# @markdown Execute this cell to enable the widget
style = {'description_width': 'initial'}
ps_widget = widgets.FloatSlider(0.3, description='p(s)',
min=0.01, max=0.99, step=0.01)
p_a_s1_widget = widgets.FloatSlider(0.5, description='p(fish | s = left)',
min=0.01, max=0.99, step=0.01, style=style)
p_a_s0_widget = widgets.FloatSlider(0.1, description='p(fish | s = right)',
min=0.01, max=0.99, step=0.01, style=style)
observed_widget = widgets.Checkbox(value=False, description='Observed fish (m)',
disabled=False, indent=False, layout={'width': 'max-content'})
@widgets.interact(
ps=ps_widget,
p_a_s1=p_a_s1_widget,
p_a_s0=p_a_s0_widget,
m_right=observed_widget
)
def make_prior_likelihood_utility_plot(ps, p_a_s1, p_a_s0,m_right):
fig = plot_prior_likelihood_utility(ps, p_a_s1, p_a_s0,m_right)
plt.show(fig)
plt.close(fig)
return None
# to_remove explanation
#' 1. There are actually many (infinite) combinations that can produce the same
#. expected utility for both actions: but the posterior probabilities will always
# have to balance out the differences in the utility function. So, what is
# important is that for a given utility function, there will be some 'point
# of indifference'
#' 2. What matters is the relative information: if the prior is close to 50/50,
# then the likelihood has more influence; if the likelihood is 50/50 given a
# measurement (the measurement is uninformative), the prior is more important.
# But the critical insight from Bayes' Rule and the Bayesian approach is that what
# matters is the relative information you gain from a measurement, and that
# you can use all of this information for your decision.
#' 3. The model gives us a very precise way to think about how we *should* combine
# information and how we *should* act, GIVEN some assumption about our goals.
# In this case, if we assume we are trying to maximize expected utility--we can
# state what an animal or subject should do.
#' 4. There are lots of possible extensions. Humans may not always try to maximize
# utility; humans and animals might not be able to calculate or represent probability
# distributions exactly; The utility function might be more complicated; etc.
```
---
# Summary
In this tutorial, you learned about combining prior information with new measurements to update your knowledge using Bayes' Rule, in the context of a fishing problem.
Specifically, we covered:
* That the likelihood is the probability of the measurement given some hidden state
* That how the prior and likelihood interact to create the posterior, the probability of the hidden state given a measurement, depends on how they covary
* That utility is the gain from each action and state pair, and the expected utility of an action is the sum over states of the utility of that action-state pair, weighted by the probability of that state. You can then choose the action with the highest expected utility.
---
# Bonus
## Bonus Section 1: Correlation Formula
To understand the way we calculate the correlation, we need to review the definition of covariance and correlation.
Covariance:
$$
cov(X,Y) = \sigma_{XY} = E[(X - \mu_{x})(Y - \mu_{y})] = E[XY] - \mu_{x}\mu_{y}
$$
Correlation:
$$
\rho_{XY} = \frac{cov(X,Y)}{\sqrt{V(X)V(Y)}} = \frac{\sigma_{XY}}{\sigma_{X}\sigma_{Y}}
$$
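As a quick numeric check of these formulas (a sketch assuming NumPy and binary variables coded as 0/1, with the same table layout as the `compute_marginal` helper used earlier), you can recover the correlation directly from any 2x2 joint table; the table below is just an arbitrary example:
```
import numpy as np

# rows: Y = 0, 1; columns: X = 0, 1
P = np.array([[0.30, 0.20],
              [0.10, 0.40]])
px = P[:, 1].sum()          # P(X = 1)
py = P[1, :].sum()          # P(Y = 1)
cov_xy = P[1, 1] - px * py  # E[XY] - E[X]E[Y], since X and Y are 0/1
rho = cov_xy / np.sqrt(px * (1 - px) * py * (1 - py))
print(rho)                  # about 0.41 for this table
```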
| github_jupyter |
```
ls -l| tail -10
#G4
from google.colab import drive
drive.mount('/content/gdrive')
cp gdrive/My\ Drive/fingerspelling5.tar.bz2 fingerspelling5.tar.bz2
# rm -r surrey/
%rm -r dataset5/
# rm fingerspelling5.tar.bz2
# cd /media/datastorage/Phong/
!tar xjf fingerspelling5.tar.bz2
cd dataset5
mkdir surrey
mkdir surrey/D
mv dataset5/* surrey/D/
cd surrey
cd ..
# copy color and depth files into train/test folders
import glob
import os
import shutil
# get parts of image's path
def get_image_parts(image_path):
"""Given a full path to an image, return its parts."""
parts = image_path.split(os.path.sep)
#print(parts)
filename = parts[2]
filename_no_ext = filename.split('.')[0]
classname = parts[1]
train_or_test = parts[0]
return train_or_test, classname, filename_no_ext, filename
#del_folders = ['A','B','C','D','E']
move_folders_1 = ['A','B','C','E']
move_folders_2 = ['D']
# look for all images in sub-folders
for folder in move_folders_1:
class_folders = glob.glob(os.path.join(folder, '*'))
for iid_class in class_folders:
#move depth files
class_files = glob.glob(os.path.join(iid_class, 'depth*.png'))
print('copying %d files' %(len(class_files)))
for idx in range(len(class_files)):
src = class_files[idx]
if "0001" not in src:
train_or_test, classname, _, filename = get_image_parts(src)
dst = os.path.join('train_depth', classname, train_or_test+'_'+ filename)
# image directory
img_directory = os.path.join('train_depth', classname)
# create folder if not existed
if not os.path.exists(img_directory):
os.makedirs(img_directory)
#copying
shutil.copy(src, dst)
else:
                print('ignore: %s' % src)
#move color files
for iid_class in class_folders:
#move depth files
class_files = glob.glob(os.path.join(iid_class, 'color*.png'))
print('copying %d files' %(len(class_files)))
for idx in range(len(class_files)):
src = class_files[idx]
train_or_test, classname, _, filename = get_image_parts(src)
dst = os.path.join('train_color', classname, train_or_test+'_'+ filename)
# image directory
img_directory = os.path.join('train_color', classname)
# create folder if not existed
if not os.path.exists(img_directory):
os.makedirs(img_directory)
#copying
shutil.copy(src, dst)
# look for all images in sub-folders
for folder in move_folders_2:
class_folders = glob.glob(os.path.join(folder, '*'))
for iid_class in class_folders:
#move depth files
class_files = glob.glob(os.path.join(iid_class, 'depth*.png'))
print('copying %d files' %(len(class_files)))
for idx in range(len(class_files)):
src = class_files[idx]
if "0001" not in src:
train_or_test, classname, _, filename = get_image_parts(src)
dst = os.path.join('test_depth', classname, train_or_test+'_'+ filename)
# image directory
img_directory = os.path.join('test_depth', classname)
# create folder if not existed
if not os.path.exists(img_directory):
os.makedirs(img_directory)
#copying
shutil.copy(src, dst)
else:
                print('ignore: %s' % src)
#move color files
for iid_class in class_folders:
#move depth files
class_files = glob.glob(os.path.join(iid_class, 'color*.png'))
print('copying %d files' %(len(class_files)))
for idx in range(len(class_files)):
src = class_files[idx]
train_or_test, classname, _, filename = get_image_parts(src)
dst = os.path.join('test_color', classname, train_or_test+'_'+ filename)
# image directory
img_directory = os.path.join('test_color', classname)
# create folder if not existed
if not os.path.exists(img_directory):
os.makedirs(img_directory)
#copying
shutil.copy(src, dst)
# #/content
%cd ..
ls -l
mkdir surrey/E/checkpoints
cd surrey/
#MUL 1 - Inception - ST
from keras.applications import MobileNet
# from keras.applications import InceptionV3
# from keras.applications import Xception
# from keras.applications.inception_resnet_v2 import InceptionResNetV2
# from tensorflow.keras.applications import EfficientNetB0
from keras.models import Model
from keras.layers import concatenate
from keras.layers import Dense, GlobalAveragePooling2D, Input, Embedding, SimpleRNN, LSTM, Flatten, GRU, Reshape
# from keras.applications.inception_v3 import preprocess_input
# from tensorflow.keras.applications.efficientnet import preprocess_input
from keras.applications.mobilenet import preprocess_input
from keras.layers import GaussianNoise
def get_adv_model():
# f1_base = EfficientNetB0(include_top=False, weights='imagenet',
# input_shape=(299, 299, 3),
# pooling='avg')
# f1_x = f1_base.output
f1_base = MobileNet(weights='imagenet', include_top=False, input_shape=(224,224,3))
f1_x = f1_base.output
f1_x = GlobalAveragePooling2D()(f1_x)
# f1_x = f1_base.layers[-151].output #layer 5
# f1_x = GlobalAveragePooling2D()(f1_x)
# f1_x = Flatten()(f1_x)
# f1_x = Reshape([1,1280])(f1_x)
# f1_x = SimpleRNN(2048,
# return_sequences=False,
# # dropout=0.8
# input_shape=[1,1280])(f1_x)
#Regularization with noise
f1_x = GaussianNoise(0.1)(f1_x)
f1_x = Dense(1024, activation='relu')(f1_x)
f1_x = Dense(24, activation='softmax')(f1_x)
model_1 = Model(inputs=[f1_base.input],outputs=[f1_x])
model_1.summary()
return model_1
from keras.callbacks import Callback
import pickle
import sys
import warnings  # needed by EarlyStoppingByAccVal below
#Stop training on val_acc
class EarlyStoppingByAccVal(Callback):
def __init__(self, monitor='val_acc', value=0.00001, verbose=0):
super(Callback, self).__init__()
self.monitor = monitor
self.value = value
self.verbose = verbose
def on_epoch_end(self, epoch, logs={}):
current = logs.get(self.monitor)
if current is None:
warnings.warn("Early stopping requires %s available!" % self.monitor, RuntimeWarning)
if current >= self.value:
if self.verbose > 0:
print("Epoch %05d: early stopping" % epoch)
self.model.stop_training = True
# Save large model weights using pickle format instead of h5
class SaveCheckPoint(Callback):
def __init__(self, model, dest_folder):
super(Callback, self).__init__()
self.model = model
self.dest_folder = dest_folder
#initiate
self.best_val_acc = 0
self.best_val_loss = sys.maxsize #get max value
def on_epoch_end(self, epoch, logs={}):
val_acc = logs['val_acc']
val_loss = logs['val_loss']
if val_acc > self.best_val_acc:
self.best_val_acc = val_acc
# Save weights in pickle format instead of h5
print('\nSaving val_acc %f at %s' %(self.best_val_acc, self.dest_folder))
weigh= self.model.get_weights()
#now, use pickle to save your model weights, instead of .h5
#for heavy model architectures, .h5 file is unsupported.
fpkl= open(self.dest_folder, 'wb') #Python 3
pickle.dump(weigh, fpkl, protocol= pickle.HIGHEST_PROTOCOL)
fpkl.close()
# model.save('tmp.h5')
elif val_acc == self.best_val_acc:
if val_loss < self.best_val_loss:
self.best_val_loss=val_loss
# Save weights in pickle format instead of h5
print('\nSaving val_acc %f at %s' %(self.best_val_acc, self.dest_folder))
weigh= self.model.get_weights()
#now, use pickle to save your model weights, instead of .h5
#for heavy model architectures, .h5 file is unsupported.
fpkl= open(self.dest_folder, 'wb') #Python 3
pickle.dump(weigh, fpkl, protocol= pickle.HIGHEST_PROTOCOL)
fpkl.close()
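# Illustrative counterpart to SaveCheckPoint (this helper name is ours, not from the
# original notebook): restore weights that were pickled above into a model that was
# built with the same architecture.
def load_pickled_weights(model, weights_path):
    with open(weights_path, 'rb') as fpkl:
        model.set_weights(pickle.load(fpkl))
    return model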
# Training
import tensorflow as tf
from keras.preprocessing.image import ImageDataGenerator
from keras.callbacks import TensorBoard, ModelCheckpoint, EarlyStopping, CSVLogger, ReduceLROnPlateau
from keras.optimizers import Adam
import time, os
from math import ceil
train_datagen = ImageDataGenerator(
# rescale = 1./255,
rotation_range=30,
width_shift_range=0.3,
height_shift_range=0.3,
shear_range=0.3,
zoom_range=0.3,
# horizontal_flip=True,
# vertical_flip=True,##
# brightness_range=[0.5, 1.5],##
channel_shift_range=10,##
fill_mode='nearest',
# preprocessing_function=get_cutout_v2(),
preprocessing_function=preprocess_input,
)
test_datagen = ImageDataGenerator(
# rescale = 1./255
preprocessing_function=preprocess_input
)
NUM_GPU = 1
batch_size = 64
train_set = train_datagen.flow_from_directory('surrey/D/train_color/',
target_size = (224, 224),
batch_size = batch_size,
class_mode = 'categorical',
shuffle=True,
seed=7,
# subset="training"
)
valid_set = test_datagen.flow_from_directory('surrey/D/test_color/',
target_size = (224, 224),
batch_size = batch_size,
class_mode = 'categorical',
shuffle=False,
seed=7,
# subset="validation"
)
model_txt = 'st'
# Helper: Save the model.
savedfilename = os.path.join('surrey', 'D', 'checkpoints', 'Surrey_MobileNet_D_tmp.hdf5')
checkpointer = ModelCheckpoint(savedfilename,
monitor='val_accuracy', verbose=1,
save_best_only=True, mode='max',save_weights_only=True)########
# Helper: TensorBoard
tb = TensorBoard(log_dir=os.path.join('svhn_output', 'logs', model_txt))
# Helper: Save results.
timestamp = time.time()
csv_logger = CSVLogger(os.path.join('svhn_output', 'logs', model_txt + '-' + 'training-' + \
str(timestamp) + '.log'))
earlystopping = EarlyStoppingByAccVal(monitor='val_accuracy', value=0.9900, verbose=1)
epochs = 40##!!!
lr = 1e-3
decay = lr/epochs
optimizer = Adam(lr=lr, decay=decay)
# train on multiple-gpus
# Create a MirroredStrategy.
strategy = tf.distribute.MirroredStrategy()
print("Number of GPUs: {}".format(strategy.num_replicas_in_sync))
# Open a strategy scope.
with strategy.scope():
# Everything that creates variables should be under the strategy scope.
# In general this is only model construction & `compile()`.
model_mul = get_adv_model()
model_mul.compile(optimizer=optimizer,loss='categorical_crossentropy',metrics=['accuracy'])
step_size_train=ceil(train_set.n/train_set.batch_size)
step_size_valid=ceil(valid_set.n/valid_set.batch_size)
# step_size_test=ceil(testing_set.n//testing_set.batch_size)
# result = model_mul.fit_generator(
# generator = train_set,
# steps_per_epoch = step_size_train,
# validation_data = valid_set,
# validation_steps = step_size_valid,
# shuffle=True,
# epochs=epochs,
# callbacks=[checkpointer],
# # callbacks=[csv_logger, checkpointer, earlystopping],
# # callbacks=[tb, csv_logger, checkpointer, earlystopping],
# verbose=1)
# Training
import tensorflow as tf
from keras.preprocessing.image import ImageDataGenerator
from keras.callbacks import TensorBoard, ModelCheckpoint, EarlyStopping, CSVLogger, ReduceLROnPlateau
from keras.optimizers import Adam
import time, os
from math import ceil
train_datagen = ImageDataGenerator(
# rescale = 1./255,
rotation_range=30,
width_shift_range=0.3,
height_shift_range=0.3,
shear_range=0.3,
zoom_range=0.3,
# horizontal_flip=True,
# vertical_flip=True,##
# brightness_range=[0.5, 1.5],##
channel_shift_range=10,##
fill_mode='nearest',
# preprocessing_function=get_cutout_v2(),
preprocessing_function=preprocess_input,
)
test_datagen = ImageDataGenerator(
# rescale = 1./255
preprocessing_function=preprocess_input
)
NUM_GPU = 1
batch_size = 64
train_set = train_datagen.flow_from_directory('surrey/D/train_color/',
target_size = (224, 224),
batch_size = batch_size,
class_mode = 'categorical',
shuffle=True,
seed=7,
# subset="training"
)
valid_set = test_datagen.flow_from_directory('surrey/D/test_color/',
target_size = (224, 224),
batch_size = batch_size,
class_mode = 'categorical',
shuffle=False,
seed=7,
# subset="validation"
)
model_txt = 'st'
# Helper: Save the model.
savedfilename = os.path.join('gdrive', 'My Drive', 'Surrey_ASL', '5_Surrey_MobileNet_D.hdf5')
checkpointer = ModelCheckpoint(savedfilename,
monitor='val_accuracy', verbose=1,
save_best_only=True, mode='max',save_weights_only=True)########
# Helper: TensorBoard
tb = TensorBoard(log_dir=os.path.join('svhn_output', 'logs', model_txt))
# Helper: Save results.
timestamp = time.time()
csv_logger = CSVLogger(os.path.join('svhn_output', 'logs', model_txt + '-' + 'training-' + \
str(timestamp) + '.log'))
earlystopping = EarlyStoppingByAccVal(monitor='val_accuracy', value=0.9900, verbose=1)
epochs = 40##!!!
lr = 1e-3
decay = lr/epochs
optimizer = Adam(lr=lr, decay=decay)
# train on multiple-gpus
# Create a MirroredStrategy.
strategy = tf.distribute.MirroredStrategy()
print("Number of GPUs: {}".format(strategy.num_replicas_in_sync))
# Open a strategy scope.
with strategy.scope():
# Everything that creates variables should be under the strategy scope.
# In general this is only model construction & `compile()`.
model_mul = get_adv_model()
model_mul.compile(optimizer=optimizer,loss='categorical_crossentropy',metrics=['accuracy'])
step_size_train=ceil(train_set.n/train_set.batch_size)
step_size_valid=ceil(valid_set.n/valid_set.batch_size)
# step_size_test=ceil(testing_set.n//testing_set.batch_size)
result = model_mul.fit_generator(
generator = train_set,
steps_per_epoch = step_size_train,
validation_data = valid_set,
validation_steps = step_size_valid,
shuffle=True,
epochs=epochs,
callbacks=[checkpointer],
# callbacks=[csv_logger, checkpointer, earlystopping],
# callbacks=[tb, csv_logger, checkpointer, earlystopping],
verbose=1)
ls -l
# Open a strategy scope.
with strategy.scope():
model_mul.load_weights(os.path.join('gdrive', 'My Drive', 'Surrey_ASL', '5_Surrey_MobileNet_D.hdf5'))
model_mul.evaluate(valid_set)
# Helper: Save the model.
savedfilename = os.path.join('gdrive', 'My Drive', 'Surrey_ASL', '5_Surrey_MobileNet_D_L2.hdf5')
checkpointer = ModelCheckpoint(savedfilename,
monitor='val_accuracy', verbose=1,
save_best_only=True, mode='max',save_weights_only=True)########
epochs = 15##!!!
lr = 1e-4
decay = lr/epochs
optimizer = Adam(lr=lr, decay=decay)
# Open a strategy scope.
with strategy.scope():
model_mul.compile(optimizer=optimizer,loss='categorical_crossentropy',metrics=['accuracy'])
result = model_mul.fit_generator(
generator = train_set,
steps_per_epoch = step_size_train,
validation_data = valid_set,
validation_steps = step_size_valid,
shuffle=True,
epochs=epochs,
callbacks=[checkpointer],
# callbacks=[csv_logger, checkpointer, earlystopping],
# callbacks=[tb, csv_logger, checkpointer, earlystopping],
verbose=1)
# Open a strategy scope.
with strategy.scope():
model_mul.load_weights(os.path.join('gdrive', 'My Drive', 'Surrey_ASL', '5_Surrey_MobileNet_D_L2.hdf5'))
model_mul.evaluate(valid_set)
# Helper: Save the model.
savedfilename = os.path.join('gdrive', 'My Drive', 'Surrey_ASL', '5_Surrey_MobileNet_D_L3.hdf5')
checkpointer = ModelCheckpoint(savedfilename,
monitor='val_accuracy', verbose=1,
save_best_only=True, mode='max',save_weights_only=True)########
epochs = 15##!!!
lr = 1e-5
decay = lr/epochs
optimizer = Adam(lr=lr, decay=decay)
# Open a strategy scope.
with strategy.scope():
model_mul.compile(optimizer=optimizer,loss='categorical_crossentropy',metrics=['accuracy'])
result = model_mul.fit_generator(
generator = train_set,
steps_per_epoch = step_size_train,
validation_data = valid_set,
validation_steps = step_size_valid,
shuffle=True,
epochs=epochs,
callbacks=[checkpointer],
# callbacks=[csv_logger, checkpointer, earlystopping],
# callbacks=[tb, csv_logger, checkpointer, earlystopping],
verbose=1)
# Open a strategy scope.
with strategy.scope():
model_mul.load_weights(os.path.join('gdrive', 'My Drive', 'Surrey_ASL', '5_Surrey_MobileNet_D_L3.hdf5'))
model_mul.evaluate(valid_set)
import numpy as np
from keras.preprocessing.image import ImageDataGenerator
import time, os
from math import ceil
# PREDICT ON OFFICIAL TEST
train_datagen = ImageDataGenerator(
# rescale = 1./255,
rotation_range=30,
width_shift_range=0.3,
height_shift_range=0.3,
shear_range=0.3,
zoom_range=0.3,
# horizontal_flip=True,
# vertical_flip=True,##
# brightness_range=[0.5, 1.5],##
channel_shift_range=10,##
fill_mode='nearest',
preprocessing_function=preprocess_input,
)
test_datagen1 = ImageDataGenerator(
# rescale = 1./255,
preprocessing_function=preprocess_input
)
batch_size = 64
train_set = train_datagen.flow_from_directory('surrey/D/train_color/',
target_size = (224, 224),
batch_size = batch_size,
class_mode = 'categorical',
shuffle=True,
seed=7,
# subset="training"
)
test_set1 = test_datagen1.flow_from_directory('surrey/D/test_color/',
target_size = (224, 224),
batch_size = batch_size,
class_mode = 'categorical',
shuffle=False,
seed=7,
# subset="validation"
)
# if NUM_GPU != 1:
predict1=model_mul.predict_generator(test_set1, steps = ceil(test_set1.n/test_set1.batch_size),verbose=1)
# else:
# predict1=model.predict_generator(test_set1, steps = ceil(test_set1.n/test_set1.batch_size),verbose=1)
predicted_class_indices=np.argmax(predict1,axis=1)
labels = (train_set.class_indices)
labels = dict((v,k) for k,v in labels.items())
predictions1 = [labels[k] for k in predicted_class_indices]
import pandas as pd
filenames=test_set1.filenames
results=pd.DataFrame({"file_name":filenames,
"predicted1":predictions1,
})
results.to_csv('Surrey_MobileNet_D_L3_0902.csv')
results.head()
np.save(os.path.join('gdrive', 'My Drive', 'Surrey_ASL', 'npy', '5Colab_Surrey_MobileNet_D_L2_0902.hdf5'), predict1)
np.save(os.path.join('gdrive', 'My Drive', 'Surrey_ASL', 'npy', '5Colab_Surrey_MobileNet_D_L3_0902.hdf5'), predict1)
from sklearn.metrics import classification_report, confusion_matrix
import numpy as np
test_datagen = ImageDataGenerator(
preprocessing_function=preprocess_input)
testing_set = test_datagen.flow_from_directory('surrey/D/test_color/',
target_size = (224, 224),
batch_size = 32,
class_mode = 'categorical',
seed=7,
shuffle=False
# subset="validation"
)
y_pred = model_mul.predict_generator(testing_set)
y_pred = np.argmax(y_pred, axis=1)
y_true = testing_set.classes
print(confusion_matrix(y_true, y_pred))
# print(model.evaluate_generator(testing_set,
# steps = testing_set.n//testing_set.batch_size))
```
## Dependencies
```
# !pip install --quiet efficientnet
!pip install --quiet image-classifiers
import warnings, json, re, glob, math
from scripts_step_lr_schedulers import *
from melanoma_utility_scripts import *
from kaggle_datasets import KaggleDatasets
from sklearn.model_selection import KFold
import tensorflow.keras.layers as L
import tensorflow.keras.backend as K
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint
from tensorflow.keras import optimizers, layers, metrics, losses, Model
# import efficientnet.tfkeras as efn
from classification_models.tfkeras import Classifiers
import tensorflow_addons as tfa
SEED = 0
seed_everything(SEED)
warnings.filterwarnings("ignore")
```
## TPU configuration
```
strategy, tpu = set_up_strategy()
print("REPLICAS: ", strategy.num_replicas_in_sync)
AUTO = tf.data.experimental.AUTOTUNE
```
# Model parameters
```
config = {
"HEIGHT": 256,
"WIDTH": 256,
"CHANNELS": 3,
"BATCH_SIZE": 64,
"EPOCHS": 25,
"LEARNING_RATE": 3e-4,
"ES_PATIENCE": 10,
"N_FOLDS": 5,
"N_USED_FOLDS": 5,
"TTA_STEPS": 25,
"BASE_MODEL": 'seresnet18',
"BASE_MODEL_WEIGHTS": 'imagenet',
"DATASET_PATH": 'melanoma-256x256'
}
with open('config.json', 'w') as json_file:
    json.dump(config, json_file)
config
```
# Load data
```
database_base_path = '/kaggle/input/siim-isic-melanoma-classification/'
k_fold = pd.read_csv(database_base_path + 'train.csv')
test = pd.read_csv(database_base_path + 'test.csv')
print('Train samples: %d' % len(k_fold))
display(k_fold.head())
print(f'Test samples: {len(test)}')
display(test.head())
GCS_PATH = KaggleDatasets().get_gcs_path(config['DATASET_PATH'])
TRAINING_FILENAMES = tf.io.gfile.glob(GCS_PATH + '/train*.tfrec')
TEST_FILENAMES = tf.io.gfile.glob(GCS_PATH + '/test*.tfrec')
```
# Augmentations
```
def data_augment(image, label):
p_spatial = tf.random.uniform([1], minval=0, maxval=1, dtype='float32')
p_spatial2 = tf.random.uniform([1], minval=0, maxval=1, dtype='float32')
p_rotate = tf.random.uniform([1], minval=0, maxval=1, dtype='float32')
p_crop = tf.random.uniform([1], minval=0, maxval=1, dtype='float32')
p_pixel = tf.random.uniform([1], minval=0, maxval=1, dtype='float32')
### Spatial-level transforms
if p_spatial >= .2: # flips
image['input_image'] = tf.image.random_flip_left_right(image['input_image'])
image['input_image'] = tf.image.random_flip_up_down(image['input_image'])
if p_spatial >= .7:
image['input_image'] = tf.image.transpose(image['input_image'])
if p_rotate >= .8: # rotate 270º
image['input_image'] = tf.image.rot90(image['input_image'], k=3)
elif p_rotate >= .6: # rotate 180º
image['input_image'] = tf.image.rot90(image['input_image'], k=2)
elif p_rotate >= .4: # rotate 90º
image['input_image'] = tf.image.rot90(image['input_image'], k=1)
if p_spatial2 >= .6:
if p_spatial2 >= .9:
image['input_image'] = transform_rotation(image['input_image'], config['HEIGHT'], 180.)
elif p_spatial2 >= .8:
image['input_image'] = transform_zoom(image['input_image'], config['HEIGHT'], 8., 8.)
elif p_spatial2 >= .7:
image['input_image'] = transform_shift(image['input_image'], config['HEIGHT'], 8., 8.)
else:
image['input_image'] = transform_shear(image['input_image'], config['HEIGHT'], 2.)
if p_crop >= .6: # crops
if p_crop >= .8:
image['input_image'] = tf.image.random_crop(image['input_image'], size=[int(config['HEIGHT']*.8), int(config['WIDTH']*.8), config['CHANNELS']])
elif p_crop >= .7:
image['input_image'] = tf.image.random_crop(image['input_image'], size=[int(config['HEIGHT']*.9), int(config['WIDTH']*.9), config['CHANNELS']])
else:
image['input_image'] = tf.image.central_crop(image['input_image'], central_fraction=.8)
image['input_image'] = tf.image.resize(image['input_image'], size=[config['HEIGHT'], config['WIDTH']])
if p_pixel >= .6: # Pixel-level transforms
if p_pixel >= .9:
image['input_image'] = tf.image.random_hue(image['input_image'], 0.01)
elif p_pixel >= .8:
image['input_image'] = tf.image.random_saturation(image['input_image'], 0.7, 1.3)
elif p_pixel >= .7:
image['input_image'] = tf.image.random_contrast(image['input_image'], 0.8, 1.2)
else:
image['input_image'] = tf.image.random_brightness(image['input_image'], 0.1)
return image, label
```
## Auxiliary functions
```
# Datasets utility functions
def read_labeled_tfrecord(example, height=config['HEIGHT'], width=config['WIDTH'], channels=config['CHANNELS']):
example = tf.io.parse_single_example(example, LABELED_TFREC_FORMAT)
image = decode_image(example['image'], height, width, channels)
label = tf.cast(example['target'], tf.float32)
# meta features
data = {}
data['patient_id'] = tf.cast(example['patient_id'], tf.int32)
data['sex'] = tf.cast(example['sex'], tf.int32)
data['age_approx'] = tf.cast(example['age_approx'], tf.int32)
data['anatom_site_general_challenge'] = tf.cast(tf.one_hot(example['anatom_site_general_challenge'], 7), tf.int32)
return {'input_image': image, 'input_meta': data}, label # returns a dataset of (image, data, label)
def read_labeled_tfrecord_eval(example, height=config['HEIGHT'], width=config['WIDTH'], channels=config['CHANNELS']):
example = tf.io.parse_single_example(example, LABELED_TFREC_FORMAT)
image = decode_image(example['image'], height, width, channels)
label = tf.cast(example['target'], tf.float32)
image_name = example['image_name']
# meta features
data = {}
data['patient_id'] = tf.cast(example['patient_id'], tf.int32)
data['sex'] = tf.cast(example['sex'], tf.int32)
data['age_approx'] = tf.cast(example['age_approx'], tf.int32)
data['anatom_site_general_challenge'] = tf.cast(tf.one_hot(example['anatom_site_general_challenge'], 7), tf.int32)
return {'input_image': image, 'input_meta': data}, label, image_name # returns a dataset of (image, data, label, image_name)
def load_dataset(filenames, ordered=False, buffer_size=-1):
ignore_order = tf.data.Options()
if not ordered:
ignore_order.experimental_deterministic = False # disable order, increase speed
dataset = tf.data.TFRecordDataset(filenames, num_parallel_reads=buffer_size) # automatically interleaves reads from multiple files
dataset = dataset.with_options(ignore_order) # uses data as soon as it streams in, rather than in its original order
dataset = dataset.map(read_labeled_tfrecord, num_parallel_calls=buffer_size)
return dataset # returns a dataset of (image, data, label)
def load_dataset_eval(filenames, buffer_size=-1):
dataset = tf.data.TFRecordDataset(filenames, num_parallel_reads=buffer_size) # automatically interleaves reads from multiple files
dataset = dataset.map(read_labeled_tfrecord_eval, num_parallel_calls=buffer_size)
return dataset # returns a dataset of (image, data, label, image_name)
def get_training_dataset(filenames, batch_size, buffer_size=-1):
dataset = load_dataset(filenames, ordered=False, buffer_size=buffer_size)
dataset = dataset.map(data_augment, num_parallel_calls=AUTO)
dataset = dataset.repeat() # the training dataset must repeat for several epochs
dataset = dataset.shuffle(2048)
dataset = dataset.batch(batch_size, drop_remainder=True) # slighly faster with fixed tensor sizes
dataset = dataset.prefetch(buffer_size) # prefetch next batch while training (autotune prefetch buffer size)
return dataset
def get_validation_dataset(filenames, ordered=True, repeated=False, batch_size=32, buffer_size=-1):
dataset = load_dataset(filenames, ordered=ordered, buffer_size=buffer_size)
if repeated:
dataset = dataset.repeat()
dataset = dataset.shuffle(2048)
dataset = dataset.batch(batch_size, drop_remainder=repeated)
dataset = dataset.prefetch(buffer_size)
return dataset
def get_eval_dataset(filenames, batch_size=32, buffer_size=-1):
dataset = load_dataset_eval(filenames, buffer_size=buffer_size)
dataset = dataset.batch(batch_size, drop_remainder=False)
dataset = dataset.prefetch(buffer_size)
return dataset
# Test function
def read_unlabeled_tfrecord(example, height=config['HEIGHT'], width=config['WIDTH'], channels=config['CHANNELS']):
example = tf.io.parse_single_example(example, UNLABELED_TFREC_FORMAT)
image = decode_image(example['image'], height, width, channels)
image_name = example['image_name']
# meta features
data = {}
data['patient_id'] = tf.cast(example['patient_id'], tf.int32)
data['sex'] = tf.cast(example['sex'], tf.int32)
data['age_approx'] = tf.cast(example['age_approx'], tf.int32)
data['anatom_site_general_challenge'] = tf.cast(tf.one_hot(example['anatom_site_general_challenge'], 7), tf.int32)
return {'input_image': image, 'input_tabular': data}, image_name # returns a dataset of (image, data, image_name)
def load_dataset_test(filenames, buffer_size=-1):
dataset = tf.data.TFRecordDataset(filenames, num_parallel_reads=buffer_size) # automatically interleaves reads from multiple files
dataset = dataset.map(read_unlabeled_tfrecord, num_parallel_calls=buffer_size)
# returns a dataset of (image, data, label, image_name) pairs if labeled=True or (image, data, image_name) pairs if labeled=False
return dataset
def get_test_dataset(filenames, batch_size=32, buffer_size=-1, tta=False):
dataset = load_dataset_test(filenames, buffer_size=buffer_size)
if tta:
dataset = dataset.map(data_augment, num_parallel_calls=AUTO)
dataset = dataset.batch(batch_size, drop_remainder=False)
dataset = dataset.prefetch(buffer_size)
return dataset
# Advanced augmentations
def transform_rotation(image, height, rotation):
# input image - is one image of size [dim,dim,3] not a batch of [b,dim,dim,3]
# output - image randomly rotated
DIM = height
XDIM = DIM%2 #fix for size 331
rotation = rotation * tf.random.normal([1],dtype='float32')
# CONVERT DEGREES TO RADIANS
rotation = math.pi * rotation / 180.
# ROTATION MATRIX
c1 = tf.math.cos(rotation)
s1 = tf.math.sin(rotation)
one = tf.constant([1],dtype='float32')
zero = tf.constant([0],dtype='float32')
rotation_matrix = tf.reshape( tf.concat([c1,s1,zero, -s1,c1,zero, zero,zero,one],axis=0),[3,3] )
# LIST DESTINATION PIXEL INDICES
x = tf.repeat( tf.range(DIM//2,-DIM//2,-1), DIM )
y = tf.tile( tf.range(-DIM//2,DIM//2),[DIM] )
z = tf.ones([DIM*DIM],dtype='int32')
idx = tf.stack( [x,y,z] )
# ROTATE DESTINATION PIXELS ONTO ORIGIN PIXELS
idx2 = K.dot(rotation_matrix,tf.cast(idx,dtype='float32'))
idx2 = K.cast(idx2,dtype='int32')
idx2 = K.clip(idx2,-DIM//2+XDIM+1,DIM//2)
# FIND ORIGIN PIXEL VALUES
idx3 = tf.stack( [DIM//2-idx2[0,], DIM//2-1+idx2[1,]] )
d = tf.gather_nd(image, tf.transpose(idx3))
return tf.reshape(d,[DIM,DIM,3])
def transform_shear(image, height, shear):
# input image - is one image of size [dim,dim,3] not a batch of [b,dim,dim,3]
# output - image randomly sheared
DIM = height
XDIM = DIM%2 #fix for size 331
shear = shear * tf.random.normal([1],dtype='float32')
shear = math.pi * shear / 180.
# SHEAR MATRIX
one = tf.constant([1],dtype='float32')
zero = tf.constant([0],dtype='float32')
c2 = tf.math.cos(shear)
s2 = tf.math.sin(shear)
shear_matrix = tf.reshape( tf.concat([one,s2,zero, zero,c2,zero, zero,zero,one],axis=0),[3,3] )
# LIST DESTINATION PIXEL INDICES
x = tf.repeat( tf.range(DIM//2,-DIM//2,-1), DIM )
y = tf.tile( tf.range(-DIM//2,DIM//2),[DIM] )
z = tf.ones([DIM*DIM],dtype='int32')
idx = tf.stack( [x,y,z] )
# ROTATE DESTINATION PIXELS ONTO ORIGIN PIXELS
idx2 = K.dot(shear_matrix,tf.cast(idx,dtype='float32'))
idx2 = K.cast(idx2,dtype='int32')
idx2 = K.clip(idx2,-DIM//2+XDIM+1,DIM//2)
# FIND ORIGIN PIXEL VALUES
idx3 = tf.stack( [DIM//2-idx2[0,], DIM//2-1+idx2[1,]] )
d = tf.gather_nd(image, tf.transpose(idx3))
return tf.reshape(d,[DIM,DIM,3])
def transform_shift(image, height, h_shift, w_shift):
# input image - is one image of size [dim,dim,3] not a batch of [b,dim,dim,3]
# output - image randomly shifted
DIM = height
XDIM = DIM%2 #fix for size 331
height_shift = h_shift * tf.random.normal([1],dtype='float32')
width_shift = w_shift * tf.random.normal([1],dtype='float32')
one = tf.constant([1],dtype='float32')
zero = tf.constant([0],dtype='float32')
# SHIFT MATRIX
shift_matrix = tf.reshape( tf.concat([one,zero,height_shift, zero,one,width_shift, zero,zero,one],axis=0),[3,3] )
# LIST DESTINATION PIXEL INDICES
x = tf.repeat( tf.range(DIM//2,-DIM//2,-1), DIM )
y = tf.tile( tf.range(-DIM//2,DIM//2),[DIM] )
z = tf.ones([DIM*DIM],dtype='int32')
idx = tf.stack( [x,y,z] )
# ROTATE DESTINATION PIXELS ONTO ORIGIN PIXELS
idx2 = K.dot(shift_matrix,tf.cast(idx,dtype='float32'))
idx2 = K.cast(idx2,dtype='int32')
idx2 = K.clip(idx2,-DIM//2+XDIM+1,DIM//2)
# FIND ORIGIN PIXEL VALUES
idx3 = tf.stack( [DIM//2-idx2[0,], DIM//2-1+idx2[1,]] )
d = tf.gather_nd(image, tf.transpose(idx3))
return tf.reshape(d,[DIM,DIM,3])
def transform_zoom(image, height, h_zoom, w_zoom):
# input image - is one image of size [dim,dim,3] not a batch of [b,dim,dim,3]
# output - image randomly zoomed
DIM = height
XDIM = DIM%2 #fix for size 331
height_zoom = 1.0 + tf.random.normal([1],dtype='float32')/h_zoom
width_zoom = 1.0 + tf.random.normal([1],dtype='float32')/w_zoom
one = tf.constant([1],dtype='float32')
zero = tf.constant([0],dtype='float32')
# ZOOM MATRIX
zoom_matrix = tf.reshape( tf.concat([one/height_zoom,zero,zero, zero,one/width_zoom,zero, zero,zero,one],axis=0),[3,3] )
# LIST DESTINATION PIXEL INDICES
x = tf.repeat( tf.range(DIM//2,-DIM//2,-1), DIM )
y = tf.tile( tf.range(-DIM//2,DIM//2),[DIM] )
z = tf.ones([DIM*DIM],dtype='int32')
idx = tf.stack( [x,y,z] )
# ROTATE DESTINATION PIXELS ONTO ORIGIN PIXELS
idx2 = K.dot(zoom_matrix,tf.cast(idx,dtype='float32'))
idx2 = K.cast(idx2,dtype='int32')
idx2 = K.clip(idx2,-DIM//2+XDIM+1,DIM//2)
# FIND ORIGIN PIXEL VALUES
idx3 = tf.stack( [DIM//2-idx2[0,], DIM//2-1+idx2[1,]] )
d = tf.gather_nd(image, tf.transpose(idx3))
return tf.reshape(d,[DIM,DIM,3])
```
## Learning rate scheduler
```
lr_min = 1e-6
# lr_start = 0
lr_max = config['LEARNING_RATE']
steps_per_epoch = 24844 // config['BATCH_SIZE']
total_steps = config['EPOCHS'] * steps_per_epoch
warmup_steps = steps_per_epoch * 5
# hold_max_steps = 0
# step_decay = .8
# step_size = steps_per_epoch * 1
# rng = [i for i in range(0, total_steps, 32)]
# y = [step_schedule_with_warmup(tf.cast(x, tf.float32), step_size=step_size,
# warmup_steps=warmup_steps, hold_max_steps=hold_max_steps,
# lr_start=lr_start, lr_max=lr_max, step_decay=step_decay) for x in rng]
# sns.set(style="whitegrid")
# fig, ax = plt.subplots(figsize=(20, 6))
# plt.plot(rng, y)
# print("Learning rate schedule: {:.3g} to {:.3g} to {:.3g}".format(y[0], max(y), y[-1]))
```
# Model
```
# Initial bias
pos = len(k_fold[k_fold['target'] == 1])
neg = len(k_fold[k_fold['target'] == 0])
initial_bias = np.log([pos/neg])
print('Bias')
print(pos)
print(neg)
print(initial_bias)
# class weights
total = len(k_fold)
weight_for_0 = (1 / neg)*(total)/2.0
weight_for_1 = (1 / pos)*(total)/2.0
class_weight = {0: weight_for_0, 1: weight_for_1}
print('Class weight')
print(class_weight)
def model_fn(input_shape):
input_image = L.Input(shape=input_shape, name='input_image')
BaseModel, preprocess_input = Classifiers.get(config['BASE_MODEL'])
base_model = BaseModel(input_shape=input_shape,
weights=config['BASE_MODEL_WEIGHTS'],
include_top=False)
x = base_model(input_image)
x = L.GlobalAveragePooling2D()(x)
output = L.Dense(1, activation='sigmoid', name='output',
bias_initializer=tf.keras.initializers.Constant(initial_bias))(x)
model = Model(inputs=input_image, outputs=output)
return model
```
# Training
```
# Evaluation
eval_dataset = get_eval_dataset(TRAINING_FILENAMES, batch_size=config['BATCH_SIZE'], buffer_size=AUTO)
image_names = next(iter(eval_dataset.unbatch().map(lambda data, label, image_name: image_name).batch(count_data_items(TRAINING_FILENAMES)))).numpy().astype('U')
image_data = eval_dataset.map(lambda data, label, image_name: data)
# Resample dataframe
k_fold = k_fold[k_fold['image_name'].isin(image_names)]
# Test
NUM_TEST_IMAGES = len(test)
test_preds = np.zeros((NUM_TEST_IMAGES, 1))
test_preds_last = np.zeros((NUM_TEST_IMAGES, 1))
test_dataset = get_test_dataset(TEST_FILENAMES, batch_size=config['BATCH_SIZE'], buffer_size=AUTO, tta=True)
image_names_test = next(iter(test_dataset.unbatch().map(lambda data, image_name: image_name).batch(NUM_TEST_IMAGES))).numpy().astype('U')
test_image_data = test_dataset.map(lambda data, image_name: data)
history_list = []
k_fold_best = k_fold.copy()
kfold = KFold(config['N_FOLDS'], shuffle=True, random_state=SEED)
for n_fold, (trn_idx, val_idx) in enumerate(kfold.split(TRAINING_FILENAMES)):
if n_fold < config['N_USED_FOLDS']:
n_fold +=1
print('\nFOLD: %d' % (n_fold))
# tf.tpu.experimental.initialize_tpu_system(tpu)
K.clear_session()
### Data
train_filenames = np.array(TRAINING_FILENAMES)[trn_idx]
valid_filenames = np.array(TRAINING_FILENAMES)[val_idx]
steps_per_epoch = count_data_items(train_filenames) // config['BATCH_SIZE']
# Train model
model_path = f'model_fold_{n_fold}.h5'
es = EarlyStopping(monitor='val_auc', mode='max', patience=config['ES_PATIENCE'],
restore_best_weights=False, verbose=1)
checkpoint = ModelCheckpoint(model_path, monitor='val_auc', mode='max',
save_best_only=True, save_weights_only=True)
with strategy.scope():
model = model_fn((config['HEIGHT'], config['WIDTH'], config['CHANNELS']))
optimizer = tfa.optimizers.RectifiedAdam(lr=lr_max,
total_steps=total_steps,
warmup_proportion=(warmup_steps / total_steps),
min_lr=lr_min)
model.compile(optimizer, loss=losses.BinaryCrossentropy(label_smoothing=0.05),
metrics=[metrics.AUC()])
history = model.fit(get_training_dataset(train_filenames, batch_size=config['BATCH_SIZE'], buffer_size=AUTO),
validation_data=get_validation_dataset(valid_filenames, ordered=True, repeated=False,
batch_size=config['BATCH_SIZE'], buffer_size=AUTO),
epochs=config['EPOCHS'],
steps_per_epoch=steps_per_epoch,
callbacks=[checkpoint, es],
class_weight=class_weight,
verbose=2).history
# save last epoch weights
model.save_weights('last_' + model_path)
history_list.append(history)
# Get validation IDs
valid_dataset = get_eval_dataset(valid_filenames, batch_size=config['BATCH_SIZE'], buffer_size=AUTO)
valid_image_names = next(iter(valid_dataset.unbatch().map(lambda data, label, image_name: image_name).batch(count_data_items(valid_filenames)))).numpy().astype('U')
k_fold[f'fold_{n_fold}'] = k_fold.apply(lambda x: 'validation' if x['image_name'] in valid_image_names else 'train', axis=1)
k_fold_best[f'fold_{n_fold}'] = k_fold_best.apply(lambda x: 'validation' if x['image_name'] in valid_image_names else 'train', axis=1)
##### Last model #####
print('Last model evaluation...')
preds = model.predict(image_data)
name_preds_eval = dict(zip(image_names, preds.reshape(len(preds))))
k_fold[f'pred_fold_{n_fold}'] = k_fold.apply(lambda x: name_preds_eval[x['image_name']], axis=1)
print(f'Last model inference (TTA {config["TTA_STEPS"]} steps)...')
for step in range(config['TTA_STEPS']):
test_preds_last += model.predict(test_image_data)
##### Best model #####
print('Best model evaluation...')
model.load_weights(model_path)
preds = model.predict(image_data)
name_preds_eval = dict(zip(image_names, preds.reshape(len(preds))))
k_fold_best[f'pred_fold_{n_fold}'] = k_fold_best.apply(lambda x: name_preds_eval[x['image_name']], axis=1)
print(f'Best model inference (TTA {config["TTA_STEPS"]} steps)...')
for step in range(config['TTA_STEPS']):
test_preds += model.predict(test_image_data)
# normalize preds
test_preds /= (config['N_USED_FOLDS'] * config['TTA_STEPS'])
test_preds_last /= (config['N_USED_FOLDS'] * config['TTA_STEPS'])
name_preds = dict(zip(image_names_test, test_preds.reshape(NUM_TEST_IMAGES)))
name_preds_last = dict(zip(image_names_test, test_preds_last.reshape(NUM_TEST_IMAGES)))
test['target'] = test.apply(lambda x: name_preds[x['image_name']], axis=1)
test['target_last'] = test.apply(lambda x: name_preds_last[x['image_name']], axis=1)
```
## Model loss graph
```
for n_fold in range(config['N_USED_FOLDS']):
print(f'Fold: {n_fold + 1}')
plot_metrics(history_list[n_fold])
```
## Model loss graph aggregated
```
plot_metrics_agg(history_list, config['N_USED_FOLDS'])
```
# Model evaluation (best)
```
display(evaluate_model(k_fold_best, config['N_USED_FOLDS']).style.applymap(color_map))
display(evaluate_model_Subset(k_fold_best, config['N_USED_FOLDS']).style.applymap(color_map))
```
# Model evaluation (last)
```
display(evaluate_model(k_fold, config['N_USED_FOLDS']).style.applymap(color_map))
display(evaluate_model_Subset(k_fold, config['N_USED_FOLDS']).style.applymap(color_map))
```
# Confusion matrix
```
for n_fold in range(config['N_USED_FOLDS']):
n_fold += 1
pred_col = f'pred_fold_{n_fold}'
train_set = k_fold_best[k_fold_best[f'fold_{n_fold}'] == 'train']
valid_set = k_fold_best[k_fold_best[f'fold_{n_fold}'] == 'validation']
print(f'Fold: {n_fold}')
plot_confusion_matrix(train_set['target'], np.round(train_set[pred_col]),
valid_set['target'], np.round(valid_set[pred_col]))
```
# Visualize predictions
```
k_fold['pred'] = 0
for n_fold in range(config['N_USED_FOLDS']):
k_fold['pred'] += k_fold[f'pred_fold_{n_fold+1}'] / config['N_FOLDS']
print('Label/prediction distribution')
print(f"Train positive labels: {len(k_fold[k_fold['target'] > .5])}")
print(f"Train positive predictions: {len(k_fold[k_fold['pred'] > .5])}")
print(f"Train positive correct predictions: {len(k_fold[(k_fold['target'] > .5) & (k_fold['pred'] > .5)])}")
print('Top 10 samples')
display(k_fold[['image_name', 'sex', 'age_approx','anatom_site_general_challenge', 'diagnosis',
'target', 'pred'] + [c for c in k_fold.columns if (c.startswith('pred_fold'))]].head(10))
print('Top 10 positive samples')
display(k_fold[['image_name', 'sex', 'age_approx','anatom_site_general_challenge', 'diagnosis',
'target', 'pred'] + [c for c in k_fold.columns if (c.startswith('pred_fold'))]].query('target == 1').head(10))
print('Top 10 predicted positive samples')
display(k_fold[['image_name', 'sex', 'age_approx','anatom_site_general_challenge', 'diagnosis',
'target', 'pred'] + [c for c in k_fold.columns if (c.startswith('pred_fold'))]].query('pred > .5').head(10))
```
# Visualize test predictions
```
print(f"Test predictions {len(test[test['target'] > .5])}|{len(test[test['target'] <= .5])}")
print(f"Test predictions (last) {len(test[test['target_last'] > .5])}|{len(test[test['target_last'] <= .5])}")
print('Top 10 samples')
display(test[['image_name', 'sex', 'age_approx','anatom_site_general_challenge', 'target', 'target_last'] +
[c for c in test.columns if (c.startswith('pred_fold'))]].head(10))
print('Top 10 positive samples')
display(test[['image_name', 'sex', 'age_approx','anatom_site_general_challenge', 'target', 'target_last'] +
[c for c in test.columns if (c.startswith('pred_fold'))]].query('target > .5').head(10))
print('Top 10 positive samples (last)')
display(test[['image_name', 'sex', 'age_approx','anatom_site_general_challenge', 'target', 'target_last'] +
[c for c in test.columns if (c.startswith('pred_fold'))]].query('target_last > .5').head(10))
```
# Test set predictions
```
submission = pd.read_csv(database_base_path + 'sample_submission.csv')
submission['target'] = test['target']
submission['target_last'] = test['target_last']
submission['target_blend'] = (test['target'] * .5) + (test['target_last'] * .5)
display(submission.head(10))
display(submission.describe())
### BEST ###
submission[['image_name', 'target']].to_csv('submission.csv', index=False)
### LAST ###
submission_last = submission[['image_name', 'target_last']]
submission_last.columns = ['image_name', 'target']
submission_last.to_csv('submission_last.csv', index=False)
### BLEND ###
submission_blend = submission[['image_name', 'target_blend']]
submission_blend.columns = ['image_name', 'target']
submission_blend.to_csv('submission_blend.csv', index=False)
```
# CTA data analysis with Gammapy
## Introduction
**This notebook shows an example how to make a sky image and spectrum for simulated CTA data with Gammapy.**
The dataset we will use is three observation runs on the Galactic center. This is a tiny (and thus quick to process and play with and learn) subset of the simulated CTA dataset that was produced for the first data challenge in August 2017.
## Setup
As usual, we'll start with some setup ...
```
%matplotlib inline
import matplotlib.pyplot as plt
!gammapy info --no-envvar --no-system
import numpy as np
import astropy.units as u
from astropy.coordinates import SkyCoord
from astropy.convolution import Gaussian2DKernel
from regions import CircleSkyRegion
from gammapy.modeling import Fit
from gammapy.data import DataStore
from gammapy.datasets import (
Datasets,
FluxPointsDataset,
SpectrumDataset,
MapDataset,
)
from gammapy.modeling.models import (
PowerLawSpectralModel,
SkyModel,
GaussianSpatialModel,
)
from gammapy.maps import MapAxis, WcsNDMap, WcsGeom, RegionGeom
from gammapy.makers import (
MapDatasetMaker,
SafeMaskMaker,
SpectrumDatasetMaker,
ReflectedRegionsBackgroundMaker,
)
from gammapy.estimators import TSMapEstimator, FluxPointsEstimator
from gammapy.estimators.utils import find_peaks
from gammapy.visualization import plot_spectrum_datasets_off_regions
# Configure the logger, so that the spectral analysis
# isn't so chatty about what it's doing.
import logging
logging.basicConfig()
log = logging.getLogger("gammapy.spectrum")
log.setLevel(logging.ERROR)
```
## Select observations
A Gammapy analysis usually starts by creating a `~gammapy.data.DataStore` and selecting observations.
This is shown in detail in the other notebook, here we just pick three observations near the galactic center.
```
data_store = DataStore.from_dir("$GAMMAPY_DATA/cta-1dc/index/gps")
# Just as a reminder: this is how to select observations
# from astropy.coordinates import SkyCoord
# table = data_store.obs_table
# pos_obs = SkyCoord(table['GLON_PNT'], table['GLAT_PNT'], frame='galactic', unit='deg')
# pos_target = SkyCoord(0, 0, frame='galactic', unit='deg')
# offset = pos_target.separation(pos_obs).deg
# mask = (1 < offset) & (offset < 2)
# table = table[mask]
# table.show_in_browser(jsviewer=True)
obs_id = [110380, 111140, 111159]
observations = data_store.get_observations(obs_id)
obs_cols = ["OBS_ID", "GLON_PNT", "GLAT_PNT", "LIVETIME"]
data_store.obs_table.select_obs_id(obs_id)[obs_cols]
```
## Make sky images
### Define map geometry
Select the target position and define an ON region for the spectral analysis
```
axis = MapAxis.from_edges(
np.logspace(-1.0, 1.0, 10), unit="TeV", name="energy", interp="log"
)
geom = WcsGeom.create(
skydir=(0, 0), npix=(500, 400), binsz=0.02, frame="galactic", axes=[axis]
)
geom
```
### Compute images
Exclusion mask currently unused. Remove here or move to later in the tutorial?
```
target_position = SkyCoord(0, 0, unit="deg", frame="galactic")
on_radius = 0.2 * u.deg
on_region = CircleSkyRegion(center=target_position, radius=on_radius)
exclusion_mask = geom.to_image().region_mask([on_region], inside=False)
exclusion_mask = WcsNDMap(geom.to_image(), exclusion_mask)
exclusion_mask.plot();
%%time
stacked = MapDataset.create(geom=geom)
stacked.edisp = None
maker = MapDatasetMaker(selection=["counts", "background", "exposure", "psf"])
maker_safe_mask = SafeMaskMaker(methods=["offset-max"], offset_max=2.5 * u.deg)
for obs in observations:
cutout = stacked.cutout(obs.pointing_radec, width="5 deg")
dataset = maker.run(cutout, obs)
dataset = maker_safe_mask.run(dataset, obs)
stacked.stack(dataset)
# The maps are cubes, with an energy axis.
# Let's also make some images:
dataset_image = stacked.to_image()
```
### Show images
Let's have a quick look at the images we computed ...
```
dataset_image.counts.smooth(2).plot(vmax=5);
dataset_image.background.plot(vmax=5);
dataset_image.excess.smooth(3).plot(vmax=2);
```
## Source Detection
Use the class `~gammapy.estimators.TSMapEstimator` and function `gammapy.estimators.utils.find_peaks` to detect sources on the images. We search for 0.1 deg sigma gaussian sources in the dataset.
```
spatial_model = GaussianSpatialModel(sigma="0.05 deg")
spectral_model = PowerLawSpectralModel(index=2)
model = SkyModel(spatial_model=spatial_model, spectral_model=spectral_model)
ts_image_estimator = TSMapEstimator(
model,
kernel_width="0.5 deg",
selection_optional=[],
downsampling_factor=2,
sum_over_energy_groups=False,
energy_edges=[0.1, 10] * u.TeV,
)
%%time
images_ts = ts_image_estimator.run(stacked)
sources = find_peaks(
images_ts["sqrt_ts"],
threshold=5,
min_distance="0.2 deg",
)
sources
source_pos = SkyCoord(sources["ra"], sources["dec"])
source_pos
# Plot sources on top of significance sky image
images_ts["sqrt_ts"].plot(add_cbar=True)
plt.gca().scatter(
source_pos.ra.deg,
source_pos.dec.deg,
transform=plt.gca().get_transform("icrs"),
color="none",
edgecolor="white",
marker="o",
s=200,
lw=1.5,
);
```
## Spatial analysis
See other notebooks for how to run a 3D cube or 2D image based analysis.
## Spectrum
We'll run a spectral analysis using the classical reflected regions background estimation method,
and using the on-off (often called WSTAT) likelihood function.
```
energy_axis = MapAxis.from_energy_bounds(0.1, 40, 40, unit="TeV", name="energy")
energy_axis_true = MapAxis.from_energy_bounds(
0.05, 100, 200, unit="TeV", name="energy_true"
)
geom = RegionGeom.create(region=on_region, axes=[energy_axis])
dataset_empty = SpectrumDataset.create(
geom=geom, energy_axis_true=energy_axis_true
)
dataset_maker = SpectrumDatasetMaker(
containment_correction=False, selection=["counts", "exposure", "edisp"]
)
bkg_maker = ReflectedRegionsBackgroundMaker(exclusion_mask=exclusion_mask)
safe_mask_masker = SafeMaskMaker(methods=["aeff-max"], aeff_percent=10)
%%time
datasets = Datasets()
for observation in observations:
dataset = dataset_maker.run(
dataset_empty.copy(name=f"obs-{observation.obs_id}"), observation
)
dataset_on_off = bkg_maker.run(dataset, observation)
dataset_on_off = safe_mask_masker.run(dataset_on_off, observation)
datasets.append(dataset_on_off)
plt.figure(figsize=(8, 8))
_, ax, _ = dataset_image.counts.smooth("0.03 deg").plot(vmax=8)
on_region.to_pixel(ax.wcs).plot(ax=ax, edgecolor="white")
plot_spectrum_datasets_off_regions(datasets, ax=ax)
```
### Model fit
The next step is to fit a spectral model, using all data (i.e. a "global" fit, using all energies).
```
%%time
spectral_model = PowerLawSpectralModel(
index=2, amplitude=1e-11 * u.Unit("cm-2 s-1 TeV-1"), reference=1 * u.TeV
)
model = SkyModel(spectral_model=spectral_model, name="source-gc")
datasets.models = model
fit = Fit(datasets)
result = fit.run()
print(result)
```
### Spectral points
Finally, let's compute spectral points. The method used is to first choose an energy binning, and then to do a 1-dim likelihood fit / profile to compute the flux and flux error.
```
# Flux points are computed on stacked observation
stacked_dataset = datasets.stack_reduce(name="stacked")
print(stacked_dataset)
energy_edges = MapAxis.from_energy_bounds("1 TeV", "30 TeV", nbin=5).edges
stacked_dataset.models = model
fpe = FluxPointsEstimator(energy_edges=energy_edges, source="source-gc")
flux_points = fpe.run(datasets=[stacked_dataset])
flux_points.table_formatted
```
### Plot
Let's plot the spectral model and points. You could do it directly, but for convenience we bundle the model and the flux points in a `FluxPointDataset`:
```
flux_points_dataset = FluxPointsDataset(data=flux_points, models=model)
flux_points_dataset.plot_fit();
```
## Exercises
* Re-run the analysis above, varying some analysis parameters, e.g.
* Select a few other observations
* Change the energy band for the map
* Change the spectral model for the fit
* Change the energy binning for the spectral points
* Change the target. Make a sky image and spectrum for your favourite source.
* If you don't know any, the Crab nebula is the "hello world!" analysis of gamma-ray astronomy.
```
# print('hello world')
# SkyCoord.from_name('crab')
```
## What next?
* This notebook showed an example of a first CTA analysis with Gammapy, using simulated 1DC data.
* Let us know if you have any question or issues!
```
import pandas as pd
import matplotlib.pyplot as plt
import folium
from folium.plugins import MarkerCluster
%matplotlib inline
australia=pd.read_csv("https://frenzy86.s3.eu-west-2.amazonaws.com/fav/australia_cleaned.csv")
australia.head()
plt.figure(figsize=(18,12))
plt.hist(australia["confidence"],label="Sicurezza Incendi",color="red");
plt.xlabel("Livello di sicurezza degli incendi")
plt.ylabel("Numero di incendi")
plt.title("Grafico numero incendi e Livello di sicurezza")
plt.legend(loc=2);
plt.figure(figsize=(18,12))
plt.scatter(australia["confidence"],australia ["brightness"], label ="Sicurezza Incendi", color="orange");
plt.ylabel("Luminosità a 21 Kelvin")
plt.xlabel('Livello di sicurezza degli incendi')
plt.title("Grafico Livello di sicurezza incendi e la luminosità 21 Kelvin")
plt.legend(loc=2);
plt.figure(figsize=(18,12))
plt.scatter(australia["confidence"],australia ["bright_t31"], label ="Sicurezza Incendi", color="yellow");
plt.ylabel("Luminosità a 31 Kelvin")
plt.xlabel('Livello di sicurezza degli incendi')
plt.title("Grafico Livello di sicurezza incendi e la luminosità 31 Kelvin")
plt.legend(loc=2);
pd.crosstab(australia["sat_Terra"], australia["time_N"]).plot(kind="bar",figsize=(20,10));
plt.title("Rapporto tra gli incendi raccolti dal satellite terrestre in notturni e diurni")
plt.ylabel("N° di incendi riconosciuti dai satelliti")
plt.xlabel("Tipo di fuoco, notturno o diurno");
australia_1 =australia.copy()
australia_1.head()
data=australia_1[(australia_1["confidence"]>= 70)]
data.head()
data.shape
# Create the latitude and longitude lists
lat=data["latitude"].values.tolist()
long=data["longitude"].values.tolist()
# Map of Australia
map1=folium.Map([-25.274398,133.775136],zoom_start=4)
# Create a marker cluster for the map
australia_cluster = MarkerCluster()
for latV,longV in zip(lat,long):
folium.Marker(location=[latV,longV]).add_to(australia_cluster)
# Add the cluster to the map we want to display
australia_cluster.add_to(map1);
map1
localizacion=australia_1[(australia_1["frp"]>= 2500)]
localizacion.head()
map_2 = folium.Map([-25.274398,133.775136],zoom_start=4.5,tiles='Stamen Terrain')
lat_2 = localizacion["latitude"].values.tolist()
long_2 = localizacion["longitude"].values.tolist()
australia_cluster_2 = MarkerCluster().add_to(map_2)
for lat_2,long_2 in zip(lat_2,long_2):
folium.Marker([lat_2,long_2]).add_to(australia_cluster_2)
map_2
```
Do you want to see the fires that broke out after 15 September 2019?
```
mes = australia_1[(australia_1["acq_date"]>= "2019-09-15")]
mes.head()
mes.describe()
map_sett = folium.Map([-25.274398,133.775136], zoom_start=4)
lat_3 = mes["latitude"].values.tolist()
long_3 = mes["longitude"].values.tolist()
australia_cluster_3 = MarkerCluster().add_to(map_sett)
for lat_3,long_3 in zip(lat_3,long_3):
folium.Marker([lat_3,long_3]).add_to(australia_cluster_3)
map_sett
```
# Play with Folium
```
# 44.4807035, 11.3712528: coordinates used for the map below
import folium
m1 = folium.Map(location=[44.48, 11.37], tiles='openstreetmap', zoom_start=18)
m1.save('map1.html')
m1
m1.save("filename.html")  # folium maps are saved as HTML
```
| github_jupyter |
<a href="https://colab.research.google.com/github/combineinator/combine-inator-acikhack2021/blob/main/Combineinator_Library.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
```
from google.colab import drive
drive.mount('/content/drive')
```
## CombineInator (parent class)
```
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM, pipeline


class CombineInator:
def __init__(self):
self.source = ""
def translate_model(self, source):
if source == "en":
tokenizer_trs = AutoTokenizer.from_pretrained("Helsinki-NLP/opus-mt-en-trk")
model_trs = AutoModelForSeq2SeqLM.from_pretrained("Helsinki-NLP/opus-mt-en-trk")
pipe_trs = "translation_en_to_trk"
elif source == "tr":
tokenizer_trs = AutoTokenizer.from_pretrained("Helsinki-NLP/opus-mt-tr-en")
model_trs = AutoModelForSeq2SeqLM.from_pretrained("Helsinki-NLP/opus-mt-tr-en")
pipe_trs = "translation_tr_to_en"
return model_trs, tokenizer_trs, pipe_trs
def translate(self, pipe, model, tokenizer, response):
translator = pipeline(pipe, model=model, tokenizer=tokenizer)
        # translate the obtained sentences into the target language:
trans = translator(response)[0]["translation_text"]
return trans
```
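A minimal usage sketch of the class above (it assumes the Helsinki-NLP checkpoints download successfully; the example sentence is just an illustration):
```
ci = CombineInator()
model_trs, tokenizer_trs, pipe_trs = ci.translate_model("tr")
print(ci.translate(pipe_trs, model_trs, tokenizer_trs, "Bugün hava çok güzel."))
```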
## WikiWebScraper (child)
```
import requests
import re
from bs4 import BeautifulSoup
from tqdm import tqdm
from os.path import exists, basename, splitext
class WikiWebScraper(CombineInator):
def __init__(self):
self.__HEADERS_PARAM = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/88.0.4324.104 Safari/537.36"}
    def category_scraping_interface(self, CATEGORY_QUERY, LIMIT, SAVE_PATH, PAGE_PER_SAVE, REMOVE_NUMBERS, JUST_TITLE_ANALYSIS, TEXT_INTO_SENTENCES_PARAM):
        """
        Manages the scraping of categorical data.
        :param CATEGORY_QUERY: Category query to be scraped.
        :type CATEGORY_QUERY: str
        :param SAVE_PATH: Path where the extracted data will be saved.
        :type SAVE_PATH: str
        :param LIMIT: Maximum amount of data to extract. If not given, all data is fetched.
        :type LIMIT: int
        :param PAGE_PER_SAVE: Saves the extracted categorical data in batches of this many pages.
        :type PAGE_PER_SAVE: int
        :param TEXT_INTO_SENTENCES_PARAM: Determines whether the extracted data is saved as sentences or as whole texts.
        :type TEXT_INTO_SENTENCES_PARAM: bool
        :param REMOVE_NUMBERS: Determines whether digits are removed from the extracted data.
        :type REMOVE_NUMBERS: bool
        :param JUST_TITLE_ANALYSIS: Collects only the title information of the pages.
        :type JUST_TITLE_ANALYSIS: bool
        """
sub_list = []
page_list = []
text_list = []
page_list, sub_list = self.first_variable(CATEGORY_QUERY, (LIMIT - len(text_list)))
fv = True
if page_list and sub_list is not None:
with tqdm(total=LIMIT, desc="Sayfa taranıyor.") as pbar:
while len(page_list) < LIMIT:
if fv is True:
pbar.update(len(page_list))
fv = False
temp_soup = ""
if len(sub_list) == 0:
break
temp_soup = self.sub_scraper(sub_list[0])
if (temp_soup == False):
break
del sub_list[0]
sub_list = sub_list + self.sub_category_scraper(temp_soup)
temp_page_scraper = self.page_scraper(temp_soup, (LIMIT - len(page_list)))
if temp_page_scraper is not None:
for i in temp_page_scraper:
if i not in page_list:
page_list.append(i)
pbar.update(1)
if len(sub_list) == 0:
sub_list = sub_list + self.sub_category_scraper(temp_soup)
temp_range = 0
loop_counter = 0
if JUST_TITLE_ANALYSIS is False:
for i in range(PAGE_PER_SAVE, len(page_list)+PAGE_PER_SAVE, PAGE_PER_SAVE):
if loop_counter == (len(page_list) // PAGE_PER_SAVE):
PATH = SAVE_PATH + "/" + CATEGORY_QUERY + "_" + str(temp_range) + " - " + str(len(page_list)) + ".txt"
temp_text_list = self.text_into_sentences(self.text_scraper(page_list[temp_range:i], (len(page_list) % PAGE_PER_SAVE)), REMOVE_NUMBERS,TEXT_INTO_SENTENCES_PARAM)
else:
PATH = SAVE_PATH + "/" + CATEGORY_QUERY + "_" + str(temp_range) + " - " + str(i) + ".txt"
temp_text_list = self.text_into_sentences(self.text_scraper(page_list[temp_range:i], PAGE_PER_SAVE), REMOVE_NUMBERS, TEXT_INTO_SENTENCES_PARAM)
text_list += temp_text_list
self.save_to_csv(PATH, temp_text_list)
temp_range = i
loop_counter += 1
print("\n\n"+str(len(page_list)) + " adet sayfa bulundu ve içerisinden " + str(len(text_list)) + " satır farklı metin ayrıştırıldı.")
return text_list
else:
PATH = SAVE_PATH + "/" + CATEGORY_QUERY + "_" + str(len(page_list)) + "_page_links" + ".txt"
self.save_to_csv(PATH, page_list, JUST_TITLE_ANALYSIS)
print("\n\n"+str(len(page_list)) + " adet sayfa bulundu ve sayfaların adresleri \"" + PATH + "\" konumunda kaydedildi.")
return page_list
else:
print("Aranan kategori bulunamadı.")
    def categorical_scraper(self, CATEGORY_QUERY, save_path, LIMIT=-1, page_per_save=10000, text_into_sentences_param=True, remove_numbers=False, just_title_analysis=False):
        """
        Used to scrape data from Wikipedia by category.
        :param CATEGORY_QUERY: Category query to be scraped.
        :type CATEGORY_QUERY: str
        :param save_path: Path where the extracted data will be saved.
        :type save_path: str
        :param LIMIT: Maximum amount of data to extract. If not given, all data is fetched.
        :type LIMIT: int
        :param page_per_save: Saves the extracted categorical data in batches of this many pages.
        :type page_per_save: int
        :param text_into_sentences_param: Determines whether the extracted data is saved as sentences or as whole texts.
        :type text_into_sentences_param: bool
        :param remove_numbers: Determines whether digits are removed from the extracted data.
        :type remove_numbers: bool
        :param just_title_analysis: Collects only the title information of the pages.
        :type just_title_analysis: bool
        """
if LIMIT == -1:
LIMIT = 9999999
CATEGORY_QUERY = CATEGORY_QUERY.replace(" ","_")
return_list = self.category_scraping_interface(CATEGORY_QUERY, LIMIT, save_path, page_per_save, remove_numbers, just_title_analysis, text_into_sentences_param)
if return_list is None:
return []
else:
return return_list
def text_scraper_from_pagelist(self, page_list_path, save_path, page_per_save=10000, remove_numbers=False, text_into_sentences_param=True, RANGE=None):
"""
Scrapes text from a previously saved list of Wikipedia page links.
:param page_list_path: Path of the file containing the collected page links.
:type page_list_path: str
:param save_path: Path where the extracted data will be saved.
:type save_path: str
:param page_per_save: Saves the scraped data at the given interval.
:type page_per_save: int
:param text_into_sentences_param: Determines whether the extracted data is saved as sentences or as raw text.
:type text_into_sentences_param: bool
:param remove_numbers: Determines whether digits are removed from the extracted data.
:type remove_numbers: bool
:param RANGE: Range of the data to extract, used as "RANGE = [500,1000]". If not given, all data is extracted.
:type RANGE: list
"""
page_list = []
text_list = []
with open(page_list_path, 'r') as f:
page_list = [line.strip() for line in f]
if RANGE is not None:
page_list = page_list[RANGE[0]:RANGE[1]]
temp_range = 0
loop_counter = 0
for i in range(page_per_save, len(page_list)+page_per_save, page_per_save):
if loop_counter == (len(page_list) // page_per_save):
PATH = save_path + "/" + "scraped_page" + "_" + str(temp_range) + " - " + str(len(page_list)) + ".txt"
temp_text_list = self.text_into_sentences(self.text_scraper(page_list[temp_range:i], (len(page_list) % page_per_save), True), remove_numbers, text_into_sentences_param)
else:
PATH = save_path + "/" + "scraped_page" + "_" + str(temp_range) + " - " + str(i) + ".txt"
temp_text_list = self.text_into_sentences(self.text_scraper(page_list[temp_range:i], page_per_save, True), remove_numbers, text_into_sentences_param)
text_list += temp_text_list
            self.save_to_csv(PATH, temp_text_list)
temp_range = i
loop_counter += 1
print("\n\"" + page_list_path + "\" konumundaki " + str(len(page_list)) + " adet sayfa içerisinden " + str(len(text_list)) + " satır metin ayrıştırıldı.")
return text_list
def page_scraper(self, page_soup, LIMIT):
"""
Returns the categorical content pages found in the given Wikipedia SOUP object.
:param page_soup: SOUP object of the Wikipedia category page.
:param LIMIT: Maximum number of pages to extract.
:type LIMIT: int
"""
page_list = []
try:
pages = page_soup.find("div", attrs={"id": "mw-pages"}).find_all("a")
for page in pages[1:]:
if len(page_list) == LIMIT:
break
else:
page_list.append([page.text, page["href"]])
return page_list
except:
pass
def sub_category_scraper(self, sub_soup):
"""
Returns the subcategories found in the given Wikipedia SOUP object.
:param sub_soup: SOUP object of the subcategory page.
"""
sub_list = []
try:
sub_categories = sub_soup.find_all("div", attrs={"class": "CategoryTreeItem"})
for sub in sub_categories[1:]:
sub_list.append([sub.a.text, sub.a["href"]])
return sub_list
except:
print("Aranan kategori için yeterli sayfa bulunamadı.")
def sub_scraper(self, sub):
"""
Returns the SOUP object of the Wikipedia category/subcategory passed to the function.
:param sub: Contains the link of the subcategory page.
"""
try:
req = requests.get("https://tr.wikipedia.org" + str(sub[1]), headers=self.__HEADERS_PARAM)
soup = BeautifulSoup(req.content, "lxml")
return soup
except:
print("\nAlt kategori kalmadı")
return False
def text_scraper(self, page_list, LIMIT, IS_FROM_TXT=False):
"""
Scrapes the pages in a previously extracted page list and returns the list of texts they contain.
:param page_list: List of pages to scrape.
:param LIMIT: Maximum number of pages to extract.
:type LIMIT: int
:param IS_FROM_TXT: Indicates whether the page list was loaded from a text file.
"""
text_list = []
with tqdm(total=LIMIT, desc="Sayfa Ayrıştırılıyor") as pbar:
for page in page_list:
if len(text_list) == LIMIT:
break
if IS_FROM_TXT is False:
req = requests.get("https://tr.wikipedia.org" + str(page[1]), headers=self.__HEADERS_PARAM)
else:
req = requests.get("https://tr.wikipedia.org" + str(page), headers=self.__HEADERS_PARAM)
soup = BeautifulSoup(req.content, "lxml")
page_text = soup.find_all("p")
temp_text = ""
for i in page_text[1:]:
temp_text = temp_text + i.text
text_list.append(temp_text)
pbar.update(1)
return text_list
def first_variable(self, CATEGORY_QUERY, LIMIT):
"""
Checks that the category given in the query exists and, if so, extracts the initial values.
:param CATEGORY_QUERY: Category query to scrape.
:type CATEGORY_QUERY: str
:param LIMIT: Maximum amount of data to extract. If not given, all data is fetched.
:type LIMIT: int
"""
first_req = requests.get("https://tr.wikipedia.org/wiki/Kategori:" + CATEGORY_QUERY, headers=self.__HEADERS_PARAM)
first_soup = BeautifulSoup(first_req.content, "lxml")
page_list = self.page_scraper(first_soup, LIMIT)
sub_list = self.sub_category_scraper(first_soup)
return page_list, sub_list
def text_into_sentences(self, texts, remove_numbers, text_into_sentences_param):
"""
Splits the text data into sentences.
:param texts: Text data to process.
:param remove_numbers: Controls whether numbers are removed.
:param text_into_sentences_param: Controls whether the texts are split into sentences.
"""
flatlist = []
sent_list = []
texts = self.sentence_cleaning(texts, remove_numbers)
if text_into_sentences_param is True:
for line in texts:
temp_line = re.split(r'(?<![IVX0-9]\S)(?<!\w\.\w.)(?<![A-Z][a-z]\.)(?<=\.|\?)\s', line)
for i in temp_line:
if len(i.split(" ")) > 3:
sent_list.append(i)
else:
sent_list = texts
flatlist = list(dict.fromkeys(self.flat(sent_list, flatlist)))
return flatlist
def flat(self, sl,fl):
"""
Flattens the list after the texts have been split into sentences.
:param sl: Input (possibly nested) list.
:param fl: Flattened output list.
"""
for e in sl:
if type(e) == list:
                self.flat(e, fl)
elif len(e.split(" "))>3:
fl.append(e)
return fl
def sentence_cleaning(self, sentences, remove_numbers):
"""
Cleaning of the scraped Wikipedia data happens in this function.
:param sentences: Data set to be cleaned.
:param remove_numbers: Controls whether numbers are removed.
"""
return_list = []
if remove_numbers is False:
removing_func = '[^[a-zA-ZğüışöçĞÜIİŞÖÇ0-9.,!:;`?%&\-\'" ]'
else:
removing_func = '[^[a-zA-ZğüışöçĞÜIİŞÖÇ.,!:;`?%&\-\'" ]'
for input_text in sentences:
try:
input_text = re.sub(r'(\[.*?\])', '', input_text)
input_text = re.sub(r'(\(.*?\))', '', input_text)
input_text = re.sub(r'(\{.*?\})', '', input_text)
input_text = re.sub(removing_func, '', input_text)
input_text = re.sub("(=+(\s|.)*)", "", input_text)
input_text = re.sub("(\s{2,})", "", input_text)
input_text = input_text.replace("''", "")
input_text = input_text.replace("\n", "")
return_list.append(input_text)
except:
pass
return return_list
def save_to_csv(self, PATH, data, is_just_title_analysis=False):
"""
Saves the data to a file, one entry per line.
:param PATH: Path to save to.
:param data: Data to save.
:param is_just_title_analysis: Indicates whether only title analysis was performed.
"""
if is_just_title_analysis is False:
with open(PATH, "w") as output:
for i in data:
output.write(i+"\n")
else:
temp_data = []
for i in data:
temp_data.append(i[1])
with open(PATH, "w") as output:
for i in temp_data:
output.write(i+"\n")
```
### Example usage
```
library = WikiWebScraper()
PATH = "/content/"
library.categorical_scraper("savaş", PATH, 20, text_into_sentences_param=False)
```
## speechModule (child)
```
!pip install transformers
!pip install simpletransformers
from os import path
from IPython.display import Audio
from transformers import pipeline, AutoTokenizer, AutoModelForSeq2SeqLM, Wav2Vec2Processor, Wav2Vec2ForCTC
import librosa
import torch
class speechModule(CombineInator):
def __init__(self):
self.SAMPLING_RATE = 16_000
self.git_repo_url = 'https://github.com/CorentinJ/Real-Time-Voice-Cloning.git'
self.project_name = splitext(basename(self.git_repo_url))[0]
def get_repo(self):
"""
Fetches the voice-cloning library used when converting text to speech.
"""
if not exists(self.project_name):
# clone and install
!git clone -q --recursive {self.git_repo_url}
# install dependencies
!cd {self.project_name} && pip install -q -r requirements.txt
!pip install -q gdown
!apt-get install -qq libportaudio2
!pip install -q https://github.com/tugstugi/dl-colab-notebooks/archive/colab_utils.zip
# download pretrained model
!cd {self.project_name} && wget https://github.com/blue-fish/Real-Time-Voice-Cloning/releases/download/v1.0/pretrained.zip && unzip -o pretrained.zip
from sys import path as syspath
syspath.append(self.project_name)
def wav2vec_model(self, source):
"""
Selects the wav2vec model for the given language, used when converting speech to text.
:param source: language of the audio file ("tr" / "en")
:type source: str
"""
processor = None
model = None
if source == "en":
processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-large-960h")
model = Wav2Vec2ForCTC.from_pretrained("facebook/wav2vec2-large-960h")
elif source =="tr":
processor = Wav2Vec2Processor.from_pretrained("m3hrdadfi/wav2vec2-large-xlsr-turkish")
model = Wav2Vec2ForCTC.from_pretrained("m3hrdadfi/wav2vec2-large-xlsr-turkish")
return model, processor
def speech2text(self, audio_file, model, processor, language):
"""
Converts the given input audio to text.
:param audio_file: path of the audio file
:type audio_file: str
:param model: model pulled from the huggingface library, used to convert speech to text
:param processor: processor pulled from the huggingface library, used to convert speech to text
:param language: language of the input audio file ("tr" / "en")
:type language: str
"""
#load any audio file of your choice
speech, rate = librosa.load(audio_file, sr=self.SAMPLING_RATE)
input_values = processor(speech, sampling_rate=self.SAMPLING_RATE, return_tensors = 'pt').input_values
#Store logits (non-normalized predictions)
logits = model(input_values).logits
#Store predicted id's
predicted_ids = torch.argmax(logits, dim =-1)
#decode the audio to generate text
response = processor.decode(predicted_ids[0]).lower()
if language == "en":
response = ">>tur<< " + response
return response
def text2speech(self, audio, translation):
"""
Converts text to speech, cloning the voice in the given audio file.
:param audio: path of the audio file whose voice will be cloned
:type audio: str
:param translation: translated text
:type translation: str
"""
from numpy import pad as pad
from synthesizer.inference import Synthesizer
from encoder import inference as encoder
from vocoder import inference as vocoder
from pathlib import Path
encoder.load_model(self.project_name / Path("encoder/saved_models/pretrained.pt"))
synthesizer = Synthesizer(self.project_name / Path("synthesizer/saved_models/pretrained/pretrained.pt"))
vocoder.load_model(self.project_name / Path("vocoder/saved_models/pretrained/pretrained.pt"))
embedding = encoder.embed_utterance(encoder.preprocess_wav(audio, self.SAMPLING_RATE))
specs = synthesizer.synthesize_spectrograms([translation], [embedding])
generated_wav = vocoder.infer_waveform(specs[0])
generated_wav = pad(generated_wav, (0, self.SAMPLING_RATE), mode="constant")
return Audio(generated_wav, rate=self.SAMPLING_RATE, autoplay=True)
def speech2text2trans2speech(self, filename:str, source_lang:str, output_type:str = "text"):
"""
Converts the given audio file to text, translates it into the target language, and
returns the result either as text or as speech.
:param filename: name of the audio file
:type filename: str
:param source_lang: language of the audio file ("en"/"tr")
:type source_lang: str
:param output_type: output format, either "text" or "speech"
:type output_type: str
"""
output_types = ["text", "speech"]
source_languages = ["en", "tr"]
if source_lang not in source_languages:
print("Kaynak dil olarak yalnızca 'en' ve 'tr' parametreleri kullanılabilir.")
return None
if output_type not in output_types:
print("Çıkış türü için yalnızca 'text' ve 'speech' parametreleri desteklenmektedir.")
return None
if source_lang == "en" and output_type=="speech":
print("Üzgünüz, text2speech modülümüzde Türkçe dil desteği bulunmamaktadır.\n")
return None
model_trs, tokenizer_trs, pipe_trs = CombineInator.translate_model(self, source_lang)
model_s2t, processor_s2t = self.wav2vec_model(source_lang)
input_text = self.speech2text(filename, model_s2t, processor_s2t, source_lang)
print(input_text)
translation = CombineInator.translate(self, pipe_trs, model_trs, tokenizer_trs, input_text)
if output_type == "text":
return translation
else:
print("\n" + translation + "\n")
return self.text2speech(filename, translation)
```
### Example usage
```
filename = "_path_to_wav_file" # path to the wav file must be provided
speechM = speechModule()
speechM.get_repo()
speechM.speech2text2trans2speech(filename, "tr", "speech")
```
## Lxmert (child)
```
!git clone https://github.com/hila-chefer/Transformer-MM-Explainability
import os
os.chdir(f'./Transformer-MM-Explainability')
!pip install -r requirements.txt
%cd Transformer-MM-Explainability
from lxmert.lxmert.src.modeling_frcnn import GeneralizedRCNN
import lxmert.lxmert.src.vqa_utils as utils
from lxmert.lxmert.src.processing_image import Preprocess
from transformers import LxmertTokenizer
from lxmert.lxmert.src.huggingface_lxmert import LxmertForQuestionAnswering
from lxmert.lxmert.src.lxmert_lrp import LxmertForQuestionAnswering as LxmertForQuestionAnsweringLRP
from tqdm import tqdm
from lxmert.lxmert.src.ExplanationGenerator import GeneratorOurs, GeneratorBaselines, GeneratorOursAblationNoAggregation
import random
import numpy as np
import cv2
import torch
import matplotlib.pyplot as plt
from PIL import Image
import torchvision.transforms as transforms
from captum.attr import visualization
import requests
class Lxmert(CombineInator):
def __init__(self):
self.OBJ_URL = "https://raw.githubusercontent.com/airsplay/py-bottom-up-attention/master/demo/data/genome/1600-400-20/objects_vocab.txt"
self.ATTR_URL = "https://raw.githubusercontent.com/airsplay/py-bottom-up-attention/master/demo/data/genome/1600-400-20/attributes_vocab.txt"
self.VQA_URL = "https://raw.githubusercontent.com/airsplay/lxmert/master/data/vqa/trainval_label2ans.json"
self.model_lrp = self.ModelUsage()
self.lrp = GeneratorOurs(self.model_lrp)
self.baselines = GeneratorBaselines(self.model_lrp)
self.vqa_answers = utils.get_data(self.VQA_URL)
class ModelUsage:
"""
Class structure for model usage.
"""
def __init__(self, use_lrp=True):
self.VQA_URL = "https://raw.githubusercontent.com/airsplay/lxmert/master/data/vqa/trainval_label2ans.json"
self.vqa_answers = utils.get_data(self.VQA_URL)
# load models and model components
self.frcnn_cfg = utils.Config.from_pretrained("unc-nlp/frcnn-vg-finetuned")
self.frcnn_cfg.MODEL.DEVICE = "cuda"
self.frcnn = GeneralizedRCNN.from_pretrained("unc-nlp/frcnn-vg-finetuned", config=self.frcnn_cfg)
self.image_preprocess = Preprocess(self.frcnn_cfg)
self.lxmert_tokenizer = LxmertTokenizer.from_pretrained("unc-nlp/lxmert-base-uncased")
if use_lrp:
self.lxmert_vqa = LxmertForQuestionAnsweringLRP.from_pretrained("unc-nlp/lxmert-vqa-uncased").to("cuda")
else:
self.lxmert_vqa = LxmertForQuestionAnswering.from_pretrained("unc-nlp/lxmert-vqa-uncased").to("cuda")
self.lxmert_vqa.eval()
self.model = self.lxmert_vqa
# self.vqa_dataset = vqa_data.VQADataset(splits="valid")
def forward(self, item):
PATH, question = item
self.image_file_path = PATH
# run frcnn
images, sizes, scales_yx = self.image_preprocess(PATH)
output_dict = self.frcnn(
images,
sizes,
scales_yx=scales_yx,
padding="max_detections",
max_detections= self.frcnn_cfg.max_detections,
return_tensors="pt"
)
inputs = self.lxmert_tokenizer(
question,
truncation=True,
return_token_type_ids=True,
return_attention_mask=True,
add_special_tokens=True,
return_tensors="pt"
)
self.question_tokens = self.lxmert_tokenizer.convert_ids_to_tokens(inputs.input_ids.flatten())
self.text_len = len(self.question_tokens)
# Very important that the boxes are normalized
normalized_boxes = output_dict.get("normalized_boxes")
features = output_dict.get("roi_features")
self.image_boxes_len = features.shape[1]
self.bboxes = output_dict.get("boxes")
self.output = self.lxmert_vqa(
input_ids=inputs.input_ids.to("cuda"),
attention_mask=inputs.attention_mask.to("cuda"),
visual_feats=features.to("cuda"),
visual_pos=normalized_boxes.to("cuda"),
token_type_ids=inputs.token_type_ids.to("cuda"),
return_dict=True,
output_attentions=False,
)
return self.output
def ceviri(self, text: str, lang_src='tr'):
"""
Translates the given text into the target language.
:param text: original text
:type text: str
:param lang_src: language of the source text (source language)
:type lang_src: str
:return: translated text
"""
if lang_src == "en":
text = ">>tur<< " + text
model, tokenizer, pipeline = CombineInator.translate_model(self, lang_src)
return (CombineInator.translate(self, pipeline, model, tokenizer, text))
def save_image_vis(self, image_file_path, bbox_scores):
"""
Draws the answer to the question on the image and saves it.
:param image_file_path: path of the image
:type image_file_path: str
:param bbox_scores: tensor containing the scores of the detected objects
:type bbox_scores: tensor
"""
_, top_bboxes_indices = bbox_scores.topk(k=1, dim=-1)
img = cv2.imread(image_file_path)
mask = torch.zeros(img.shape[0], img.shape[1])
for index in range(len(bbox_scores)):
[x, y, w, h] = self.model_lrp.bboxes[0][index]
curr_score_tensor = mask[int(y):int(h), int(x):int(w)]
new_score_tensor = torch.ones_like(curr_score_tensor)*bbox_scores[index].item()
mask[int(y):int(h), int(x):int(w)] = torch.max(new_score_tensor,mask[int(y):int(h), int(x):int(w)])
mask = (mask - mask.min()) / (mask.max() - mask.min())
mask = mask.unsqueeze_(-1)
mask = mask.expand(img.shape)
img = img * mask.cpu().data.numpy()
cv2.imwrite('lxmert/lxmert/experiments/paper/new.jpg', img)
def get_image_and_question(self, img_path:str, soru:str):
"""
Computes relevance scores for the image and question given as input.
:param img_path: path of the image to be queried
:type img_path: str
:param soru: Turkish question to ask the model about the image
:type soru: str
:return: image_scores, text_scores
"""
ing_soru = self.ceviri(soru, "tr")
R_t_t, R_t_i = self.lrp.generate_ours((img_path, ing_soru), use_lrp=False, normalize_self_attention=True, method_name="ours")
return R_t_i[0], R_t_t[0]
def resim_uzerinden_soru_cevap(self, PATH:str, turkce_soru:str):
"""
Allows the given input image to be queried with the supplied questions.
PATH: path of the image
turkce_soru: question whose answer will be searched for in the image
"""
#If the image to be queried is not available locally but is an online image:
if PATH.startswith("http"):
im = Image.open(requests.get(PATH, stream=True).raw)
im.save('lxmert/lxmert/experiments/paper/online_image.jpg', 'JPEG')
PATH = 'lxmert/lxmert/experiments/paper/online_image.jpg'
image_scores, text_scores = self.get_image_and_question(PATH, turkce_soru)
self.save_image_vis(PATH, image_scores)
orig_image = Image.open(self.model_lrp.image_file_path)
fig, axs = plt.subplots(ncols=2, figsize=(20, 5))
axs[0].imshow(orig_image);
axs[0].axis('off');
axs[0].set_title('original');
masked_image = Image.open('lxmert/lxmert/experiments/paper/new.jpg')
axs[1].imshow(masked_image);
axs[1].axis('off');
axs[1].set_title('masked');
text_scores = (text_scores - text_scores.min()) / (text_scores.max() - text_scores.min())
vis_data_records = [visualization.VisualizationDataRecord(text_scores,0,0,0,0,0,self.model_lrp.question_tokens,1)]
visualization.visualize_text(vis_data_records)
cevap = self.ceviri(self.vqa_answers[self.model_lrp.output.question_answering_score.argmax()], lang_src='en')
print("ANSWER:", cevap)
```
### Example usage
```
lxmert = Lxmert()
PATH = '_path_to_jpg_' # path to the jpg file must be provided
turkce_soru = 'Resimde neler var'
lxmert.resim_uzerinden_soru_cevap(PATH, turkce_soru)
```
## Web Interface
```
!pip install flask-ngrok
from flask import Flask, redirect, url_for, render_template, request, flash
from flask_ngrok import run_with_ngrok
# The paths of the folders inside the web_dependencies directory must be given here.
template_folder = '_path_to_templates_folder_'
static_folder = '_path_to_static_folder_'
app = Flask(__name__, template_folder=template_folder, static_folder=static_folder)
run_with_ngrok(app) # Start ngrok when app is run
@app.route("/", methods=['GET', 'POST'])
def home():
if request.method == 'POST':
konu = request.form["topic"]
library.categorical_scraper(konu, PATH, 20, text_into_sentences_param=False)
return render_template("index.html")
if __name__ == "__main__":
#app.debug = True
app.run()
```
| github_jupyter |
```
import numpy as np
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
import tensorflow_probability as tfp
# -- plotting
import matplotlib as mpl
import matplotlib.pyplot as plt
mpl.rcParams['text.usetex'] = True
mpl.rcParams['font.family'] = 'serif'
mpl.rcParams['axes.linewidth'] = 1.5
mpl.rcParams['axes.xmargin'] = 1
mpl.rcParams['xtick.labelsize'] = 'x-large'
mpl.rcParams['xtick.major.size'] = 5
mpl.rcParams['xtick.major.width'] = 1.5
mpl.rcParams['ytick.labelsize'] = 'x-large'
mpl.rcParams['ytick.major.size'] = 5
mpl.rcParams['ytick.major.width'] = 1.5
mpl.rcParams['legend.frameon'] = False
#import tensorflow as tf
"""
Gumbel Softmax functions borrowed from http://blog.evjang.com/2016/11/tutorial-categorical-variational.html
"""
def sample_gumbel(shape, eps=1e-7):
"""Sample from Gumbel(0, 1)"""
U = tf.random_uniform(shape,minval=0,maxval=1)
return -tf.log(-tf.log(U + eps) + eps)
def gumbel_softmax_sample(logits, temperature):
""" Draw a sample from the Gumbel-Softmax distribution"""
y = logits + sample_gumbel(tf.shape(logits))
return tf.nn.softmax( y / temperature)
def gumbel_softmax(logits, temperature, hard=False):
"""Sample from the Gumbel-Softmax distribution and optionally discretize.
Args:
logits: [batch_size, n_class] unnormalized log-probs
temperature: non-negative scalar
hard: if True, take argmax, but differentiate w.r.t. soft sample y
Returns:
[batch_size,..., n_class] sample from the Gumbel-Softmax distribution.
If hard=True, then the returned sample will be one-hot, otherwise it will
be a probability distribution that sums to 1 across classes
"""
y = gumbel_softmax_sample(logits, temperature)
if hard:
k = tf.shape(logits)[-1]
#y_hard = tf.cast(tf.one_hot(tf.argmax(y,1),k), y.dtype)
y_hard = tf.cast(tf.equal(y,tf.reduce_max(y,-1,keep_dims=True)),y.dtype)
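        # Straight-through estimator: the forward pass returns the discrete y_hard,
        # while gradients flow through the soft sample y (stop_gradient blocks y_hard - y).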
y = tf.stop_gradient(y_hard - y) + y
return y
d = gumbel_softmax(np.array([np.log(0.5), np.log(0.5)] ), 0.5, hard=True)
tfp.__version__
sess = tf.Session()
_Mmin = tf.get_variable(name='mass', initializer=13.2, dtype=tf.float32)
Mhalo = tf.convert_to_tensor(np.random.uniform(11., 14., 1000), dtype=tf.float32)
siglogm = tf.convert_to_tensor(0.2, dtype=tf.float32)
temperature = 0.5
def Ncen(Mmin):
# mean occupation of centrals
return tf.clip_by_value(0.5 * (1+tf.math.erf((Mhalo - Mmin)/siglogm)),1e-4,1-1e-4)
def hod(Mmin):
p = Ncen(Mmin)
samp = gumbel_softmax(tf.stack([tf.log(p), tf.log(1.-p)],axis=1), temperature, hard=True)
return samp[...,0]
def numden(Mmin):
return tf.reduce_sum(hod(Mmin))
ncen,mh,nh = sess.run([Ncen(12.5), Mhalo, hod(12.5)] )
plt.scatter(mh, (ncen))
plt.xlim(11, 13.5)
plt.scatter(mh, (ncen), c='k')
plt.scatter(mh, nh)
plt.xlim(11., 13.5)
Mmin_true = 12.5
loss = (numden(Mmin_true) - numden(_Mmin))**2
opt = tf.train.AdamOptimizer(learning_rate=0.01)
opt_op = opt.minimize(loss)
sess.run(tf.global_variables_initializer())
losses=[]
masses=[]
for i in range(200):
_,l,m = sess.run([opt_op, loss, _Mmin])
losses.append(l)
masses.append(m)
losses
%pylab inline
plot(losses)
plot(masses)
axhline(Mmin_true, color='r', label='True Mmin')
xlim(0,200)
xlabel('Number of iterations')
ylabel('Mmin')
legend()
```
| github_jupyter |
```
%matplotlib inline
from matplotlib import style
style.use('fivethirtyeight')
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import datetime as dt
```
# Reflect Tables into SQLAlchemy ORM
```
# Python SQL toolkit and Object Relational Mapper
import sqlalchemy
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import Session
from sqlalchemy import create_engine, func
engine = create_engine("sqlite:///Resources/hawaii.sqlite")
# reflect an existing database into a new model
Base = automap_base()
# reflect the tables
Base.prepare(engine, reflect=True)
# We can view all of the classes that automap found
Base.classes.keys()
# Save references to each table
Measurements = Base.classes.measurement
Stations = Base.classes.station
# Create our session (link) from Python to the DB
session = Session(engine)
```
# Exploratory Climate Analysis
```
# Design a query to retrieve the last 12 months of precipitation data and plot the results
CY_precipitation = session.query(Measurements.date).filter(Measurements.date >= "2016-08-23").order_by(Measurements.date).all()
# # Calculate the date 1 year ago from the last data point in the database
LY_precipitation = session.query(Measurements.date).filter(Measurements.date).order_by(Measurements.date.desc()).first()
last_date = dt.date(2017,8,23) - dt.timedelta(days=365)
last_date
# # # Perform a query to retrieve the data and precipitation scores
last_year = session.query(Measurements.prcp, Measurements.date).order_by(Measurements.date.desc())
# # # Save the query results as a Pandas DataFrame and set the index to the date column, Sort the dataframe by date
date = []
precipitation = []
last_year_df = last_year.filter(Measurements.date >="2016-08-23")
for precip in last_year_df:
date.append(precip.date)
precipitation.append(precip.prcp)
LY_df = pd.DataFrame({
"Date": date,
"Precipitation": precipitation
})
LY_df.set_index("Date", inplace=True)
LY_df = LY_df.sort_index(ascending=True)
# # Use Pandas Plotting with Matplotlib to plot the data
LY_graph = LY_df.plot(figsize = (20,10), rot=90, title= "Hawaii Precipitation Data 8/23/16-8/23/17")
LY_graph.set_ylabel("Precipitation (in)")
plt.savefig("Images/PrecipitationAugtoAug.png")
# Use Pandas to calculate the summary statistics for the precipitation data
LY_df.describe()
# Design a query to show how many stations are available in this dataset?
station_count = session.query(Measurements.station, func.count(Measurements.station)).\
group_by(Measurements.station).all()
print("There are " + str(len(station_count)) + " stations in this dataset.")
# What are the most active stations? (i.e. what stations have the most rows)?
# List the stations and the counts in descending order.
active_stations = session.query(Measurements.station, func.count(Measurements.station)).\
group_by(Measurements.station).\
order_by(func.count(Measurements.station).desc()).all()
active_stations
# Using the station id from the previous query, calculate the lowest temperature recorded,
# highest temperature recorded, and average temperature of the most active station?
most_active = session.query(Measurements.station).group_by(Measurements.station).order_by(func.count(Measurements.station).desc()).first()
most_active_info = session.query(Measurements.station, func.min(Measurements.tobs), func.max(Measurements.tobs), func.avg(Measurements.tobs)).\
filter(Measurements.station == most_active[0]).all()
print("The lowest temperature at station " + most_active_info[0][0] + " is " + str(most_active_info[0][1]) + " degrees.")
print("The highest temperature at station " + most_active_info[0][0] + " is " + str(most_active_info[0][2]) + " degrees.")
print("The average temperature at station " + most_active_info[0][0] + " is " + str(round(most_active_info[0][3], 2)) + " degrees.")
# Choose the station with the highest number of temperature observations.
# Query the last 12 months of temperature observation data for this station and plot the results as a histogram
tobs_query = session.query(Measurements.station).group_by(Measurements.station).order_by(func.count(Measurements.id).desc()).first()
tobs = session.query(Measurements.station, Measurements.tobs).filter(Measurements.station == tobs_query[0]).filter(Measurements.date > last_date).all()
station_df = pd.DataFrame(tobs, columns = ['station', 'tobs'])
station_df.hist(column='tobs', bins=12)
plt.ylabel('Frequency')
plt.show()
```
| github_jupyter |
```
#import libraries
import cv2
import matplotlib.pyplot as plt
import numpy as np
from tensorflow.keras.models import model_from_json
import pickle
import tkinter as tk
from tkinter import filedialog
from tkinter import PhotoImage
from pygame import mixer
import matplotlib.pyplot as plt
import random
import os
#Taking picture from webcam and detecting emotion
#load model
model = model_from_json(open("model.json", "r").read())
#load weights
model.load_weights('model.h5')
emotion_dict = {0: "Angry", 1: "Disgusted", 2: "Fearful", 3: "Happy", 4: "Neutral", 5: "Sad", 6: "Surprised"}
cap=cv2.VideoCapture(0)
ret, frame =cap.read()
facecasc = cv2.CascadeClassifier('haarcascade_frontalface_alt2.xml')
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
faces = facecasc.detectMultiScale(gray,scaleFactor=1.3, minNeighbors=5)
for (x, y, w, h) in faces:
cv2.rectangle(frame, (x, y-50), (x+w, y+h+10), (255, 0, 0), 2)
roi_gray = gray[y:y + h, x:x + w]
cropped_img = np.expand_dims(np.expand_dims(cv2.resize(roi_gray, (48, 48)), -1), 0)
prediction = model.predict(cropped_img)
maxindex = int(np.argmax(prediction))
cv2.putText(frame, emotion_dict[maxindex], (x+20, y-60), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2, cv2.LINE_AA)
dominantemotion=emotion_dict[maxindex]
#cv2.imshow('Video', cv2.resize(frame,(1600,960),interpolation = cv2.INTER_CUBIC))
plt.imshow(frame)
cap.release()
cv2.destroyAllWindows()
#Playing music from detected emotion
class Playe(tk.Frame):
def __init__(self, dominantemotion , master=None):
super().__init__(master)
self.master = master
self.pack()
mixer.init()
self.dominantemotion=dominantemotion
print(self.dominantemotion)
self.playlist=[]
self.current = 0
self.paused = True
self.played = False
self.create_frames()
self.track_widgets()
self.control_widgets()
self.tracklist_widgets()
self.retrieve_songs()
def create_frames(self):
self.track = tk.LabelFrame(self, text='Song Track',
font=("times new roman",15,"bold"),
bg="grey",fg="white",bd=5,relief=tk.GROOVE)
self.track.config(width=410,height=300)
self.track.grid(row=0, column=0, padx=10)
self.tracklist = tk.LabelFrame(self, text=f'PlayList - {str(len(self.playlist))}',
font=("times new roman",15,"bold"),
bg="grey",fg="white",bd=5,relief=tk.GROOVE)
self.tracklist.config(width=190,height=400)
self.tracklist.grid(row=0, column=1, rowspan=3, pady=5)
self.controls = tk.LabelFrame(self,
font=("times new roman",15,"bold"),
bg="white",fg="white",bd=2,relief=tk.GROOVE)
self.controls.config(width=410,height=80)
self.controls.grid(row=2, column=0, pady=5, padx=10)
def track_widgets(self):
self.canvas = tk.Label(self.track, image=img)
self.canvas.configure(width=400, height=240)
self.canvas.grid(row=0,column=0)
self.songtrack = tk.Label(self.track, font=("times new roman",16,"bold"),
bg="white",fg="dark blue")
self.songtrack['text'] = 'VFSTR MP3 Player'
self.songtrack.config(width=30, height=1)
self.songtrack.grid(row=1,column=0,padx=10)
def control_widgets(self):
self.prev = tk.Button(self.controls, image=prev)
self.prev['command'] = self.prev_song
self.prev.grid(row=0, column=1)
self.pause = tk.Button(self.controls, image=pause)
self.pause['command'] = self.pause_song
self.pause.grid(row=0, column=2)
self.next = tk.Button(self.controls, image=next_)
self.next['command'] = self.next_song
self.next.grid(row=0, column=3)
self.volume = tk.DoubleVar(self)
self.slider = tk.Scale(self.controls, from_ = 0, to = 10, orient = tk.HORIZONTAL)
self.slider['variable'] = self.volume
self.slider.set(8)
mixer.music.set_volume(0.8)
self.slider['command'] = self.change_volume
self.slider.grid(row=0, column=4, padx=5)
def tracklist_widgets(self):
self.scrollbar = tk.Scrollbar(self.tracklist, orient=tk.VERTICAL)
self.scrollbar.grid(row=0,column=1, rowspan=5, sticky='ns')
self.list = tk.Listbox(self.tracklist, selectmode=tk.SINGLE,
yscrollcommand=self.scrollbar.set, selectbackground='sky blue')
self.enumerate_songs()
self.list.config(height=22)
self.list.bind('<Double-1>', self.play_song)
self.scrollbar.config(command=self.list.yview)
self.list.grid(row=0, column=0, rowspan=5)
def retrieve_songs(self):
self.songlist = []
        if(self.dominantemotion=='Angry'):  # match the capitalized labels in emotion_dict
directory = r'C:\Users\USER\Downloads\Code\Angry'
elif(self.dominantemotion=='Surprised'):
directory = r'C:\Users\USER\Downloads\Code\Sad'
elif(self.dominantemotion=='Happy'):
directory = r'C:\Users\USER\Downloads\Code\Happy'
elif(self.dominantemotion=='Neutral'):
directory = r'C:\Users\USER\Downloads\Code\Neutral'
        elif(self.dominantemotion=='Fearful'):
directory = r'C:\Users\USER\Downloads\Code\Fear'
#filedialog.askdirectory()
for root_, dirs, files in os.walk(directory):
for file in files:
if os.path.splitext(file)[1] == '.mp3':
path = (root_ + '/' + file).replace('\\','/')
self.songlist.append(path)
self.playlist = self.songlist
random.shuffle(self.playlist)
self.tracklist['text'] = f'PlayList - {str(len(self.playlist))}'
self.list.delete(0, tk.END)
self.enumerate_songs()
self.play_song()
def enumerate_songs(self):
for index, song in enumerate(self.playlist):
self.list.insert(index, os.path.basename(song))
def play_song(self, event=None):
if event is not None:
self.current = self.list.curselection()[0]
for i in range(len(self.playlist)):
self.list.itemconfigure(i, bg="white")
print(self.playlist[self.current])
mixer.music.load(self.playlist[self.current])
self.songtrack['anchor'] = 'w'
self.songtrack['text'] = os.path.basename(self.playlist[self.current])
self.pause['image'] = play
self.paused = False
self.played = True
self.list.activate(self.current)
self.list.itemconfigure(self.current, bg='sky blue')
mixer.music.play()
def pause_song(self):
if not self.paused:
self.paused = True
mixer.music.pause()
self.pause['image'] = pause
else:
if self.played == False:
self.play_song()
self.paused = False
mixer.music.unpause()
self.pause['image'] = play
def prev_song(self):
if self.current > 0:
self.current -= 1
self.list.itemconfigure(self.current + 1, bg='white')
else:
self.current = 0
self.play_song()
def next_song(self):
if self.current < len(self.playlist) - 1:
self.list.itemconfigure(self.current, bg='white')
self.current += 1
else:
self.current = 0
self.list.itemconfigure(len(self.playlist) - 1, bg='white')
self.play_song()
def change_volume(self, event=None):
self.v = self.volume.get()
mixer.music.set_volume(self.v / 10)
root = tk.Tk()
root.geometry('600x400')
root.wm_title('VFSTR Music Recommendation System')
img = PhotoImage(master=root,file=r'C:\Users\USER\Downloads\Code\images\music.gif')
next_ = PhotoImage(master=root,file = r'C:\Users\USER\Downloads\Code\images\next.gif')
prev = PhotoImage(master=root,file=r'C:\Users\USER\Downloads\Code\images\previous.gif')
play = PhotoImage(master=root,file=r'C:\Users\USER\Downloads\Code\images\play.gif')
pause = PhotoImage(master=root,file=r'C:\Users\USER\Downloads\Code\images\pause.gif')
app = Playe(dominantemotion, master=root)
app.mainloop()
pwd
```
| github_jupyter |
<a href="https://colab.research.google.com/github/humbertoguell/daa2020_1/blob/master/21octubre.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
```
2 + 3
# Sum the elements of an n x n 2D array `a2d` (assumed to be defined elsewhere,
# with a get_item(row, col) accessor), accumulating a per-row sum and a grand total.
total = 0
for r in range(n):
    sumaRenglon = 0
    for c in range(n):
        sumaRenglon += a2d.get_item(r,c)
        total += a2d.get_item(r,c)
def ejemplo1( n ):
c = n + 1
d = c * n
e = n * n
total = c + e - d
print(f"total={ total }")
ejemplo1( 99999 )
def ejemplo2( n ):
contador = 0
for i in range( n ) :
for j in range( n ) :
contador += 1
return contador
ejemplo2( 100 )
def ejemplo3( n ): # n=4
x = n * 2 # x = 8
y = 0 # y = 0
for m in range( 100 ): #3
y = x - n # y = 4
return y
ejemplo3(1000000000)
def ejemplo4( n ):
x = 3 * 3.1416 + n
y = x + 3 * 3 - n
z = x + y
return z
ejemplo4(9)
def ejemplo5( x ):
n = 10
for j in range( 0 , x , 1 ):
n = j + n
return n
ejemplo5(1000000)
from time import time
def ejemplo6( n ):
start_time = time()
data=[[[1 for x in range(n)] for x in range(n)]
for x in range(n)]
suma = 0
for d in range(n):
for r in range(n):
for c in range(n):
suma += data[d][r][c]
elapsed_time = time() - start_time
print("Tiempo transcurrido: %0.10f segundos." % elapsed_time)
return suma
ejemplo6( 500 )
def ejemplo7( n ):
count = 0
for i in range( n ) :
for j in range( 25 ) :
for k in range( n ):
count += 1
return count
def ejemplo7_2( n ):
count = 1
for i in range( n ) :
for j in range( 25 ) :
for k in range( n ):
count += 1
for k in range( n ):
count += 1
return count # 1 + 25n^2 +25n^2
ejemplo7_2(3)
def ejemplo8( numeros ): # numeros es una lista (arreglo en c)
total = 0
for index in range(len(numeros)):
total = numeros[index]
return total
numeros = [1, 2, 3, 4, 5]  # sample list (assumed); `numeros` was not defined in the notebook
ejemplo8(numeros)
def ejemplo9( n ):
contador = 0
basura = 0
for i in range( n ) :
contador += 1
for j in range( n ) :
contador += 1
basura = basura + contador
return contador
print(ejemplo9( 5 ))
#3+2n
def ejemplo10( n ):
count = 0
for i in range( n ) :
for j in range( i+1 ) :
count += 1
return count
def ejemplo10( n ):
count = 0
for i in range( n ) :
for j in range( i ) :
count += 1
return count
print(ejemplo10(5))
"""
n= 3
000
n00 <-- aqui empieza el for interno
nn0 <--- aqui termina el for interno
nnn
n = 4
0000
n000 <-- aqui empieza el for interno
nn00
nnn0 <--- aqui termina el for interno
nnnn
n =5
00000
n0000 <-- aqui empieza el for interno
nn000
nnn00
nnnn0 <--- aqui termina el for interno
nnnnn
"""
def ejemplo11( n ):
count = 0
i = n
while i > 1 :
count += 1
i = i // 2
return count
print(ejemplo11(16))
# T(n) = 2 + (2 Log 2 n)
def ejemplo12( n ):
contador = 0
for x in range(n):
contador += ejemplo11(x)
return contador
def ejemplo12_bis( n=5 ):
contador = 0
contador = contador + ejemplo11(0) # 0
contador = contador + ejemplo11(1) # 0
contador = contador + ejemplo11(2) # 1
contador = contador + ejemplo11(3) # 1
contador = contador + ejemplo11(4) # 2
return contador
ejemplo12_bis( 5 )
def ejemplo13( x ):
bandera = x
contador = 0
while( bandera >= 10):
print(f" x = { bandera } ")
bandera /= 10
contador = contador + 1
print(contador)
# T(x) = log10 x +1
ejemplo13( 1000 )
def ejemplo14( n ):
y = n
z = n
contador = 0
while y >= 3: #3
y /= 3 # 1
contador += 1 # cont =3
while z >= 3: #27
z /= 3
contador += 1
return contador
```
| github_jupyter |
# Framing models
```
import lettertask
import patches
import torch
import torch.nn as nn
import torch.optim as optim
import numpy as np
from tqdm import tqdm
import lazytools_sflippl as lazytools
import plotnine as gg
import pandas as pd
cbm = lettertask.data.CompositionalBinaryModel(
width=[5, 5],
change_probability=[0.05, 0.5],
samples=10000,
seed=1001
)
cts = patches.data.Contrastive1DTimeSeries(cbm.to_array(), seed=202)
```
## Base-reconstructive model
```
class BaRec(nn.Module):
def __init__(self, latent_features, input_features=None, timesteps=None,
data=None, bias=True):
super().__init__()
if data:
input_features = input_features or data.n_vars
timesteps = timesteps or data.n_timesteps
elif input_features is None or timesteps is None:
raise ValueError('You must either provide data or both input '
'features and timesteps.')
self.latent_features = latent_features
self.input_features = input_features
self.timesteps = timesteps
self.encoder = nn.Linear(input_features, latent_features, bias=bias)
self.predictor = nn.Linear(latent_features, timesteps, bias=bias)
self.decoder = nn.Conv1d(latent_features, input_features, 1, bias=bias)
def forward(self, x):
code = self.encoder(x['current_values'])
prediction = self.predictor(code)
decoded = self.decoder(prediction).transpose(1, 2)
return decoded
barec = BaRec(1, data=cts)
optimizer = optim.Adam(barec.parameters())
criterion = nn.MSELoss()
data = cts[0]
prediction = barec(data)
print(data['future_values'].shape)
print(prediction.shape)
ideal = np.array([[1,0],[0,1]], dtype=np.float32).repeat(5,1)/np.sqrt(5)
ideal
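# `ideal` holds the two ground-truth latent directions (one per 5-wide block of the
# compositional binary model), each normalized to unit length; projecting the learned
# encoder weights onto these rows tracks how well each latent factor is recovered.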
barec = BaRec(1, data=cts, bias=False)
optimizer = optim.Adam(barec.parameters())
criterion = nn.MSELoss()
loss_traj = []
angles = []
running_loss = 0
for epoch in tqdm(range(10)):
for i, data in enumerate(cts):
if i<len(cts):
optimizer.zero_grad()
prediction = barec(data)
loss = criterion(prediction, data['future_values'])
loss.backward()
optimizer.step()
running_loss += loss
if i % 50 == 49:
                loss_traj.append(running_loss.detach() / 50)  # detach; converted to numpy for plotting below
running_loss = 0
est = next(barec.parameters()).detach().numpy()
angles.append(np.matmul(ideal, est.T)/np.sqrt(np.matmul(est, est.T)))
(gg.ggplot(
lazytools.array_to_dataframe(
np.array(
[l.detach().numpy() for l in loss_traj]
)
),
gg.aes(x='dim0', y='array')
) +
gg.geom_smooth(method='loess'))
(gg.ggplot(
lazytools.array_to_dataframe(
np.concatenate(angles, axis=1)
),
gg.aes(x='dim1', y='array', color='dim0', group='dim0')
) +
gg.geom_line())
np.save('angles.npy', np.concatenate(angles, axis=1))
```
## 1-part latent predictive model
```
class LaPred1P(nn.Module):
def __init__(self, latent_features, input_features=None, timesteps=None,
data=None, bias=True):
super().__init__()
if data:
input_features = input_features or data.n_vars
timesteps = timesteps or data.n_timesteps
elif input_features is None or timesteps is None:
raise ValueError('You must either provide data or both input '
'features and timesteps.')
self.latent_features = latent_features
self.input_features = input_features
self.timesteps = timesteps
self.encoder = nn.Linear(input_features, latent_features, bias=bias)
self.predictor = nn.Linear(latent_features, timesteps*latent_features, bias=bias)
def forward(self, x):
code = self.encoder(x['input'])
prediction = self.predictor(code).\
reshape(self.timesteps, self.latent_features)
return prediction
hmm = patches.data.HiddenMarkovModel(cbm.to_array(), cbm.latent_array()[:, [0]])
lapred1p = LaPred1P(1, data=hmm, bias=False)
lapred1p(hmm[0])
hmm[0]['future_latent_values']
lapred1p = LaPred1P(1, data=hmm, bias=False)
optimizer = optim.Adam(lapred1p.parameters())
criterion = nn.MSELoss()
running_loss = 0
loss_traj = []
angles = []
for epoch in tqdm(range(10)):
for i, data in enumerate(hmm):
if i<len(hmm):
if i % 10 == 0:
est = list(lapred1p.parameters())[0].detach().numpy()
angles.append(np.matmul(ideal, est.T)/np.sqrt(np.matmul(est, est.T)))
optimizer.zero_grad()
prediction = lapred1p(data)
loss = criterion(prediction, data['future_latent_values'])
loss.backward()
optimizer.step()
running_loss += loss
if i % 50 == 49:
loss_traj.append(running_loss.detach().numpy()/50)
running_loss = 0
list(lapred1p.parameters())
(gg.ggplot(
lazytools.array_to_dataframe(
np.array(loss_traj)
),
gg.aes(x='dim0', y='array')
) +
gg.geom_smooth(method='loess'))
np.concatenate(angles, axis=1).shape
(gg.ggplot(
lazytools.array_to_dataframe(
np.concatenate(angles, axis=1)
),
gg.aes(x='dim1', y='array', color='dim0', group='dim0')
) +
gg.geom_line())
```
## 2-part latent predictive model
```
class LaPred2P(nn.Module):
def __init__(self, latent_features, input_features=None, timesteps=None,
data=None, bias=True):
super().__init__()
if data:
input_features = input_features or data.n_vars
timesteps = timesteps or data.n_timesteps
elif input_features is None or timesteps is None:
raise ValueError('You must either provide data or both input '
'features and timesteps.')
self.latent_features = latent_features
self.input_features = input_features
self.timesteps = timesteps
self.encoder = nn.Linear(input_features, latent_features, bias=bias)
self.predictor = nn.Linear(latent_features, timesteps*latent_features, bias=bias)
def forward(self, x):
code = self.encoder(x['input'])
prediction = self.predictor(x['latent_values']).\
reshape(self.timesteps, self.latent_features)
return {
'latent_values': code,
'latent_prediction': prediction
}
lapred2p = LaPred2P(1, data=hmm, bias=False)
optimizer = optim.Adam(lapred2p.parameters())
criterion = nn.MSELoss()
loss_traj = []
angles = []
running_loss = 0
for epoch in tqdm(range(10)):
for i, data in enumerate(hmm):
if i<len(hmm):
if i % 10 == 0:
est = list(lapred2p.parameters())[0].detach().numpy()
angles.append(np.matmul(ideal, est.T)/np.sqrt(np.matmul(est, est.T)))
optimizer.zero_grad()
prediction = lapred2p(data)
loss = criterion(prediction['latent_prediction'], data['future_latent_values']) + \
criterion(prediction['latent_values'], data['latent_values'])
loss.backward()
optimizer.step()
running_loss += loss
if i % 50 == 49:
loss_traj.append(running_loss.detach().numpy()/50)
running_loss = 0
list(lapred2p.parameters())[0]
(gg.ggplot(
lazytools.array_to_dataframe(
np.array(loss_traj)
),
gg.aes(x='dim0', y='array')
) +
gg.geom_smooth(method='loess'))
(gg.ggplot(
lazytools.array_to_dataframe(
np.concatenate(angles, axis=1)
),
gg.aes(x='dim1', y='array', color='dim0', group='dim0')
) +
gg.geom_line())
```
## Contrastive predictive model
```
cts = patches.data.Contrastive1DTimeSeries(data=cbm.to_array())
ce = patches.networks.LinearScaffold(latent_features=1, data=cts)
criterion = patches.losses.ContrastiveLoss(loss=nn.MSELoss())
optimizer = optim.Adam(ce.parameters())
angles = []
loss_traj = []
running_loss = 0
for epoch in tqdm(range(10)):
for i, data in enumerate(cts):
if i<len(cts):
if i % 10 == 0:
est = list(ce.parameters())[0].detach().numpy()
angles.append(np.matmul(ideal, est.T)/np.sqrt(np.matmul(est, est.T)))
optimizer.zero_grad()
code = ce(data)
loss = criterion(code)
loss.backward()
optimizer.step()
running_loss += loss
if i % 50 == 49:
loss_traj.append(running_loss.detach().numpy()/50)
running_loss = 0
(gg.ggplot(
lazytools.array_to_dataframe(
np.array(loss_traj)
),
gg.aes(x='dim0', y='array')
) +
gg.geom_smooth(method='loess'))
(gg.ggplot(
lazytools.array_to_dataframe(
np.concatenate(angles, axis=1)
),
gg.aes(x='dim1', y='array', color='dim0', group='dim0')
) +
gg.geom_line())
list(ce.parameters())
```
## Sampling bias
```
def moving_average(array):
"""Moving average over axis 0."""
cumsum = array.cumsum(axis=0)
length = cumsum.shape[0]
rng = np.arange(1, length+1)
if cumsum.ndim>1:
rng = rng.reshape(length, 1).repeat(cumsum.shape[1], 1)
return cumsum/rng
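# e.g. moving_average(np.array([1., 2., 3.])) -> array([1. , 1.5, 2. ])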
exposure = moving_average(np.abs(cbm.to_array()))
(gg.ggplot(lazytools.array_to_dataframe(exposure), gg.aes(x='dim0', group='dim1', y='array')) +
gg.geom_line(alpha=0.2) +
gg.scale_x_log10())
coherence = moving_average(1-2*(cbm.latent_array()[:-1,:]!=cbm.latent_array()[1:,:]))
lazytools.array_to_dataframe(coherence)['dim1'].astype(str)
(gg.ggplot(lazytools.array_to_dataframe(coherence),
gg.aes(x='dim0', color='dim1', y='array', group='dim1')) +
gg.geom_line() +
gg.scale_x_log10())
coherence = moving_average(1-2*(cbm.latent_array()[:-2,:]!=cbm.latent_array()[2:,:]))
(gg.ggplot(lazytools.array_to_dataframe(coherence),
gg.aes(x='dim0', color='dim1', y='array', group='dim1')) +
gg.geom_line() +
gg.scale_x_log10())
dfs = []
for t in tqdm(range(1, 10)):
for pos_1 in range(10):
for pos_2 in range(10):
pos_subset = (cbm.to_array()[:-t,pos_1]!=0) & (cbm.to_array()[t:,pos_2]!=0)
tmp_coherence = moving_average(
1-2*(cbm.to_array()[:-t][pos_subset,pos_1]!=cbm.to_array()[t:][pos_subset,pos_2])
)
tmp_df = lazytools.array_to_dataframe(tmp_coherence)
tmp_df['pos_1'] = np.array(pos_1)
tmp_df['pos_2'] = np.array(pos_2)
tmp_df['t'] = np.array(t)
tmp_df['n'] = len(tmp_df)
dfs.append(tmp_df)
df = pd.concat(dfs)
df['dim0'] = (df['dim0']+1)/df['n']
df['coherent'] = (df['pos_1'] <= 4) & (df['pos_2']<= 4)
df['group'] = df['pos_1'].astype(str)+df['pos_2'].astype(str)
(gg.ggplot(df, gg.aes(x='dim0', y='array', group='group', color='coherent')) +
gg.geom_line(alpha=0.2) +
gg.facet_wrap('t') +
gg.scale_x_log10())
(gg.ggplot(df[(df['dim0']==1)], gg.aes(x='array', fill='coherent')) +
gg.geom_histogram(position='identity', alpha=.8) +
gg.facet_wrap('t'))
help(gg.labs)
str(cbm)
cbm.width
```
| github_jupyter |
<center>
<img src="https://gitlab.com/ibm/skills-network/courses/placeholder101/-/raw/master/labs/module%201/images/IDSNlogo.png" width="300" alt="cognitiveclass.ai logo" />
</center>
# **Data Visualization**
Estimated time needed: **30** minutes
In this lab, you will learn how to visualize and interpret data
## Objectives
* Import Libraries
* Lab Exercises
* Identifying duplicates
* Plotting Scatterplots
* Plotting Boxplots
***
## Import Libraries
All Libraries required for this lab are listed below. The libraries pre-installed on Skills Network Labs are commented. If you run this notebook in a different environment, e.g. your desktop, you may need to uncomment and install certain libraries.
```
# !pip install pandas
# !pip install numpy
# !pip install matplotlib
# !pip install seaborn
```
Import the libraries we need for the lab
```
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
```
Read in the csv file from the url using the pandas library
```
ratings_url = 'https://cf-courses-data.s3.us.cloud-object-storage.appdomain.cloud/IBMDeveloperSkillsNetwork-ST0151EN-SkillsNetwork/labs/teachingratings.csv'
ratings_df = pd.read_csv(ratings_url)
```
## Lab Exercises
### Identify all duplicate cases using prof. Using all observations, find the average and standard deviation for age. Repeat the analysis by first filtering the data set to include one observation for each instructor with a total number of observations restricted to 94.
Identify all duplicate cases using prof variable - find the unique values of the prof variables
```
ratings_df.prof.unique()
```
Print out the number of unique values in the prof variable
```
ratings_df.prof.nunique()
```
Using all observations, Find the average and standard deviation for age
```
ratings_df['age'].mean()
ratings_df['age'].std()
```
Repeat the analysis by first filtering the data set to include one observation for each instructor with a total number of observations restricted to 94.
> first we drop duplicates using prof as a subset and assign it a new dataframe name called no_duplicates_ratings_df
```
no_duplicates_ratings_df = ratings_df.drop_duplicates(subset =['prof'])
no_duplicates_ratings_df.head()
```
> Use the new dataset to get the mean of age
```
no_duplicates_ratings_df['age'].mean()
no_duplicates_ratings_df['age'].std()
```
### Using a bar chart, demonstrate if instructors teaching lower-division courses receive higher average teaching evaluations.
```
ratings_df.head()
```
Find the average teaching evaluation in both groups of upper and lower-division
```
division_eval = ratings_df.groupby('division')[['eval']].mean().reset_index()
```
Plot the barplot using the seaborn library
```
sns.set(style="whitegrid")
ax = sns.barplot(x="division", y="eval", data=division_eval)
```
### Plot the relationship between age and teaching evaluation scores.
Create a scatterplot with the scatterplot function in the seaborn library
```
ax = sns.scatterplot(x='age', y='eval', data=ratings_df)
```
### Using gender-differentiated scatter plots, plot the relationship between age and teaching evaluation scores.
Create a scatterplot with the scatterplot function in the seaborn library this time add the <code>hue</code> argument
```
ax = sns.scatterplot(x='age', y='eval', hue='gender',
data=ratings_df)
```
### Create a box plot for beauty scores differentiated by credits.
We use the <code>boxplot()</code> function from the seaborn library
```
ax = sns.boxplot(x='credits', y='beauty', data=ratings_df)
```
### What is the number of courses taught by gender?
We use the <code>catplot()</code> function from the seaborn library
```
sns.catplot(x='gender', kind='count', data=ratings_df)
```
### Create a group histogram of courses taught by gender and tenure
We will add the <code>hue = Tenure</code> argument
```
sns.catplot(x='gender', hue = 'tenure', kind='count', data=ratings_df)
```
### Add division as another factor to the above histogram
We add another argument named <code>row</code> and use the division variable as the row
```
sns.catplot(x='gender', hue = 'tenure', row = 'division',
kind='count', data=ratings_df,
height = 3, aspect = 2)
```
### Create a scatterplot of age and evaluation scores, differentiated by gender and tenure
Use the <code>relplot()</code> function for complex scatter plots
```
sns.relplot(x="age", y="eval", hue="gender",
row="tenure",
data=ratings_df, height = 3, aspect = 2)
```
### Create a distribution plot of teaching evaluation scores
We use the <code>distplot()</code> function from the seaborn library, set <code>kde = False</code> because we don't need the curve
```
ax = sns.distplot(ratings_df['eval'], kde = False)
```
### Create a distribution plot of teaching evaluation score with gender as a factor
```
## use the distplot function from the seaborn library
sns.distplot(ratings_df[ratings_df['gender'] == 'female']['eval'], color='green', kde=False)
sns.distplot(ratings_df[ratings_df['gender'] == 'male']['eval'], color="orange", kde=False)
plt.show()
```
### Create a box plot - age of the instructor by gender
```
ax = sns.boxplot(x="gender", y="age", data=ratings_df)
```
### Compare age along with tenure and gender
```
ax = sns.boxplot(x="tenure", y="age", hue="gender",
data=ratings_df)
```
## Practice Questions
### Question 1: Create a distribution plot of beauty scores with Native English speaker as a factor
* Make the color of the native English speakers plot - orange and non - native English speakers - blue
```
## insert code
```
Double-click **here** for the solution.
<!-- The answer is below:
sns.distplot(ratings_df[ratings_df['native'] == 'yes']['beauty'], color="orange", kde=False)
sns.distplot(ratings_df[ratings_df['native'] == 'no']['beauty'], color="blue", kde=False)
plt.show()
-->
### Question 2: Create a Horizontal box plot of the age of the instructors by visible minority
```
## insert code
```
Double-click **here** for a hint.
<!-- The hint is below:
Remember that the positions of the argument determine whether it will be vertical or horizontal
-->
Double-click **here** for the solution.
<!-- The answer is below:
ax = sns.boxplot(x="age", y="minority", data=ratings_df)
-->
### Question 3: Create a group histogram of tenure by minority and add the gender factor
```
## insert code
```
Double-click **here** for the solution.
<!-- The answer is below:
sns.catplot(x='tenure', hue = 'minority', row = 'gender',
kind='count', data=ratings_df,
height = 3, aspect = 2)
-->
### Question 4: Create a boxplot of the age variable
```
## insert code
```
Double-click **here** for the solution.
<!-- The answer is below:
## you only have to specify the y-variable
ax = sns.boxplot(y="age", data=ratings_df)
-->
## Authors
[Aije Egwaikhide](https://www.linkedin.com/in/aije-egwaikhide/?utm_medium=Exinfluencer&utm_source=Exinfluencer&utm_content=000026UJ&utm_term=10006555&utm_id=NA-SkillsNetwork-Channel-SkillsNetworkCoursesIBMDeveloperSkillsNetworkST0151ENSkillsNetwork20531532-2021-01-01) is a Data Scientist at IBM who holds a degree in Economics and Statistics from the University of Manitoba and a Post-grad in Business Analytics from St. Lawrence College, Kingston. She is a current employee of IBM where she started as a Junior Data Scientist at the Global Business Services (GBS) in 2018. Her main role was making meaning out of data for their Oil and Gas clients through basic statistics and advanced Machine Learning algorithms. The highlight of her time in GBS was creating a customized end-to-end Machine learning and Statistics solution on optimizing operations in the Oil and Gas wells. She moved to the Cognitive Systems Group as a Senior Data Scientist where she will be providing the team with actionable insights using Data Science techniques and further improve processes through building machine learning solutions. She recently joined the IBM Developer Skills Network group where she brings her real-world experience to the courses she creates.
## Change Log
| Date (YYYY-MM-DD) | Version | Changed By | Change Description |
| ----------------- | ------- | --------------- | -------------------------------------- |
| 2020-08-14 | 0.1 | Aije Egwaikhide | Created the initial version of the lab |
Copyright © 2020 IBM Corporation. This notebook and its source code are released under the terms of the [MIT License](https://cognitiveclass.ai/mit-license/?utm_medium=Exinfluencer&utm_source=Exinfluencer&utm_content=000026UJ&utm_term=10006555&utm_id=NA-SkillsNetwork-Channel-SkillsNetworkCoursesIBMDeveloperSkillsNetworkST0151ENSkillsNetwork20531532-2021-01-01).
| github_jupyter |
## Introduction
Understanding heat transport in semiconductors and insulators is of fundamental importance because of its technological impact in electronics and renewable energy harvesting and conversion.
Anharmonic Lattice Dynamics provides a powerful framework for the description of heat transport at the nanoscale. One of the advantages of this method is that it naturally includes the quantum effects of atomic vibrations, which are needed to compute the thermal properties of semiconductors widely used in nanotechnology, such as silicon and carbon, even at room temperature.
While heat transport in amorphous and crystalline semiconductors has different microscopic origins, a unified approach to simulate both crystals and glasses has been devised.
Here we introduce a unified workflow, which implements both the Boltzmann Transport Equation (BTE) and the Quasi-Harmonic Green-Kubo (QHGK) methods. We discuss how the theory can be optimized to exploit modern parallel architectures, and how it is implemented in kALDo: a versatile and scalable open-source software to compute phonon transport in solids.
## Theory
In semiconductors, electronic and vibrational dynamics often occur over different time scales, and can thus be decoupled using the Born-Oppenheimer approximation. Under this assumption, the potential $\phi$ of a system made of $N_{atoms}$ atoms is a function of all the $x_{i\alpha}$ atomic positions, where $i$ and $\alpha$ refer to the atomic and Cartesian indices, respectively. Near thermal equilibrium, the potential energy can be Taylor expanded in the atomic displacements, $\mathbf{u}=\mathbf x-\mathbf{x}_{\rm equilibrium}$,
$$
\phi(\{x_{i\alpha}\})=\phi_0 +
\sum_{i\alpha}\phi^{\prime}_{i\alpha }u_{i\alpha}
+\frac{1}{2}
\sum_{i\alpha i'\alpha'}
\phi^{\prime\prime}_{i\alpha i'\alpha '}u_{i\alpha} u_{i'\alpha'}
+\frac{1}{3!}\sum_{i\alpha i'\alpha 'i''\alpha ''}
\phi^{\prime\prime\prime}_{i\alpha i'\alpha 'i''\alpha ''} u_{i\alpha }u_{i'\alpha '} u_{i''\alpha ''} + \dots,
$$
where
$$
\phi^{\prime\prime}_{i\alpha i'\alpha '}=\frac{\partial^{2} \phi}{\partial u_{i\alpha } \partial u_{i'\alpha '} },\qquad
\phi^{\prime\prime\prime}_{i\alpha i'\alpha 'i''\alpha ''}=\frac{\partial^{3} \phi}{\partial u_{i\alpha } \partial u_{i'\alpha '} \partial u_{i''\alpha ''}},
$$
are the second and third order interatomic force constants (IFC). The term $\phi_0$ can be discarded, and the forces $F = - \phi^{\prime}$ are zero at equilibrium.
The IFCs can be evaluated by finite differences, which consists of calculating the difference between the forces acting on the system when one of the atoms is displaced by a small finite shift along a Cartesian direction. The second- and third-order IFCs require $2N_{atoms}$ and $4N_{atoms}^2$ force calculations, respectively. In crystals, this amount can be reduced by exploiting the spatial symmetries of the system, or by adopting a compressed sensing approach. In the framework of DFT, it is also possible and often convenient to compute IFCs using perturbation theory.
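As a concrete illustration, the following is a minimal sketch of the central finite-difference evaluation of the second-order IFCs. It assumes a user-supplied `forces(positions)` callable (hypothetical, e.g. a thin wrapper around an ASE calculator) that returns the $N_{atoms}\times 3$ force array; it is not the implementation used in kALDo.
```python
import numpy as np

def second_order_ifc(positions, forces, dx=1e-3):
    """Central finite-difference estimate of the second-order IFCs.

    positions : (N, 3) equilibrium coordinates
    forces    : callable returning the (N, 3) forces for a given geometry
    dx        : finite displacement, in the same length units as positions

    Returns phi2 with shape (N, 3, N, 3), where
    phi2[i, a, j, b] = d^2 phi / (du_{ia} du_{jb}) = -dF_{jb}/du_{ia}.
    """
    n_atoms = positions.shape[0]
    phi2 = np.zeros((n_atoms, 3, n_atoms, 3))
    for i in range(n_atoms):
        for a in range(3):
            plus, minus = positions.copy(), positions.copy()
            plus[i, a] += dx
            minus[i, a] -= dx
            # phi'' = -dF/du, estimated by central differences
            phi2[i, a] = -(forces(plus) - forces(minus)) / (2 * dx)
    return phi2
```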
The dynamical matrix is the second-order IFC rescaled by the masses, $D_{i\alpha i'\alpha'}=\phi^{\prime\prime}_{i\alpha i'\alpha'}/\sqrt{m_im_{i'}}$. It is diagonal in the phonon basis
$$
\sum_{i'\alpha'} D_{i\alpha i'\alpha'}\eta_{i'\alpha'\mu} =\eta_{i\alpha\mu} \omega_\mu^2
$$
and $\omega_\mu/(2\pi)$ are the frequencies of the normal modes of the system.
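The diagonalization itself is a standard dense symmetric eigenvalue problem. A minimal NumPy sketch for a finite, direct-space system could look like the following; the `phi2` layout matches the sketch above and is an assumption, not kALDo's internal storage format.
```python
import numpy as np

def normal_modes(phi2, masses):
    """Diagonalize the mass-rescaled second-order IFCs in direct space.

    phi2   : (N, 3, N, 3) second-order IFCs
    masses : (N,) atomic masses

    Returns the angular frequencies omega (3N,) and eigenvectors eta (3N, 3N).
    """
    n_atoms = len(masses)
    m = np.repeat(masses, 3)  # mass of each (atom, Cartesian) degree of freedom
    dyn = phi2.reshape(3 * n_atoms, 3 * n_atoms) / np.sqrt(np.outer(m, m))
    omega2, eta = np.linalg.eigh(dyn)  # dyn is symmetric: eigh returns omega^2 and eta
    omega = np.sign(omega2) * np.sqrt(np.abs(omega2))  # negative values flag unstable modes
    return omega, eta
```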
For crystals, where there is long-range order due to periodicity, the dimensionality of the problem can be reduced. The Fourier transform maps the large direct space onto a compact volume in reciprocal space: the Brillouin zone. More precisely, we adopt a supercell approach, where we calculate the dynamical matrix on $N_{\rm replicas}$ replicas of a unit cell of $N_{\rm unit}$ atoms, at positions $\mathbf R_l$, and calculate
$$
D_{i \alpha k i' \alpha'}=\sum_l \chi_{kl} D_{i \alpha l i' \alpha'},\quad \chi_{kl} = \mathrm{e}^{-i \mathbf{q_k}\cdot \mathbf{R}_{l} },
$$
where $\mathbf q_k$ is a grid of size $N_k$ indexed by $k$ and the eigenvalue equation becomes
$$
\sum_{i'\alpha'} D_{i \alpha k i' \alpha'} \eta_{i' \alpha'k s}=\omega_{k s}^{2} \eta_{i \alpha k s },
$$
which now depends on the quasi-momentum index, $k$, and the phonon mode, $s$.
### Boltzmann Transport Equation
At finite temperature $T$, the Bose-Einstein statistics gives the equilibrium quantum distribution of atomic vibrations
$$
n_{\mu}=n(\omega_{\mu})=\frac{1}{e^{\frac{\hbar\omega_{\mu}}{k_B T}}-1}
$$
where $k_B$ is the Boltzmann constant and we use $\mu =(k,s)$.
We consider a small temperature gradient applied along the $\alpha$-axis of a crystalline material. If the phonon population depends on the position only through the temperature, $\frac{\partial n_{\mu}}{\partial x_\alpha} = \frac{\partial n_{\mu}}{\partial T}\nabla_\alpha T$, we can Taylor expand it
$$
\tilde n_{\mu\alpha} \simeq n_\mu + \lambda_{\mu\alpha} \frac{\partial n_\mu}{\partial x_\alpha} \simeq n_\mu + \psi_{\mu\alpha}\nabla_\alpha T
$$
with $\psi_{\mu\alpha}=\lambda_{\mu\alpha} \frac{\partial n_\mu}{\partial T}$, where $\lambda_{\mu\alpha}$ is the phonons mean free path.
Being quantum quasi-particles, phonons have a well-defined group velocity, which, for the acoustic modes in the long wavelength limit, corresponds to the speed of sound in the material,
$$
v_{ ks\alpha}=\frac{\partial \omega_{k s}}{\partial {q_{k\alpha}}} = \frac{1}{2\omega_{ks}}\sum_{i\beta l i'\beta'}
i R_{l \alpha} D_{i\beta li'\beta'}\chi_{kl}
\eta_{ks i\beta}\eta_{ksi'\beta'}
$$
and the last equality is obtained by applying the derivative with respect to $\mathbf{q}_k$ directly to the eigenvalue equation above.
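For illustration only, a crude numerical alternative to the analytic derivative above is a central difference of the dispersion. This sketch assumes a hypothetical `omega_of_q(q)` callable returning the mode frequencies at a given q-point; note that finite differences can suffer from band-ordering problems near crossings, which the analytic expression avoids.
```python
import numpy as np

def group_velocity_fd(omega_of_q, q, dq=1e-4):
    """Central-difference estimate of the group velocity d(omega)/dq.

    omega_of_q : callable mapping a q-vector (3,) to mode frequencies (n_modes,)
    q          : (3,) quasi-momentum at which the velocity is evaluated
    dq         : finite shift along each Cartesian direction

    Returns v with shape (n_modes, 3).
    """
    n_modes = omega_of_q(q).shape[0]
    v = np.zeros((n_modes, 3))
    for alpha in range(3):
        shift = np.zeros(3)
        shift[alpha] = dq
        v[:, alpha] = (omega_of_q(q + shift) - omega_of_q(q - shift)) / (2 * dq)
    return v
```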
The heat current per mode is written in terms of the phonon energy $\hbar \omega$, velocity $v$, and out-of-equilibrium phonon population, $\tilde n$:
$$
j_{\mu\alpha'} =\sum_\alpha \hbar \omega_\mu v_{\mu\alpha'} (\tilde n_{\mu\alpha} - n_{\mu})\simeq- \sum_\alpha c_\mu v_{\mu\alpha'} \mathbf{\lambda}_{\mu\alpha} \nabla_\alpha T .
$$
As we deal with extended systems, we can assume heat transport to be in the diffusive regime and use Fourier's law
$$
J_{\alpha}=-\sum_{\alpha'}\kappa_{\alpha\alpha'} \nabla_{\alpha'} T,
$$
where the heat current is the sum of the contribution from each phonon mode: $J_\alpha = 1/(N_k V)\sum_\mu j_{\mu\alpha}$.
The thermal conductivity is then
$$
\kappa_{\alpha \alpha'}=\frac{1}{ V N_k} \sum_{\mu} c_\mu v_{\mu\alpha} \lambda_{\mu\alpha'},
$$
where we defined the heat capacity per mode
$$
c_\mu=\hbar \omega_\mu \frac{\partial n_\mu}{\partial T},
$$
which is connected to the total heat capacity through $C = \sum_\mu c_\mu /(N_k V)$.
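A small sketch of the Bose-Einstein occupation and of the modal heat capacity $c_\mu$ follows, using the analytic derivative $\partial n/\partial T = (x/T)\,n(n+1)$ with $x=\hbar\omega/k_B T$. SI units are assumed here purely for illustration; kALDo handles unit conversions internally.
```python
import numpy as np

HBAR = 1.054571817e-34  # J s
KB = 1.380649e-23       # J / K

def bose_einstein(omega, temperature):
    """Equilibrium phonon occupation n(omega) at temperature T (omega in rad/s)."""
    x = HBAR * omega / (KB * temperature)
    return 1.0 / np.expm1(x)

def modal_heat_capacity(omega, temperature):
    """c_mu = hbar * omega * dn/dT, evaluated analytically (J/K per mode)."""
    x = HBAR * omega / (KB * temperature)
    n = bose_einstein(omega, temperature)
    # dn/dT = (hbar*omega / (kB T^2)) e^x / (e^x - 1)^2 = (x / T) n (n + 1)
    return HBAR * omega * (x / temperature) * n * (n + 1.0)
```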
We can now introduce the BTE, which combines the kinetic theory of gases with collective phonon vibrations:
$$
{\mathbf{v}}_{\mu} \cdot {\boldsymbol{\nabla}} T \frac{\partial n_{\mu}}{\partial T}=\left.\frac{\partial n_{\mu}}{\partial t}\right|_{\text {scatt}},
$$
where the scattering term, in its linearized form, is
$$
\left.\frac{\partial n_{\mu}}{\partial t}\right|_{\text {scatt}}=
\frac{\nabla_\alpha T}{\omega_\mu N_k}\sum_{\mu^{\prime} \mu^{\prime \prime}}^{+} \Gamma_{\mu \mu^{\prime} \mu^{\prime \prime}}^{+}
\left(\omega_\mu\mathbf{\psi}_{\mu\alpha}
+\omega_{\mu^{\prime}}\mathbf{\psi}_{\mu^{\prime}\alpha}
-\omega_{\mu^{\prime \prime}} \mathbf{\psi}_{\mu^{\prime \prime}\alpha}\right)
+\frac{\nabla_\alpha T}{\omega_\mu N_k}\sum_{\mu^{\prime} \mu^{\prime \prime}}^{-} \frac{1}{2} \Gamma_{\mu \mu^{\prime} \mu^{\prime \prime}}^{-}
\left(\omega_\mu\mathbf{\psi}_{\mu\alpha}
-\omega_{\mu^{\prime}} \mathbf{\psi}_{\mu^{\prime}\alpha}
-\omega_{\mu^{\prime \prime}} \mathbf{\psi}_{\mu^{\prime \prime}\alpha}\right) .
$$
$\Gamma^{+}_{\mu\mu'\mu''}$ and $\Gamma^{-}_{\mu\mu'\mu''}$ are the scattering rates for three-phonon scattering processes, corresponding to phonon annihilation, $\mu, \mu'\rightarrow\mu''$, and phonon creation, $\mu \rightarrow\mu',\mu''$:
$$
\Gamma_{\mu \mu^{\prime} \mu^{\prime \prime}}^{\pm} =\frac{\hbar \pi}{8} \frac{g_{\mu\mu'\mu''}^{\pm}}{\omega_{\mu} \omega_{\mu'} \omega_{\mu''}}\left|\phi_{\mu \mu^{\prime} \mu^{\prime \prime}}^{\pm}\right|^{2},
$$
and the projection of the third-order IFCs on the phonon modes is given by
$$
\phi^\pm
_{ksk's'k'' s''}=
\sum_{il'i'l''i''}
\frac{
\phi_{il'i'l''i''}}
{\sqrt{m_{i}m_{i'}m_{i''}}}
\eta_{i ks}\eta^{\pm}_{i'k' s'}
\eta^*_{i''k''s''}\chi^\pm_{k'l'}\chi^*_{k''l''}
$$
with $\eta^+=\eta$, $\chi^+=\chi$ and $\eta^-=\eta^*$, $\chi^-=\chi^*$.
The phase-space volumes $g^\pm_{\mu\mu^\prime\mu^{\prime\prime}}$ in the previous equation are defined as
$$
g^+_{\mu\mu^\prime\mu^{\prime\prime}} = (n_{\mu'}-n_{\mu''})
\delta^+_{\mu\mu^\prime\mu^{\prime\prime}}
$$
$$
g^-_{\mu\mu^\prime\mu^{\prime\prime}} = (1 + n_{\mu'}+n_{\mu''})
\delta^-_{\mu\mu^\prime\mu^{\prime\prime}},
$$
and include the $\delta$ functions enforcing the conservation of energy and momentum in three-phonon scattering processes,
$$
\delta_{ks k's' k''s''}^{\pm}=
\delta_{\mathbf q_{k}\pm\mathbf q_{k'}-\mathbf q_{k''}, \mathbf Q}
\delta\left(\omega_{ks}\pm\omega_{k's'}-\omega_{k''s''}\right),
$$
with $\mathbf Q$ a reciprocal lattice vector. Finally, the normalized phase space per mode, $g_\mu=\frac{1}{N}\sum_{\mu'\mu''}g_{\mu\mu'\mu''}$, provides useful information about the weight of a specific mode in the anharmonic scattering processes.
In order to calculate the conductivity, we express the mean free path in terms of the 3-phonon scattering rates
$$
v_{\mu\alpha} = \sum_{\mu'}\tilde \Gamma_{\mu\mu' }\lambda_{\mu'\alpha} = \sum_{\mu'}(\delta_{\mu\mu'}\Gamma^0_\mu + \Gamma^{1}_{\mu\mu'})\lambda_{\mu'\alpha},
$$
where we introduced
$$
\Gamma^{0}_\mu=\sum_{\mu'\mu''}(\Gamma^+_{\mu\mu'\mu''} + \Gamma^-_{\mu\mu'\mu''} ),
$$
and
$$
\Gamma^{1}_{\mu\mu'}=
\frac{\omega_{\mu'}}{\omega_\mu}
\sum_{\mu''}(\Gamma^+_{\mu\mu'\mu''}
-\Gamma^+_{\mu\mu''\mu'}
-\Gamma^-_{\mu\mu'\mu''}
-\Gamma^-_{\mu\mu''\mu'}
).
$$
In the relaxation time approximation (RTA), the off-diagonal terms are ignored, $\Gamma^{1}_{\mu\mu'}=0$, and the conductivity is
$$
\kappa_{\alpha\alpha'} =\frac{1}{N_kV} \sum_{\mu}c_\mu v_{\mu\alpha}{\lambda_{\mu\alpha'}}
=\frac{1}{N_kV} \sum_\mu c_\mu v_{\mu\alpha} {\tau_\mu}{v_{\mu\alpha'}},
$$
where $\tau_\mu=1/(2\Gamma_{\mu}^0)$ corresponds to the phonon lifetime calculated using the Fermi golden rule.
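In matrix form, the RTA conductivity is a single contraction over modal quantities. The sketch below assumes the modal heat capacities, group velocities, and diagonal scattering rates have already been computed as NumPy arrays; it is an illustration of the formula above, not kALDo's API.
```python
import numpy as np

def rta_conductivity(c, v, gamma0, n_k, volume):
    """RTA thermal conductivity tensor kappa_{alpha alpha'}.

    c      : (n_modes,)   modal heat capacities
    v      : (n_modes, 3) group velocities
    gamma0 : (n_modes,)   diagonal scattering rates Gamma^0_mu
    n_k    : number of q-points; volume : unit-cell volume
    """
    tau = 1.0 / (2.0 * gamma0)  # phonon lifetimes from the Fermi golden rule
    return np.einsum('m,ma,m,mb->ab', c, v, tau, v) / (n_k * volume)
```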
It has been shown that, to correctly capture the physics of phonon transport, especially in highly conductive materials, the off-diagonal terms of the scattering rates cannot be disregarded.
More generally, the mean free path is calculated by inverting the scattering tensor
$$
\lambda_{\mu\alpha} = \sum_{\mu'}(\tilde \Gamma_{\mu\mu' })^{-1}v_{\mu'\alpha}.
$$
The conductivity then becomes
$$
\kappa_{\alpha\alpha'} =\frac{1}{N_kV} \sum_{\mu\mu'} c_\mu v_{\mu\alpha}(\tilde \Gamma_{\mu\mu' })^{-1}v_{\mu'\alpha'}.
$$
This inversion operation is computationally expensive; however, when the off-diagonal elements of the scattering rate matrix are much smaller than the diagonal, we can rewrite the mean free path obtained from the BTE as a series:
$$
\lambda_{\mu\alpha} = \sum_{\mu'}\left(\delta_{\mu\mu'} + \frac{1}{\Gamma^0_\mu}\Gamma^{1}_{\mu\mu'}\right)^{-1}\frac{1}{\Gamma^0_{\mu'}}v_{\mu'\alpha} =
$$
$$
=
\sum_{\mu'}
\left[
\sum^{\infty}_{n=0}\left(- \frac{1}{\Gamma^0_\mu}\Gamma^{1}_{\mu\mu'}\right)^n
\right]\frac{1}{\Gamma^0_{\mu'}}v_{\mu'\alpha} ,
$$
where in the last step we used the identity $\sum_{n=0}^{\infty} q^n = (1 - q)^{-1}$, valid when $|q|=|\Gamma^1/\Gamma^0|<1$.
This equation can then be written in an iterative form
$$
\lambda^0_{\mu\alpha} = \frac{1}{\Gamma^0_\mu}v_{\mu\alpha}
\qquad
\lambda^{n+1}_{\mu\alpha} = - \frac{1}{\Gamma^0_\mu}\sum_{\mu'}\Gamma^{1}_{\mu\mu'} \lambda^{n}_{\mu'\alpha}.
$$
Hence, the inversion of the scattering tensor is replaced by a recursive expression. Once the mean free path is calculated, the conductivity is straightforwardly computed.
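The recursion maps directly onto a few lines of NumPy. In this sketch (an illustration, not the kALDo solver) the converged mean free path is accumulated as the running sum of the iterates $\lambda^{(n)}$; convergence requires the off-diagonal part of the scattering matrix to be sufficiently small, as discussed above.
```python
import numpy as np

def mean_free_path_iterative(gamma0, gamma1, v, n_terms=100):
    """Mean free path from the Neumann-series form of the BTE.

    gamma0 : (n_modes,)          diagonal scattering rates Gamma^0
    gamma1 : (n_modes, n_modes)  off-diagonal scattering matrix Gamma^1
    v      : (n_modes, 3)        group velocities

    Returns lambda with shape (n_modes, 3).
    """
    term = v / gamma0[:, None]  # lambda^(0)
    lam = term.copy()
    for _ in range(n_terms):
        # lambda^(n+1) = -(1 / Gamma^0) Gamma^1 lambda^(n)
        term = -(gamma1 @ term) / gamma0[:, None]
        lam += term
    return lam
```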
### Quasi-Harmonic Green Kubo
In non-crystalline solids with no long-range order, such as glasses, alloys, nanocrystalline, and partially disordered systems, the phonon picture is formally not well-defined. While vibrational modes are still the heat carriers, their mean free paths may be so short that the quasi-particle picture of heat carriers breaks down and the BTE is no longer applicable.
In glasses, heat transport is dominated by diffusive processes in which delocalized modes with similar frequencies transfer energy from one to another.
Whereas this mechanism is intrinsically distinct from the underlying hypothesis of the BTE approach, the two transport pictures have been recently reconciled in a unified theory, in which the thermal conductivity is written as:
$$
\kappa_{\alpha \alpha'}=\frac{1}{V} \sum_{\mu \mu'} c_{\mu \mu'} v_{\mu \mu' \alpha} v_{\mu \mu' \alpha'} \tau_{\mu \mu'}.
$$
This expression is analogous to the RTA one, where the modal heat capacities, group velocities, and lifetimes are replaced by
the generalized heat capacity,
$$
c_{\mu \mu'}=\frac{\hbar \omega_{\mu} \omega_{\mu'}}{T} \frac{n_{\mu}-n_{\mu'}}{\omega_{\mu}-\omega_{\mu'}},
$$
the generalized velocities,
$$
v_{\mu\mu'\alpha}=\frac{1}{2\sqrt{\omega_\mu\omega_{\mu'}}}
\sum_{ii'\beta\beta'}(x_{i\alpha}-x_{i'\alpha })D_{i\beta i'\beta'}\eta_{\mu i\beta}\eta_{\mu'i'\beta'},
$$
and the generalized lifetime $\tau_{\mu\mu'}$.
The latter is expressed as a Lorentzian, which weights diffusive processes between phonons with nearly resonant frequencies:
$$
\tau_{\mu\mu'} =
\frac{\gamma_{\mu}+\gamma_{\mu'}}{\left(\omega_{\mu}-\omega_{\mu'}\right)^{2}+\left(\gamma_{\mu}+\gamma_{\mu'}\right)^{2}}
$$
where $\gamma_\mu$ is the line width of mode $\mu$, which can be computed using the Fermi golden rule.
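The generalized lifetimes can be built with simple array broadcasting. A minimal sketch, assuming the angular frequencies and line widths are available as NumPy arrays:
```python
import numpy as np

def qhgk_lifetimes(omega, gamma):
    """Generalized lifetimes tau_{mu mu'} as the Lorentzian above.

    omega : (n_modes,) angular frequencies
    gamma : (n_modes,) line widths, e.g. from the Fermi golden rule
    """
    d_omega = omega[:, None] - omega[None, :]
    gamma_sum = gamma[:, None] + gamma[None, :]
    return gamma_sum / (d_omega**2 + gamma_sum**2)
```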
These equations have been derived from the Green-Kubo theory of linear response applied to thermal conductivity, by taking a quasi-harmonic approximation of the heat current, from which this approach is named quasi-harmonic Green-Kubo (QHGK).
It has been proven that for crystalline materials QHGK is formally equivalent to the BTE in the relaxation time approximation, and that its classical limit reproduces correctly molecular dynamics simulations for amorphous silicon up to relatively high temperature (600 K).
Finally, we provide a microscopic definition of the mode diffusivity,
$$
D_{\mu} =\frac{1}{N_k V} \sum_{\mu'}v_{\mu\mu'} \tau_{\mu\mu'}v_{\mu\mu'},
$$
which conveniently provides a measure of the temperature-independent contribution of each mode to thermal transport.
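Putting the QHGK ingredients together, a hedged sketch of the conductivity and of the per-mode diffusivity could read as follows. The generalized heat capacities, velocities, and lifetimes are assumed to be precomputed arrays, and the contraction of the Cartesian index in the diffusivity is an assumption about the convention used in the expressions above.
```python
import numpy as np

def qhgk_conductivity_and_diffusivity(c, v, tau, n_k, volume):
    """QHGK conductivity tensor and per-mode diffusivities.

    c   : (n_modes, n_modes)    generalized heat capacities c_{mu mu'}
    v   : (n_modes, n_modes, 3) generalized velocities v_{mu mu' alpha}
    tau : (n_modes, n_modes)    generalized lifetimes
    """
    kappa = np.einsum('mn,mna,mnb,mn->ab', c, v, v, tau) / volume
    # Mode diffusivity: contract velocities and lifetimes over the second mode index;
    # the 1/(N_k V) prefactor follows the expression in the text.
    diffusivity = np.einsum('mna,mn,mna->m', v, tau, v) / (n_k * volume)
    return kappa, diffusivity
```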
## Benchmark applications
The workflow for ALD calculations is illustrated below
<img src="_resources/timeline.png" width="650">
Here, we present two example simulations: one of a periodic and one of an amorphous structure.
### *Ab initio* silicon diamond
In this example we calculate the second-order IFCs using density functional perturbation theory as implemented in the Quantum ESPRESSO package. The phonon lifetime and thermal conductivity calculations are performed using a (19, 19, 19) q-point grid.
The minimal kALDo input file looks like:
```python
# IFCs object creation using ase.build.bulk
fc = ForceConstants(atoms=bulk('Si', 'diamond', a=2.699),
                    supercell=(5, 5, 5))
# input is the ASE input for QE
fc.second.calculate(calculator=Espresso(**input))
fc.third.calculate(calculator=Espresso(**input))
# Phonons object creation
phonons = Phonons(force_constants=fc,
kpts=[19, 19, 19],
temperature=300)
# Conductivity calculations
cond = Conductivity(phonons=phonons)
print('Thermal conductivity matrix, in (W/m/K):')
print(cond.conductivity(method='inverse').sum(axis=0))
```
We performed the simulation using the local density approximation for the exchange and correlation functional and a Bachelet-Hamann-Schlüter norm-conserving pseudopotential. Kohn-Sham orbitals are represented on a plane-wave basis set with a cutoff of 20 Ry and an (8, 8, 8) k-point mesh. The minimized lattice parameter is 5.398 Å. The third-order IFCs are calculated using finite-difference displacements on (5, 5, 5) replicas of the irreducible fcc unit cell, including up to the 5th nearest neighbor.
We obtained the following thermal properties
<img src="_resources/si-diamond-observables.png" width="650">
The silicon diamond modes analysis is shown above. Quantum (red) and classical (blue) results are compared. a) Normalized density of states, b) Normalized phase-space per mode $g$, c) lifetime per mode $\tau$, d) mean free path $\lambda$, and e) cumulative conductivity $\kappa_{cum}$.
### Amorphous silicon
Here we study a-Si generated by LAMMPS molecular dynamics simulations, in which a 4096-atom crystalline silicon structure is quenched from the melt using the 1989 Tersoff interatomic potential, and its thermal transport is computed with the QHGK method. The minimal input file looks like the following
```python
# IFCs object creation
fc = ForceConstants.from_folder(atoms,
                                folder='./input_data')
# Phonons object creation
phonons = Phonons(force_constants=fc,
                  temperature=300)
# Conductivity calculations
cond = Conductivity(phonons=phonons)
print('Thermal conductivity matrix, in (W/m/K):')
print(cond.conductivity(method='qhgk').sum(axis=0))
```
In a similar treatment to the silicon crystal, a full battery of modal analyses can be calculated with both quantum and classical statistics on the amorphous system, returning the phonon DOS as well as the associated lifetimes, generalized diffusivities, normalized phase space, and cumulative conductivity
<img src="_resources/amorphous.png" width="650">
Classical and quantum properties for the 4096-atom amorphous silicon system are shown above: a) density of states, b) lifetimes, c) diffusivities, and d) cumulative thermal conductivity. In spite of the increased quantum lifetimes, a decrease of 0.17 W/m/K is seen in the quantum conductivity. The difference in conductivity is primarily a result of the overestimation of the classical high-frequency heat capacities.
```
import keras
from keras.models import Sequential, Model, load_model
from keras.layers import Dense, Dropout, Activation, Flatten, Input, Lambda
from keras.layers import Conv2D, MaxPooling2D, Conv1D, MaxPooling1D, LSTM, ConvLSTM2D, GRU, BatchNormalization, LocallyConnected2D, Permute
from keras.layers import Concatenate, Reshape, Softmax, Conv2DTranspose, Embedding, Multiply
from keras.callbacks import ModelCheckpoint, EarlyStopping, Callback
from keras import regularizers
from keras import backend as K
import keras.losses
import tensorflow as tf
#tf.compat.v1.enable_eager_execution()
from tensorflow.python.framework import ops
import isolearn.keras as iso
import numpy as np
import logging
logging.getLogger('tensorflow').setLevel(logging.ERROR)
import pandas as pd
import os
import pickle
import random
import scipy.sparse as sp
import scipy.io as spio
import matplotlib.pyplot as plt
import isolearn.io as isoio
import isolearn.keras as isol
from genesis.visualization import *
from genesis.generator import *
from genesis.predictor import *
from genesis.optimizer import *
from definitions.generator.aparent_deconv_conv_generator_concat_trainmode import load_generator_network
from definitions.predictor.aparent import load_saved_predictor
import sklearn
from sklearn.decomposition import PCA
from sklearn.manifold import TSNE
from scipy.stats import pearsonr
import seaborn as sns
from matplotlib import colors
from scipy.stats import norm
from genesis.vae import *
def set_seed(seed_value) :
# 1. Set the `PYTHONHASHSEED` environment variable at a fixed value
os.environ['PYTHONHASHSEED']=str(seed_value)
# 2. Set the `python` built-in pseudo-random generator at a fixed value
random.seed(seed_value)
# 3. Set the `numpy` pseudo-random generator at a fixed value
np.random.seed(seed_value)
# 4. Set the `tensorflow` pseudo-random generator at a fixed value
tf.set_random_seed(seed_value)
# 5. Configure a new global `tensorflow` session
session_conf = tf.ConfigProto(intra_op_parallelism_threads=1, inter_op_parallelism_threads=1)
sess = tf.Session(graph=tf.get_default_graph(), config=session_conf)
K.set_session(sess)
def load_data(data_name, valid_set_size=0.05, test_set_size=0.05, batch_size=32) :
#Load cached dataframe
cached_dict = pickle.load(open(data_name, 'rb'))
plasmid_df = cached_dict['plasmid_df']
plasmid_cuts = cached_dict['plasmid_cuts']
#print("len(plasmid_df) = " + str(len(plasmid_df)) + " (loaded)")
#Generate training and test set indexes
plasmid_index = np.arange(len(plasmid_df), dtype=np.int)
plasmid_train_index = plasmid_index[:-int(len(plasmid_df) * (valid_set_size + test_set_size))]
plasmid_valid_index = plasmid_index[plasmid_train_index.shape[0]:-int(len(plasmid_df) * test_set_size)]
plasmid_test_index = plasmid_index[plasmid_train_index.shape[0] + plasmid_valid_index.shape[0]:]
#print('Training set size = ' + str(plasmid_train_index.shape[0]))
#print('Validation set size = ' + str(plasmid_valid_index.shape[0]))
#print('Test set size = ' + str(plasmid_test_index.shape[0]))
data_gens = {
gen_id : iso.DataGenerator(
idx,
{'df' : plasmid_df},
batch_size=batch_size,
inputs = [
{
'id' : 'seq',
'source_type' : 'dataframe',
'source' : 'df',
'extractor' : lambda row, index: row['padded_seq'][180 + 40: 180 + 40 + 81] + "G" * (128-81),
'encoder' : iso.OneHotEncoder(seq_length=128),
'dim' : (1, 128, 4),
'sparsify' : False
}
],
outputs = [
{
'id' : 'dummy_output',
'source_type' : 'zeros',
'dim' : (1,),
'sparsify' : False
}
],
randomizers = [],
shuffle = True if gen_id == 'train' else False
) for gen_id, idx in [('all', plasmid_index), ('train', plasmid_train_index), ('valid', plasmid_valid_index), ('test', plasmid_test_index)]
}
x_train = np.concatenate([data_gens['train'][i][0][0] for i in range(len(data_gens['train']))], axis=0)
x_test = np.concatenate([data_gens['test'][i][0][0] for i in range(len(data_gens['test']))], axis=0)
return x_train, x_test
#Specify problem-specific parameters
experiment_suffix = '_strong_vae_very_high_kl_epoch_35_margin_pos_2_lower_fitness'
vae_model_prefix = "vae/saved_models/vae_apa_max_isoform_doubledope_strong_cano_pas_len_128_50_epochs_very_high_kl"
vae_model_suffix = "_epoch_35"#""#
#VAE model path
saved_vae_encoder_model_path = vae_model_prefix + "_encoder" + vae_model_suffix + ".h5"
saved_vae_decoder_model_path = vae_model_prefix + "_decoder" + vae_model_suffix + ".h5"
#Padding for the VAE
vae_upstream_padding = ''
vae_downstream_padding = 'G' * 47
#VAE sequence template
vae_sequence_template = 'ATCCANNNNNNNNNNNNNNNNNNNNNNNNNAATAAANNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNCAGCC' + 'G' * (128 - 81)
#VAE latent dim
vae_latent_dim = 100
#Oracle predictor model path
saved_predictor_model_path = '../../../aparent/saved_models/aparent_plasmid_iso_cut_distalpas_all_libs_no_sampleweights_sgd.h5'
#Substring indices for VAE
vae_pwm_start = 40
vae_pwm_end = 121
#VAE parameter collection
vae_params = [
saved_vae_encoder_model_path,
saved_vae_decoder_model_path,
vae_upstream_padding,
vae_downstream_padding,
vae_latent_dim,
vae_pwm_start,
vae_pwm_end
]
#Load data set
vae_data_path = "vae/apa_doubledope_cached_set_strong_short_cano_pas.pickle"
_, x_test = load_data(vae_data_path, valid_set_size=0.005, test_set_size=0.095)
#Evaluate ELBO distribution on test set
#Load VAE models
vae_encoder_model = load_model(saved_vae_encoder_model_path, custom_objects={'st_sampled_softmax':st_sampled_softmax, 'st_hardmax_softmax':st_hardmax_softmax, 'min_pred':min_pred, 'InstanceNormalization':InstanceNormalization})
vae_decoder_model = load_model(saved_vae_decoder_model_path, custom_objects={'st_sampled_softmax':st_sampled_softmax, 'st_hardmax_softmax':st_hardmax_softmax, 'min_pred':min_pred, 'InstanceNormalization':InstanceNormalization})
#Compute multi-sample ELBO on test set
log_mean_p_vae_test, mean_log_p_vae_test, log_p_vae_test = evaluate_elbo(vae_encoder_model, vae_decoder_model, x_test, n_samples=128)
print("mean log(likelihood) = " + str(mean_log_p_vae_test))
#Log Likelihood Plot
plot_min_val = None
plot_max_val = None
f = plt.figure(figsize=(6, 4))
log_p_vae_test_hist, log_p_vae_test_edges = np.histogram(log_mean_p_vae_test, bins=50, density=True)
bin_width_test = log_p_vae_test_edges[1] - log_p_vae_test_edges[0]
plt.bar(log_p_vae_test_edges[1:] - bin_width_test/2., log_p_vae_test_hist, width=bin_width_test, linewidth=2, edgecolor='black', color='orange')
plt.xticks(fontsize=14)
plt.yticks(fontsize=14)
if plot_min_val is not None and plot_max_val is not None :
plt.xlim(plot_min_val, plot_max_val)
plt.xlabel("VAE Log Likelihood", fontsize=14)
plt.ylabel("Data Density", fontsize=14)
plt.axvline(x=mean_log_p_vae_test, linewidth=2, color='red', linestyle="--")
plt.tight_layout()
plt.show()
#Evaluate ELBO distribution on test set (training-level no. of samples)
#Load VAE models
vae_encoder_model = load_model(saved_vae_encoder_model_path, custom_objects={'st_sampled_softmax':st_sampled_softmax, 'st_hardmax_softmax':st_hardmax_softmax, 'min_pred':min_pred, 'InstanceNormalization':InstanceNormalization})
vae_decoder_model = load_model(saved_vae_decoder_model_path, custom_objects={'st_sampled_softmax':st_sampled_softmax, 'st_hardmax_softmax':st_hardmax_softmax, 'min_pred':min_pred, 'InstanceNormalization':InstanceNormalization})
#Compute multi-sample ELBO on test set
log_mean_p_vae_test, mean_log_p_vae_test, log_p_vae_test = evaluate_elbo(vae_encoder_model, vae_decoder_model, x_test, n_samples=32)
print("mean log(likelihood) = " + str(mean_log_p_vae_test))
#Log Likelihood Plot
plot_min_val = None
plot_max_val = None
f = plt.figure(figsize=(6, 4))
log_p_vae_test_hist, log_p_vae_test_edges = np.histogram(log_mean_p_vae_test, bins=50, density=True)
bin_width_test = log_p_vae_test_edges[1] - log_p_vae_test_edges[0]
plt.bar(log_p_vae_test_edges[1:] - bin_width_test/2., log_p_vae_test_hist, width=bin_width_test, linewidth=2, edgecolor='black', color='orange')
plt.xticks(fontsize=14)
plt.yticks(fontsize=14)
if plot_min_val is not None and plot_max_val is not None :
plt.xlim(plot_min_val, plot_max_val)
plt.xlabel("VAE Log Likelihood", fontsize=14)
plt.ylabel("Data Density", fontsize=14)
plt.axvline(x=mean_log_p_vae_test, linewidth=2, color='red', linestyle="--")
plt.tight_layout()
plt.show()
#Define target isoform loss function
def get_isoform_loss(target_isos, fitness_weight=2., batch_size=32, n_samples=1, n_z_samples=1, mini_batch_size=1, seq_length=205, vae_loss_mode='bound', vae_divergence_weight=1., ref_vae_log_p=-10, vae_log_p_margin=1, decoded_pwm_epsilon=10**-6, pwm_start=0, pwm_end=70, pwm_target_bits=1.8, vae_pwm_start=0, entropy_weight=0.0, entropy_loss_mode='margin', similarity_weight=0.0, similarity_margin=0.5) :
target_iso = np.zeros((len(target_isos), 1))
for i, t_iso in enumerate(target_isos) :
target_iso[i, 0] = t_iso
masked_entropy_mse = get_target_entropy_sme_masked(pwm_start=pwm_start, pwm_end=pwm_end, target_bits=pwm_target_bits)
if entropy_loss_mode == 'margin' :
masked_entropy_mse = get_margin_entropy_ame_masked(pwm_start=pwm_start, pwm_end=pwm_end, min_bits=pwm_target_bits)
pwm_sample_entropy_func = get_pwm_margin_sample_entropy_masked(pwm_start=pwm_start, pwm_end=pwm_end, margin=similarity_margin, shift_1_nt=True)
def loss_func(loss_tensors) :
_, _, _, sequence_class, pwm_logits_1, pwm_logits_2, pwm_1, pwm_2, sampled_pwm_1, sampled_pwm_2, mask, sampled_mask, iso_pred, cut_pred, iso_score_pred, cut_score_pred, vae_pwm_1, vae_sampled_pwm_1, z_mean_1, z_log_var_1, z_1, decoded_pwm_1 = loss_tensors
#Create target isoform with sample axis
iso_targets = K.constant(target_iso)
iso_true = K.gather(iso_targets, sequence_class[:, 0])
iso_true = K.tile(K.expand_dims(iso_true, axis=-1), (1, K.shape(sampled_pwm_1)[1], 1))
#Re-create iso_pred from cut_pred
#iso_pred = K.expand_dims(K.sum(cut_pred[..., 76:76+35], axis=-1), axis=-1)
#Specify costs
iso_loss = fitness_weight * K.mean(symmetric_sigmoid_kl_divergence(iso_true, iso_pred), axis=1)
#Construct VAE sequence inputs
decoded_pwm_1 = K.clip(decoded_pwm_1, decoded_pwm_epsilon, 1. - decoded_pwm_epsilon)
log_p_x_given_z_1 = K.sum(K.sum(vae_sampled_pwm_1[:, :, :, pwm_start-vae_pwm_start:pwm_end-vae_pwm_start, ...] * K.log(K.stop_gradient(decoded_pwm_1[:, :, :, pwm_start-vae_pwm_start:pwm_end-vae_pwm_start, ...])) / K.log(K.constant(10.)), axis=(-1, -2)), axis=-1)
log_p_std_normal_1 = K.sum(normal_log_prob(z_1, 0., 1.) / K.log(K.constant(10.)), axis=-1)
log_p_importance_1 = K.sum(normal_log_prob(z_1, z_mean_1, K.sqrt(K.exp(z_log_var_1))) / K.log(K.constant(10.)), axis=-1)
log_p_vae_1 = log_p_x_given_z_1 + log_p_std_normal_1 - log_p_importance_1
log_p_vae_div_n_1 = log_p_vae_1 - K.log(K.constant(n_z_samples, dtype='float32')) / K.log(K.constant(10.))
#Calculate mean ELBO across samples (log-sum-exp trick)
max_log_p_vae_1 = K.max(log_p_vae_div_n_1, axis=-1)
log_mean_p_vae_1 = max_log_p_vae_1 + K.log(K.sum(10**(log_p_vae_div_n_1 - K.expand_dims(max_log_p_vae_1, axis=-1)), axis=-1)) / K.log(K.constant(10.))
#Specify VAE divergence loss function
vae_divergence_loss = 0.
if vae_loss_mode == 'bound' :
vae_divergence_loss = vae_divergence_weight * K.mean(K.switch(log_mean_p_vae_1 < ref_vae_log_p - vae_log_p_margin, -log_mean_p_vae_1 + (ref_vae_log_p - vae_log_p_margin), K.zeros_like(log_mean_p_vae_1)), axis=1)
elif vae_loss_mode == 'penalty' :
vae_divergence_loss = vae_divergence_weight * K.mean(-log_mean_p_vae_1, axis=1)
elif vae_loss_mode == 'target' :
vae_divergence_loss = vae_divergence_weight * K.mean((log_mean_p_vae_1 - (ref_vae_log_p - vae_log_p_margin))**2, axis=1)
elif 'mini_batch_' in vae_loss_mode :
mini_batch_log_mean_p_vae_1 = K.permute_dimensions(K.reshape(log_mean_p_vae_1, (int(batch_size / mini_batch_size), mini_batch_size, n_samples)), (0, 2, 1))
mini_batch_mean_log_p_vae_1 = K.mean(mini_batch_log_mean_p_vae_1, axis=-1)
tiled_mini_batch_mean_log_p_vae_1 = K.tile(mini_batch_mean_log_p_vae_1, (mini_batch_size, 1))
if vae_loss_mode == 'mini_batch_bound' :
vae_divergence_loss = vae_divergence_weight * K.mean(K.switch(tiled_mini_batch_mean_log_p_vae_1 < ref_vae_log_p - vae_log_p_margin, -tiled_mini_batch_mean_log_p_vae_1 + (ref_vae_log_p - vae_log_p_margin), K.zeros_like(tiled_mini_batch_mean_log_p_vae_1)), axis=1)
elif vae_loss_mode == 'mini_batch_target' :
vae_divergence_loss = vae_divergence_weight * K.mean((tiled_mini_batch_mean_log_p_vae_1 - (ref_vae_log_p - vae_log_p_margin))**2, axis=1)
entropy_loss = entropy_weight * masked_entropy_mse(pwm_1, mask)
entropy_loss += similarity_weight * K.mean(pwm_sample_entropy_func(sampled_pwm_1, sampled_pwm_2, sampled_mask), axis=1)
#Compute total loss
total_loss = iso_loss + entropy_loss + vae_divergence_loss
return total_loss
return loss_func
class EpochVariableCallback(Callback):
def __init__(self, my_variable, my_func):
self.my_variable = my_variable
self.my_func = my_func
def on_epoch_end(self, epoch, logs={}):
K.set_value(self.my_variable, self.my_func(K.get_value(self.my_variable), epoch))
#Function for running GENESIS
def run_genesis(sequence_templates, loss_func, library_contexts, model_path, batch_size=32, n_samples=1, n_z_samples=1, vae_params=None, n_epochs=10, steps_per_epoch=100) :
#Build Generator Network
_, generator = build_generator(batch_size, len(sequence_templates[0]), load_generator_network, n_classes=len(sequence_templates), n_samples=n_samples, sequence_templates=sequence_templates, batch_normalize_pwm=False)
#Build Predictor Network and hook it on the generator PWM output tensor
_, sample_predictor = build_predictor(generator, load_saved_predictor(model_path, library_contexts=library_contexts), batch_size, n_samples=n_samples, eval_mode='sample')
#Build VAE model
vae_tensors = []
if vae_params is not None :
encoder_model_path, decoder_model_path, vae_upstream_padding, vae_downstream_padding, vae_latent_dim, vae_pwm_start, vae_pwm_end = vae_params
vae_tensors = build_vae(generator, encoder_model_path, decoder_model_path, batch_size=batch_size, seq_length=len(sequence_templates[0]), n_samples=n_samples, n_z_samples=n_z_samples, vae_latent_dim=vae_latent_dim, vae_upstream_padding=vae_upstream_padding, vae_downstream_padding=vae_downstream_padding, vae_pwm_start=vae_pwm_start, vae_pwm_end=vae_pwm_end)
#Build Loss Model (In: Generator seed, Out: Loss function)
_, loss_model = build_loss_model(sample_predictor, loss_func, extra_loss_tensors=vae_tensors)
#Specify Optimizer to use
opt = keras.optimizers.Adam(lr=0.001, beta_1=0.9, beta_2=0.999)
#Compile Loss Model (Minimize self)
loss_model.compile(loss=lambda true, pred: pred, optimizer=opt)
#Fit Loss Model
train_history = loss_model.fit(
[], np.ones((1, 1)),
epochs=n_epochs,
steps_per_epoch=steps_per_epoch
)
return generator, sample_predictor, train_history
#Maximize isoform proportion
sequence_templates = [
'CTTCCGATCTCTCGCTCTTTCTATGGCATTCATTACTCGCATCCANNNNNNNNNNNNNNNNNNNNNNNNNAATAAANNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNCAGCCAATTAAGCCTGTCGTCGTGGGTGTCGAAAATGAAATAAAACAAGTCAATTGCGTAGTTTATTCAGACGTACCCCGTGGACCTAC'
]
library_contexts = [
'doubledope'
]
margin_similarities = [
0.5
]
#Generate new random seed
print(np.random.randint(low=0, high=1000000))
#Train APA Cleavage GENESIS Network
print("Training GENESIS")
#Number of PWMs to generate per objective
batch_size = 64
mini_batch_size = 8
#Number of One-hot sequences to sample from the PWM at each grad step
n_samples = 1
#Number of VAE latent vector samples at each grad step
n_z_samples = 32#128#32
#Number of epochs per objective to optimize
n_epochs = 50#10#5#25
#Number of steps (grad updates) per epoch
steps_per_epoch = 50
seed = 104590
for class_i in range(len(sequence_templates)) :
lib_name = library_contexts[class_i].split("_")[0]
print("Library context = " + str(lib_name))
K.clear_session()
set_seed(seed)
loss = get_isoform_loss(
[1.0],
fitness_weight=0.1,#0.5,
batch_size=batch_size,
n_samples=n_samples,
n_z_samples=n_z_samples,
mini_batch_size=mini_batch_size,
seq_length=len(sequence_templates[0]),
vae_loss_mode='mini_batch_bound',#'target',
vae_divergence_weight=40.0 * 1./71.,#5.0 * 1./71.,#0.5 * 1./71.,
ref_vae_log_p=-38.807,
vae_log_p_margin=2.0,
#decoded_pwm_epsilon=0.05,
pwm_start=vae_pwm_start + 5,
pwm_end=vae_pwm_start + 5 + 71,
vae_pwm_start=vae_pwm_start,
pwm_target_bits=1.8,
entropy_weight=0.5,#0.01,
entropy_loss_mode='margin',
similarity_weight=5.0,#0.5,#5.0,
similarity_margin=margin_similarities[class_i]
)
genesis_generator, genesis_predictor, train_history = run_genesis([sequence_templates[class_i]], loss, [library_contexts[class_i]], saved_predictor_model_path, batch_size, n_samples, n_z_samples, vae_params, n_epochs, steps_per_epoch)
genesis_generator.get_layer('lambda_rand_sequence_class').function = lambda inp: inp
genesis_generator.get_layer('lambda_rand_input_1').function = lambda inp: inp
genesis_generator.get_layer('lambda_rand_input_2').function = lambda inp: inp
genesis_predictor.get_layer('lambda_rand_sequence_class').function = lambda inp: inp
genesis_predictor.get_layer('lambda_rand_input_1').function = lambda inp: inp
genesis_predictor.get_layer('lambda_rand_input_2').function = lambda inp: inp
# Save model and weights
save_dir = 'saved_models'
if not os.path.isdir(save_dir):
os.makedirs(save_dir)
model_name = 'genesis_apa_max_isoform_' + str(lib_name) + experiment_suffix + '_vae_kl_generator.h5'
model_path = os.path.join(save_dir, model_name)
genesis_generator.save(model_path)
print('Saved trained model at %s ' % model_path)
model_name = 'genesis_apa_max_isoform_' + str(lib_name) + experiment_suffix + '_vae_kl_predictor.h5'
model_path = os.path.join(save_dir, model_name)
genesis_predictor.save(model_path)
print('Saved trained model at %s ' % model_path)
#Load GENESIS models and predict sample sequences
lib_name = library_contexts[0].split("_")[0]
batch_size = 64
model_names = [
'genesis_apa_max_isoform_' + str(lib_name) + experiment_suffix + '_vae_kl',
]
sequence_templates = [
'CTTCCGATCTCTCGCTCTTTCTATGGCATTCATTACTCGCATCCANNNNNNNNNNNNNNNNNNNNNNNNNAATAAANNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNNCAGCCAATTAAGCCTGTCGTCGTGGGTGTCGAAAATGAAATAAAACAAGTCAATTGCGTAGTTTATTCAGACGTACCCCGTGGACCTAC'
]
for class_i in range(len(sequence_templates)-1, 0-1, -1) :
save_dir = os.path.join(os.getcwd(), 'saved_models')
model_name = model_names[class_i] + '_predictor.h5'
model_path = os.path.join(save_dir, model_name)
predictor = load_model(model_path, custom_objects={'st_sampled_softmax': st_sampled_softmax, 'st_hardmax_softmax': st_hardmax_softmax})
n = batch_size
sequence_class = np.array([0] * n).reshape(-1, 1) #np.random.uniform(-6, 6, (n, 1)) #
noise_1 = np.random.uniform(-1, 1, (n, 100))
noise_2 = np.random.uniform(-1, 1, (n, 100))
pred_outputs = predictor.predict([sequence_class, noise_1, noise_2], batch_size=batch_size)
_, _, _, optimized_pwm, _, _, _, _, _, iso_pred, cut_pred, _, _ = pred_outputs
#Plot one PWM sequence logo per optimized objective (Experiment 'Punish A-runs')
for pwm_index in range(10) :
sequence_template = sequence_templates[class_i]
pwm = np.expand_dims(optimized_pwm[pwm_index, :, :, 0], axis=0)
cut = np.expand_dims(cut_pred[pwm_index, 0, :], axis=0)
iso = np.expand_dims(np.sum(cut[:, 80: 115], axis=-1), axis=-1)
plot_seqprop_logo(pwm, iso, cut, annotate_peaks='max', sequence_template=sequence_template, figsize=(12, 1.5), width_ratios=[1, 8], logo_height=0.8, usage_unit='fraction', plot_start=70-50, plot_end=76+50, save_figs=False, fig_name='genesis_apa_max_isoform_' + str(lib_name) + experiment_suffix + "_pwm_index_" + str(pwm_index), fig_dpi=150)
```