# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/fakabbir/AI-Playbook/blob/master/Custom%20NER%20Training%20using%20SpaCy%20%2B%20BERT(no%20static%20vector).ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + colab={"base_uri": "https://localhost:8080/"} id="5hy-ytfLd1FT" outputId="a0137c66-8a2c-458b-dff8-915c353829e0"
# !pip install -U spacy[cuda113]
# !python -m spacy download en_core_web_trf
# !python -m spacy download en_core_web_lg
# + colab={"base_uri": "https://localhost:8080/"} id="pzoB10IZhc1v" outputId="e9dce26a-9df6-45ce-f318-6b6b23708bc5"
# !pip install cupy-cuda113
# !pip3 install torch torchvision torchaudio --extra-index-url https://download.pytorch.org/whl/cu113
# + colab={"base_uri": "https://localhost:8080/"} id="tQSgmaygd7Sd" outputId="fb84358f-06e8-4dbd-8dfb-2c6724b64a4c"
from pprint import pprint
import spacy
print("Spacy version: ",spacy.__version__)
nlp = spacy.load('en_core_web_trf')
pprint("Pipelines: \n")
pprint(nlp.pipeline)
# + id="zCcqIWLweySu"
doc = nlp("Bezo founded Amazon pvt ltd in 2009")
# + colab={"base_uri": "https://localhost:8080/"} id="kSQnArCse3WS" outputId="3903c5e9-bdd0-4fe3-e4a4-89c4eb254ffc"
doc._.trf_data.tensors[0].shape
# + colab={"base_uri": "https://localhost:8080/"} id="odXLMLXAe-47" outputId="27d58010-038d-427b-fb20-8c869e6846db"
doc._.trf_data.tensors[1].shape
# + colab={"base_uri": "https://localhost:8080/"} id="LPQMEvBwtGE-" outputId="815fbd2b-c97b-4b51-a889-51e5e0ecf232"
doc._.trf_data.model_output.last_hidden_state.shape
# + colab={"base_uri": "https://localhost:8080/"} id="GI3kvPHIfpkx" outputId="8c376bf5-f5a0-470c-d57e-ead0a10d3510"
doc._.trf_data.model_output.pooler_output.shape
# + id="WGGr42MKfCM-"
#doc._.trf_data.model_output.pooler_output == doc._.trf_data.tensors[1] # Gives True
# + id="R6uZ-2Euf6Sd"
TRAIN_DATA = [
("text1", {"cats": {"POSITIVE": 1.0, "NEGATIVE": 0.0}})
]
# + colab={"base_uri": "https://localhost:8080/"} id="senYba7tsZ1L" outputId="3c222a80-1dd9-4fed-ce63-bd7ee995e724"
# ! curl https://raw.githubusercontent.com/UBIAI/Fine_tune_BERT_with_spacy3/main/test.tsv > test.tsv
# ! curl https://raw.githubusercontent.com/UBIAI/Fine_tune_BERT_with_spacy3/main/train.tsv > train.tsv
# + colab={"base_uri": "https://localhost:8080/"} id="Aj38Ye9suRhW" outputId="a15c8258-ffda-403e-c1f6-621c1eb3d583"
for ent in doc.ents:
print(ent, ent.label_,ent.has_vector)
# + colab={"base_uri": "https://localhost:8080/"} id="IgOgbK_-T5-K" outputId="2c5a97dd-b303-47f8-9db3-7a657c8aec82"
# !python -m spacy convert train.tsv ./ -t json -n 1 -c iob
# !python -m spacy convert test.tsv ./ -t json -n 1 -c iob
# + colab={"base_uri": "https://localhost:8080/"} id="wQjp9psOUlKG" outputId="75ff61be-0f2a-4842-f02a-249342febebd"
# ls
# + colab={"base_uri": "https://localhost:8080/"} id="bnUe9msiUqX9" outputId="667ff0b9-c0ba-449a-f2a6-133c76fa247a"
# !python -m spacy convert train.json ./ -t spacy
# !python -m spacy convert test.json ./ -t spacy
# + colab={"base_uri": "https://localhost:8080/"} id="2CSTZI2XfEp8" outputId="465235e3-d34c-45b9-d280-400d9311785f"
# !nvcc --version
# + colab={"base_uri": "https://localhost:8080/"} id="F3GTVZVFUz7D" outputId="26e6d08f-de9a-465d-f98c-d09113ecec16"
# !python -m spacy init config config.cfg --lang en --pipeline ner --optimize accuracy --force
# + colab={"base_uri": "https://localhost:8080/"} id="mBWTiKixVlSV" outputId="79d4a812-08ae-4878-e503-a78eb20a25b2"
# !python -m spacy train config.cfg --paths.train ./train.spacy --paths.dev ./test.spacy --output ./
# + colab={"base_uri": "https://localhost:8080/"} id="Ab62v1TcUz-U" outputId="df73e570-5c76-4b4e-d3f5-b1a9e942de0b"
# ls
# + id="XcOQLYMeiYZf" colab={"base_uri": "https://localhost:8080/"} outputId="e4a2e012-45e3-4ddf-d691-9d08696a3a62"
nlp = spacy.load("./model-best")
text = [
'''Have 5+ years of experience focused on applied machine learning, text analytics and Natural
Language Processing.
I have managed and lead cloud based AI enabled solution’s
development from requirement gathering, architectural designing, managing multiple dev teams
and product lifecycle.
Strong computer science and programming background, with experience and proficiency in
development technologies.
'''
]
for doc in nlp.pipe(text, disable=["tagger", "parser"]):
print([(ent.text, ent.label_) for ent in doc.ents])
# + id="c2r1waRfvjg4"
# + colab={"base_uri": "https://localhost:8080/"} id="1bmPin3duRQx" outputId="23c8c9c0-a82b-48dc-f34b-862320b3ea1b"
# cat config.cfg
# + id="q36AbGkGuciZ"
s ='''
[paths]
train = null
dev = null
vectors = null
init_tok2vec = null
[system]
gpu_allocator = null
seed = 0
[nlp]
lang = "en"
pipeline = ["tok2vec","ner"]
batch_size = 1000
disabled = []
before_creation = null
after_creation = null
after_pipeline_creation = null
tokenizer = {"@tokenizers":"spacy.Tokenizer.v1"}
[components]
[components.ner]
factory = "ner"
incorrect_spans_key = null
moves = null
scorer = {"@scorers":"spacy.ner_scorer.v1"}
update_with_oracle_cut_size = 100
[components.ner.model]
@architectures = "spacy.TransitionBasedParser.v2"
state_type = "ner"
extra_state_tokens = false
hidden_width = 64
maxout_pieces = 2
use_upper = true
nO = null
[components.ner.model.tok2vec]
@architectures = "spacy.Tok2VecListener.v1"
width = ${components.tok2vec.model.encode.width}
upstream = "*"
[components.tok2vec]
factory = "tok2vec"
[components.tok2vec.model]
@architectures = "spacy.Tok2Vec.v2"
[components.tok2vec.model.embed]
@architectures = "spacy.MultiHashEmbed.v2"
width = ${components.tok2vec.model.encode.width}
attrs = ["NORM","PREFIX","SUFFIX","SHAPE"]
rows = [5000,2500,2500,2500]
include_static_vectors = false
[components.tok2vec.model.encode]
@architectures = "spacy.MaxoutWindowEncoder.v2"
width = 256
depth = 8
window_size = 1
maxout_pieces = 3
[corpora]
[corpora.dev]
@readers = "spacy.Corpus.v1"
path = ${paths.dev}
max_length = 0
gold_preproc = false
limit = 0
augmenter = null
[corpora.train]
@readers = "spacy.Corpus.v1"
path = ${paths.train}
max_length = 0
gold_preproc = false
limit = 0
augmenter = null
[training]
dev_corpus = "corpora.dev"
train_corpus = "corpora.train"
seed = ${system.seed}
gpu_allocator = ${system.gpu_allocator}
dropout = 0.1
accumulate_gradient = 1
patience = 1600
max_epochs = 0
max_steps = 20000
eval_frequency = 200
frozen_components = []
annotating_components = []
before_to_disk = null
[training.batcher]
@batchers = "spacy.batch_by_words.v1"
discard_oversize = false
tolerance = 0.2
get_length = null
[training.batcher.size]
@schedules = "compounding.v1"
start = 100
stop = 1000
compound = 1.001
t = 0.0
[training.logger]
@loggers = "spacy.ConsoleLogger.v1"
progress_bar = false
[training.optimizer]
@optimizers = "Adam.v1"
beta1 = 0.9
beta2 = 0.999
L2_is_weight_decay = true
L2 = 0.01
grad_clip = 1.0
use_averages = false
eps = 0.00000001
learn_rate = 0.001
[training.score_weights]
ents_f = 1.0
ents_p = 0.0
ents_r = 0.0
ents_per_type = null
[pretraining]
[initialize]
vectors = ${paths.vectors}
init_tok2vec = ${paths.init_tok2vec}
vocab_data = null
lookups = null
before_init = null
after_init = null
[initialize.components]
[initialize.tokenizer]
'''
with open('config.cfg', 'w+') as f:
f.write(s)
# + id="Exv1FjbWvYv8"
| Custom NER Training using SpaCy + BERT(no static vector).ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [default]
# language: python
# name: python3
# ---
# # Conditional Probability & Bayes Rule Quiz
# imports
import pandas as pd
import numpy as np
# load dataset
df = pd.read_csv('cancer_test_data.csv')
df.head(3)
df.info()
# What proportion of patients who tested positive has cancer?
# P(cancer|positive)
# 0.342
wCancerPos = len(df[(df.has_cancer==True) & (df.test_result=='Positive')])
nCancerPos = len(df[(df.has_cancer==False) & (df.test_result=='Positive')])
round(wCancerPos / (wCancerPos + nCancerPos), 3)
# What proportion of patients who tested positive doesn't have cancer?
# P(¬cancer|positive)
# 0.658
round(nCancerPos / (wCancerPos + nCancerPos), 3)
# What proportion of patients who tested negative has cancer?
# P(cancer|negative)
# 0.014
wCancerNeg = len(df[(df.has_cancer==True) & (df.test_result=='Negative')])
nCancerNeg = len(df[(df.has_cancer==False) & (df.test_result=='Negative')])
round(wCancerNeg / (wCancerNeg + nCancerNeg), 3)
# What proportion of patients who tested negative doesn't have cancer?
# P(¬cancer|negative)
# 0.986
round(nCancerNeg / (wCancerNeg + nCancerNeg), 3)
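# As a cross-check, the same quantity follows from Bayes' rule using the marginal rates.
# A minimal sketch; it reuses the has_cancer / test_result columns above.
p_cancer = (df.has_cancer == True).mean()                                          # P(cancer)
p_pos_given_cancer = df[df.has_cancer == True].test_result.eq('Positive').mean()   # P(positive|cancer)
p_pos = df.test_result.eq('Positive').mean()                                       # P(positive)
round(p_cancer * p_pos_given_cancer / p_pos, 3)                                    # P(cancer|positive)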
| stats_lessons/conditional_probability_bayes_rule.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Environment (conda_tensorflow_p36)
# language: python
# name: conda_tensorflow_p36
# ---
# # Text processing
# This notebook is adapted from the following web sites
# * https://machinelearningmastery.com/prepare-text-data-deep-learning-keras/
import keras
from keras.preprocessing.text import text_to_word_sequence
from keras.preprocessing.text import one_hot
from keras.preprocessing.text import hashing_trick
# Split a sentence into a list of words
sentence = 'The quick brown fox jumps over the lazy dog'
words = text_to_word_sequence(sentence)
words
# A more convenient representation of a document is a sequence of integer values where each word is represented by a unique integer
# get unique words
words = set(words)
vocab_size = len(words)
# increase the vocabulary size by a factor of about 1.3 to reduce hash collisions
one_hot(sentence, round(vocab_size * 1.3))
# Choose the md5 hashing function as it is consistent across runs versus the default hash function
hashing_trick(sentence, round(vocab_size * 1.3), hash_function='md5')
# ## Tokenizer API
# The Tokenizer API is better for working with multiple text documents at once.
from keras.preprocessing.text import Tokenizer
# define 5 documents
docs = ['Well done!',
'Good work',
'Great effort',
'nice work',
'Excellent!']
# create the tokenizer
t = Tokenizer()
# fit the tokenizer on the documents
t.fit_on_texts(docs)
# number of times each word occurred in all the documents
t.word_counts
# number of documents each word appears in
t.word_docs
# display the number of documents
t.document_count
# mapping of word to the index
t.word_index
# mode = binary - whether each word occurs in the document
# mode = count - the number of times the word occurs in the document
# mode = tfidf - the Term Frequency - Inverse Document Frequency scoring
# mode = freq - the frequency of each word as a ratio of words in the document
t.texts_to_matrix(docs, mode='binary')
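# the other modes can be inspected on the same documents, e.g. raw counts and tf-idf weights
t.texts_to_matrix(docs, mode='count')
t.texts_to_matrix(docs, mode='tfidf')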
| code/notebooks/keras/05-Text_processing.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Random Forest
# A single decision tree - tasked to learn a dataset - might not be able to perform well due to the outliers and the breadth and depth complexity of the data.
# So instead of relying on a single tree, random forests rely on a multitude of cleverly grown decision trees.
# Each tree within the forest is allowed to become highly specialised in a specific area but still retains some general knowledge about most areas. When a random forest classifies, it is actually each tree in the forest working together to cast votes on what label they think a specific sample should be assigned.
#
# Instead of sharing the entire dataset with each decision tree, the forest performs an operation which is essentially a train / test split of the training data. Each decision tree in the forest randomly samples from the overall training data set. By doing so, each tree exists in an independent subspace and the variation between trees is controlled. This technique is known as **tree bagging, or bootstrap aggregating**.
#
# In addition to the tree bagging of training samples at the forest level, each individual decision tree further 'feature bags' at each node-branch split. This is helpful because some datasets contain a feature that is very correlated to the target (the 'y'-label). By selecting a random sampling of features every split - if such a feature were to exist - it wouldn't show up on as many branches of the tree and there would be more diversity of the features examined.
#
# Check [my post](https://mashimo.wordpress.com/2020/04/26/random-forest/) to see more details about Random Forests!
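# As a quick illustration of these two ideas (a toy sketch for intuition only, not part of the analysis below), each tree can be grown on a bootstrap sample of the rows while considering only a random subset of features at each split:

# +
# illustrative sketch of tree bagging + feature bagging on toy data
import numpy as np
from sklearn.utils import resample
from sklearn.tree import DecisionTreeClassifier

rng = np.random.RandomState(0)
X_demo = rng.rand(100, 4)                      # toy feature matrix
y_demo = (X_demo[:, 0] > 0.5).astype(int)      # toy labels

trees = []
for i in range(5):
    # bootstrap sample of the training rows (tree bagging)
    X_boot, y_boot = resample(X_demo, y_demo, random_state=i)
    # max_features='sqrt' restricts the features examined at each split (feature bagging)
    trees.append(DecisionTreeClassifier(max_features='sqrt', random_state=i).fit(X_boot, y_boot))

# majority vote across the small forest
votes = np.stack([t.predict(X_demo) for t in trees])
majority = (votes.mean(axis=0) > 0.5).astype(int)
# -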
# # Human activity prediction
# As an example, we will predict human activity by looking at data from wearables.
# For this, we train a random forest against a public domain Human Activity Dataset titled *Wearable Computing: Accelerometers' Data Classification of Body Postures and Movements*, containing 165633 data points.
#
# Within the dataset, there are five target activities:
# - Sitting
# - Sitting Down
# - Standing
# - Standing Up
# - Walking
#
# These activities were captured from 30 people wearing accelerometers mounted on their waist, left thigh, right arm, and right ankle.
# ## Read the data
# The original dataset can be found on the [UCI MachineLearning Repository](https://archive.ics.uci.edu/ml/datasets/human+activity+recognition+using+smartphones)
#
# A copy can be found also here on GitHub (URL is below) and on [Kaggle](https://www.kaggle.com/uciml/human-activity-recognition-with-smartphones)
import pandas as pd
import time
# +
# Grab the DLA HAR dataset from the links above
# we assume that is stored in a dataset folder
#
# Load up the dataset into dataframe 'X'
#
X = pd.read_csv("../datasets/dataset-har-PUC-rio-ugulino.csv", sep=';', low_memory=False)
# -
X.head(2)
X.describe()
# ## Pre-processing the data
#
# What we want to do is to predict the activity class based on the accelerometer's data from the wearables.
#
# An easy way to show which rows have NaNs in them:
print (X[pd.isnull(X).any(axis=1)])
# Great, no NaNs here. Let's go on.
# +
#
# Encode the gender column: 0 as male, 1 as female
#
X.gender = X.gender.map({'Woman':1, 'Man':0})
#
# Clean up any column with commas in it
# so that they're properly represented as decimals instead
#
X.how_tall_in_meters = X.how_tall_in_meters.str.replace(',','.').astype(float)
X.body_mass_index = X.body_mass_index.str.replace(',','.').astype(float)
#
# Check data types
print (X.dtypes)
# +
# column z4 is type "object". Something is wrong with the dataset.
#
# Convert that column into numeric
# Use errors='raise'. This will alert you if something ends up being
# problematic
#
#
# INFO: There is an error raised ... you will find it if you try the method
#
# print (X[pd.isnull(X).any(axis=1)])
# 122076 --> z4 = -14420-11-2011 04:50:23.713
#
# !! The data point #122076 is a wrong coded record,
# change it or drop it before calling the to_numeric methods:
#
#X.at[122076, 'z4'] = -144 // change to correct value
# I keep this value for later and drop it from the dataset
wrongRow = X.loc[122076]
X.drop(X.index[[122076]], inplace=True)
X.z4 = pd.to_numeric(X.z4, errors='raise')
print (X.dtypes)
# everything ok now
# -
# ## Extract the target values
# +
# Activity to predict is in "class" column
# Encode 'y' value as a dummies version of dataset's "class" column
#
y = pd.get_dummies(X['class'].copy())
# this produces a 5 column wide dummies dataframe as the y value
#
# Get rid of the user and class columns in X
#
X.drop(['class','user'], axis=1, inplace=True)
print (X.head(2))
# -
print (y.head())
# ## Split the dataset into training and test
# +
#
# Split data into test / train sets
#
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3,
random_state=7)
# -
# ## Train the Random Forest model
# +
#
# Create an RForest classifier 'model'
#
from sklearn.ensemble import RandomForestClassifier
model = RandomForestClassifier(n_estimators=30, max_depth= 20, random_state=0)
# -
# You can check the [SKlearn documentation](https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.RandomForestClassifier.html) to see all possible parameters.
#
# The ones used here:
#
# - n_estimators: integer, optional (default=100)
# The number of trees in the forest. Note that this number changed from 10 to 100 (following the progress in computing performance and memory)
# - max_depth: integer or None, optional (default=None)
# The maximum depth of the tree. If None, then nodes are expanded until all leaves are pure or until all leaves contain less than min_samples_split samples.
# Setting a limit helps with the computing time and memory needed. Not setting a max depth will lead to unpruned, fully grown trees which - depending on the dataset - can require a large memory footprint.
# - oob_score: bool (default=False)
# Whether to use out-of-bag samples to estimate the generalization accuracy.
# - random_state: int, RandomState instance or None, optional (default=None)
# Controls both the randomness of the bootstrapping of the samples used when building trees (if bootstrap=True) and the sampling of the features to consider
#
# And other useful / important:
#
# - criterion: string, optional (default=”gini”)
# The function to measure the quality of a split. Supported criteria are “gini” for the Gini impurity and “entropy” for the information gain.
# Same as for the Trees.
# - bootstrap: boolean, optional (default=True)
# Whether bootstrap samples are used when building trees. If False, the whole dataset is used to build each tree.
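# As a sketch, a constructor call exercising the parameters described above could look like this (illustrative values only, not the ones used in this notebook):

# +
# illustrative instantiation only; the models actually fitted are defined elsewhere in the notebook
RandomForestClassifier(n_estimators=100, max_depth=20, criterion='gini',
                       bootstrap=True, oob_score=True, random_state=0)
# -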
# +
print ("Fitting...")
s = time.time()
model.fit(X_train, y_train)
print("completed in: ", time.time() - s, "seconds")
# -
# Note that it takes a much longer time to train a forest than a single decision tree.
#
# This is the score based on the test dataset that we split earlier. Note how good it is.
# +
print ("Scoring...")
s = time.time()
score = model.score(X_test, y_test)
print ("Score: ", round(score*100, 3))
print ("Scoring completed in: ", time.time() - s)
# -
# These are the top 5 features used in the classification.
# They are all related to the movements, no gender or age.
# +
# Extract feature importances
fi = pd.DataFrame({'feature': list(X_train.columns),
'importance': model.feature_importances_}).\
sort_values('importance', ascending = False)
# Display
fi.head()
# -
# ## Example prediction
#
# Let's use the wrong row - that we extracted earlier from the dataset - as a prediction example.
# but first we need to correct it:
# +
outputClassPredictionExample = wrongRow['class']
forPredictionExample = wrongRow.drop(labels=['class','user']) # remove class and user
forPredictionExample.z4 = -144 # correct the value
print("We use this example for prediction later:")
print(forPredictionExample)
print("The class shall be: ", outputClassPredictionExample)
# -
model.predict(forPredictionExample.values.reshape(1, -1))
# Remember that these were the categories for the classes:
y_test.iloc[0]
# The fourth one is "standing up". Seems that the model predicted correctly.
# ## OutOfBag error instead of splitting into train and test
# Since each tree within the forest is only trained using a subset of the overall training set, the forest ensemble has the ability to error test itself.
# It does this by scoring each tree's predictions against that tree's out-of-bag samples. A tree's out of bag samples are those forest training samples that were withheld from a specific tree during training.
#
# One of the advantages of using the out of bag (OOB) error is that it eliminates the need to split your data into training / testing sets before feeding it into the forest model, since that's part of the forest algorithm. However, using the OOB error metric often underestimates the actual performance improvement and the optimal number of training iterations.
modelOOB = RandomForestClassifier(n_estimators=30, max_depth= 20, random_state=0,
oob_score=True)
# +
print ("Fitting...")
s = time.time()
modelOOB.fit(X, y)
print("completed in: ", time.time() - s, "seconds")
# -
# Time needed is similar.
# Let's check the score:
# Display the OOB Score of data
scoreOOB = modelOOB.oob_score_
print ("OOB Score: ", round(scoreOOB*100, 3))
# The out-of-bag estimation is not far away from the more precise score estimated from the test dataset.
#
# And now we predict the same user's movement. Class output shall be "standing up", the fourth one
modelOOB.predict(forPredictionExample.values.reshape(1, -1))
# Yup!
| 02-Classification/forest.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Homework: model persistence, serving, and packaging
#
# * Refer to example3+4 and example 5
# * Train a model to predict survival probability on the Titanic
# * Wrap the model as an API and design its input/output
# * Package the API into a Docker container
# * Call the API with Python's requests library and test its performance
import pandas as pd
import numpy as np
train_data = pd.read_csv('../example3+4/titanic_train.csv', index_col=0)
train_data.head()
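# A rough, illustrative sketch of the train-and-save step (it assumes the standard Kaggle Titanic columns 'Survived', 'Pclass', 'Sex', 'Age' and 'Fare'; the TODO cells below remain the place for the actual solution):

# +
# sketch only: assumed column names, simple feature set and model choice
from sklearn.linear_model import LogisticRegression
import joblib

features = train_data[['Pclass', 'Age', 'Fare']].fillna(0).assign(
    is_female=(train_data['Sex'] == 'female').astype(int))
clf = LogisticRegression(max_iter=1000).fit(features, train_data['Survived'])
joblib.dump(clf, 'titanic_model.joblib')  # persisted model, ready to be served e.g. via Flask
# -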
# +
# TODO: train a model
# +
# TODO: save the model
# +
# TODO: pack the model into docker container
# TODO: expose the model via Flask
# +
# TODO: run the docker
# -
import requests
from tqdm.auto import tqdm
from time import time
test_data = pd.read_csv('../example3+4/titanic_test.csv',index_col=0)
test_data.head()
# +
# Measure performance
start = time()
for _, row in tqdm(test_data.iterrows(), total=test_data.shape[0]):
item = row.to_dict()
# TODO: prepare the post body
body = 'TODO'
res = requests.post('[your api]',json = body)
res.raise_for_status()
end = time()
print('Used', end - start, 'seconds')
| homework1/homework.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# prerequisite package imports
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sb
# %matplotlib inline
from solutions_biv import additionalplot_solution_1, additionalplot_solution_2
# -
# We'll continue to make use of the fuel economy dataset in this workspace.
fuel_econ = pd.read_csv('./data/fuel_econ.csv')
fuel_econ.head()
# **Task 1**: Plot the distribution of combined fuel mileage (column 'comb', in miles per gallon) by manufacturer (column 'make'), for all manufacturers with at least eighty cars in the dataset. Consider which manufacturer order will convey the most information when constructing your final plot. **Hint**: Completing this exercise will take multiple steps! Add additional code cells as needed in order to achieve the goal.
# +
# YOUR CODE HERE
# -
# run this cell to check your work against ours
additionalplot_solution_1()
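# One possible sketch for Task 1 (it assumes only the 'make' and 'comb' columns named in the prompt; the solution cell above remains the reference):

# +
# keep manufacturers with at least 80 cars, then order the boxes by median combined mpg
make_counts = fuel_econ['make'].value_counts()
big_makes = make_counts[make_counts >= 80].index
sub = fuel_econ[fuel_econ['make'].isin(big_makes)]
order = sub.groupby('make')['comb'].median().sort_values().index
sb.boxplot(data=sub, y='make', x='comb', color=sb.color_palette()[0], order=order);
# -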
# **Task 2**: Continuing on from the previous task, plot the mean fuel efficiency for each manufacturer with at least 80 cars in the dataset.
# +
# YOUR CODE HERE
# -
# run this cell to check your work against ours
additionalplot_solution_2()
| Data Visualization/2. Bivariate/Additional_Plot_Practice.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Python statistics essential training - 03_07_email
# Standard imports
import numpy as np
import scipy.stats
import pandas as pd
# +
import matplotlib
import matplotlib.pyplot as pp
import pandas.plotting
from IPython import display
from ipywidgets import interact, widgets
# %matplotlib inline
# -
import re
import mailbox
import csv
# ### How I converted my mailbox.
mbox = mailbox.mbox('Sent.mbox')
# The resulting object is array-like, with one entry per message. Each entry is dictionary like, with keys corresponding to metadata and data for each message.
mbox[0].keys()
# The easiest way to get these data into Pandas is to build a CSV file from them. We use the module `csv` to write out the CSV file as we loop over the mailbox object. We save only subject, from, to, and date, and we write a simple header at the top with the names of columns.
with open('mbox.csv', 'w') as outfile:
writer = csv.writer(outfile)
writer.writerow(['subject','from','to','date'])
for message in mbox:
writer.writerow([message['subject'], message['from'], message['to'], message['date']])
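# The resulting CSV can then be loaded back into pandas for analysis; a minimal sketch, assuming the file written above:
messages = pd.read_csv('mbox.csv')
messages['date'] = pd.to_datetime(messages['date'], errors='coerce', utc=True)
messages.head()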
# All done! Thanks to <NAME> for inspiration with https://jellis18.github.io/post/2018-01-17-mail-analysis.
# ## Moving on!
| Python Statistics/Exercise Files/chapter3/03_08/.ipynb_checkpoints/03_07_email_begin-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: default:Python
# language: python
# name: conda-env-default-py
# ---
# + [markdown] id="DoplaB7FtIqu" tags=[]
# ML Olympiad - Quality Education
# + [markdown] heading_collapsed=true id="MYVVaNGot6G8" jp-MarkdownHeadingCollapsed=true tags=[]
# ### Imports of the libraries used
# + executionInfo={"elapsed": 2318, "status": "ok", "timestamp": 1645038932123, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gi7Zibf4EoA_dnczFenasKCgWHIeznw5gMIk2rimg=s64", "userId": "05586637033429412239"}, "user_tz": 180} hidden=true id="ZscxbJDttIqy" outputId="e9283bf7-1db2-4639-b32b-65fa89a86827" tags=[]
# math, statistics and ETL
import pandas as pd
import numpy as np
from tqdm import tqdm
from math import sqrt
from sklearn.metrics import mean_squared_error  # needed by rmse_score below
from scipy import stats
from scipy.stats import pearsonr
# data visualization
import seaborn as sns
import matplotlib.pyplot as plt
from statsmodels.graphics.gofplots import qqplot
# others
import gc
import warnings
from pylab import rcParams
from functools import reduce
from IPython.display import Image
from IPython.core.display import HTML
# filter warnings
warnings.filterwarnings("ignore")
# geographic coordinates
# #!pip install geobr
import geobr
import geopandas as gpd
# parameters
def jupyter_settings():
# chart plotting settings
# %matplotlib inline
# #%pylab inline
#plt.style.use( 'bmh' )
plt.rcParams['figure.figsize'] = [10, 10]
plt.rcParams['font.size'] = 30
plt.style.use('ggplot')
display( HTML( '<style>.container { width:100% !important; }</style>') )
pd.options.display.max_columns = None
pd.options.display.max_rows = None
pd.set_option("display.max_columns", 200)
pd.set_option('display.max_rows', 200)
pd.set_option( 'display.expand_frame_repr', False )
pd.set_option('display.float_format', lambda x: '%.2f' % x)
sns.set()
jupyter_settings()
# + [markdown] id="YuYc1_x0tIq0" jp-MarkdownHeadingCollapsed=true tags=[]
# ### Auxiliary Functions
# + executionInfo={"elapsed": 21559, "status": "ok", "timestamp": 1645038953679, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gi7Zibf4EoA_dnczFenasKCgWHIeznw5gMIk2rimg=s64", "userId": "05586637033429412239"}, "user_tz": 180} id="xGOeRrBZtIq1" tags=[]
def reduce_mem_usage(df):
"""
iterate through all the columns of a dataframe and modify the data type to reduce memory usage.
source: https://www.kaggle.com/valleyzw/ubiquant-lgbm-baseline
"""
start_mem = df.memory_usage().sum() / 1024 ** 2
print('Memory usage of dataframe is {:.2f} MB'.format(start_mem))
for col in tqdm([x for x in df.columns if 'NU_NOTA_' not in x]):
col_type = df[col].dtype
if col_type != object:
c_min = df[col].min()
c_max = df[col].max()
if str(col_type)[:3] == 'int':
if c_min > np.iinfo(np.int8).min and c_max < np.iinfo(np.int8).max:
df[col] = df[col].astype(np.int8)
elif c_min > np.iinfo(np.int16).min and c_max < np.iinfo(np.int16).max:
df[col] = df[col].astype(np.int16)
elif c_min > np.iinfo(np.int32).min and c_max < np.iinfo(np.int32).max:
df[col] = df[col].astype(np.int32)
elif c_min > np.iinfo(np.int64).min and c_max < np.iinfo(np.int64).max:
df[col] = df[col].astype(np.int64)
else:
if c_min > np.finfo(np.float16).min and c_max < np.finfo(np.float16).max:
df[col] = df[col].astype(np.float16)
elif c_min > np.finfo(np.float32).min and c_max < np.finfo(np.float32).max:
df[col] = df[col].astype(np.float32)
else:
df[col] = df[col].astype(np.float64)
else:
df[col] = df[col].astype('category')
end_mem = df.memory_usage().sum() / 1024 ** 2
print('Memory usage after optimization is: {:.2f} MB'.format(end_mem))
print('Decreased by {:.1f}%'.format(100 * (start_mem - end_mem) / start_mem))
return df
def show_missings(df):
'''
shows the percentage of missing values in the dataframe
'''
percent_missing = df.isnull().sum() * 100 / len(df)
missing_value_df = pd.DataFrame({'column_name': df.columns,
'percent_missing': percent_missing})
missing_value_df = missing_value_df.sort_values('percent_missing', ascending=False).round(2)
return (missing_value_df)
def leitura_dados(path_train, path_test, sample_frac=-1):
'''
helper to read the datasets
'''
if sample_frac == -1:
df_desenv = reduce_mem_usage(pd.read_csv(path_train))
else:
df_desenv = reduce_mem_usage(pd.read_csv(path_train).sample(frac=sample_frac, random_state=42))
df_submit = reduce_mem_usage(pd.read_csv(path_test))
return (df_desenv, df_submit)
def rmse_score(true, pred):
'''
rmse score
'''
return (sqrt(mean_squared_error(true, pred)))
def map_cor_raca(cor_raca):
'''
maps color/race according to the provided metadata
'''
if cor_raca == 0:
return 'Não informado'
elif cor_raca == 1:
return 'Branca'
elif cor_raca == 2:
return 'Preta'
elif cor_raca == 3:
return 'Parda'
elif cor_raca == 4:
return 'Amarela'
elif cor_raca == 5:
return 'Indígena'
else:
return ''
def map_estado_civil(estado_civil):
'''
maps marital status according to the provided metadata
'''
if estado_civil == 0:
return 'Não informado'
elif estado_civil == 1:
return 'Solteiro(a)'
elif estado_civil == 2:
return 'Casado(a)/Mora com companheiro(a)'
elif estado_civil == 3:
return 'Divorciado(a)/Desquitado(a)/Separado(a)'
elif estado_civil == 4:
return 'Viúvo(a)'
else:
return ''
def gerar_painel_barra(data_frame,
var,
hue,
title = '',
title_subplot_1 = '',
title_subplot_2 = '',
legend_subplot_2 = '',
xlabel = 'Quantidade',
ylabel = '',
figsize = (12, 6)
):
'''
generates a bar-chart panel
'''
fig, ax = plt.subplots(1, 2, figsize = figsize)
sns.countplot(data = data_frame,
y = var,
ax = ax[0])
sns.countplot(data = data_frame,
y = var,
hue = hue,
ax = ax[1])
ax[0].set(ylabel = ylabel, xlabel = xlabel, title = title_subplot_1)
ax[1].set(ylabel = ylabel, xlabel = xlabel, title = title_subplot_2)
ax[1].legend(title = legend_subplot_2)
fig.suptitle(title)
fig.tight_layout(pad = 4)
def print_importancias_lasso(df, coef):
'''
importance of the explanatory variables in the lasso model
'''
for e in sorted (list(zip(list(df), coef)), key = lambda e: -abs(e[1])):
if e[1] != 0:
print('\t{}, {:.3f}'.format(e[0], e[1]))
def percentile(n):
'''
returns the percentile
'''
def percentile_(x):
return x.quantile(n)
percentile_.__name__ = 'percentile_{:2.0f}'.format(n*100)
return percentile_
def estatistica_descritiva_por_estado(df, metrica):
"Calcula alguma estatística descritiva para as notas do Enem por estado."
# exams in the base dataset
provas = ['NU_NOTA_CN', 'NU_NOTA_CH', 'NU_NOTA_LC', 'NU_NOTA_MT', 'NU_NOTA_REDACAO']
# aggregate the results per state using the given statistic
df = df.groupby(by = 'SG_UF_RESIDENCIA', as_index = False)[provas].agg(metrica)
# geolocation
df = gpd.GeoDataFrame(pd.merge(
df,
df_estados,
left_on = 'SG_UF_RESIDENCIA',
right_on = 'abbrev_state',
how = 'inner'))
return df
def plot_mapa_estado(df, estatistica_descritiva = np.mean, title = '', cmap = 'BuPu'):
'''
generates a heatmap of Brazil filled with the descriptive statistic of interest
'''
# build the DataFrame using the chosen descriptive statistic
df = estatistica_descritiva_por_estado(df=df, metrica = estatistica_descritiva)
# labels for the plot
labels_provas = ['Ciências da Natureza', 'Ciências Humanas', 'Linguagens', 'Matemática', 'Redação']
# columns holding the exam scores
provas = ['NU_NOTA_CN', 'NU_NOTA_CH', 'NU_NOTA_LC', 'NU_NOTA_MT', 'NU_NOTA_REDACAO']
# create the figure
fig, ax = plt.subplots(1, 5, figsize = (20, 20))
# iterate over the exams and draw each map
for index, prova in enumerate(provas):
df.plot(
column = prova,
cmap = cmap,
edgecolor = 'lightgray',
lw = 0.3,
ax = ax[index],
legend=True,
legend_kwds = {'shrink': 0.08}
)
# remove axis ticks
ax[index].axis('off')
# labels
ax[index].set_title(labels_provas[index], fontsize = 10)
fig.suptitle(title, y = 0.6 , weight = 'bold')
fig.tight_layout(pad = 2);
def Gerar_Grafico_Economico(Coluna):
'''
Function to generate the socio-economic charts
'''
# Position
Posicao_Index = 1
# Sort the data
Filtro = Analise_Econominca.sort_values(by=str(Coluna))
# Figure size
fig, ax = plt.subplots(figsize=(18, 15))
# Background color
Cor_Fundo = "#F5F4EF"
ax.set_facecolor(Cor_Fundo)
fig.set_facecolor(Cor_Fundo)
# Color palette
Paleta_Cores = sns.color_palette('flare', 7)
# Chart style
plt.style.use('seaborn-darkgrid')
# Add the title to the figure
plt.suptitle(f'Dados do Questionário Socieconômico | {Coluna}',
fontsize=22, color='#404040', fontfamily='KyivType Sans', fontweight=600 )
# Loop to plot the charts
for Grafico in Analise_Econominca[lista_targets]:
# Drop any zero values
Filtro = Filtro.loc[ Filtro[Grafico] > 0 ]
# Subplot position
plt.subplot( 5, 1, Posicao_Index )
# Plot
sns.boxplot( data=Filtro, x=str(Coluna), y=Grafico, showmeans = True, linewidth=1, width=0.4, color=Paleta_Cores[Posicao_Index] )
sns.stripplot( data=Filtro, x=str(Coluna), y=Grafico, size=0.3, color='0.1', linewidth=0 )
# Title
plt.title(f'Análise {Grafico}', loc='left', fontsize=14, fontweight=200)
# Labels
plt.ylabel('Nota de 0 - 1000')
plt.xlabel('Resposta dos participantes')
# Adjust spacing between the charts in the report
plt.subplots_adjust(left=0.1, bottom=0.1, right=0.9, top=0.94, wspace=0.2, hspace=0.25);
# Move to the next subplot index
Posicao_Index += 1
def trata_predicoes(valor):
'''
ensures the score values always stay between 0 and 1000
'''
if valor < 0:
return 0
elif valor > 1000:
return 1000
else:
return valor
# + [markdown] id="vfUAnGQKoTlP" jp-MarkdownHeadingCollapsed=true tags=[]
# ### _Divide and conquer, to load data._
# + executionInfo={"elapsed": 5, "status": "ok", "timestamp": 1645038932124, "user": {"displayName": "Yan Sym", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gi7Zibf4EoA_dnczFenasKCgWHIeznw5gMIk2rimg=s64", "userId": "05586637033429412239"}, "user_tz": 180} id="GtwzwHioW918" tags=[]
# Variable groups
lista_targets = ['NU_NOTA_CN', 'NU_NOTA_CH', 'NU_NOTA_LC', 'NU_NOTA_MT', 'NU_NOTA_REDACAO']
Variaveis_Socio_Economico = ['Q001', 'Q002', 'Q003', 'Q004', 'Q005', 'Q006', 'Q007', 'Q008', 'Q009', 'Q010', 'Q011', 'Q012', 'Q013', 'Q014', 'Q015', 'Q016', 'Q017', 'Q018', 'Q019', 'Q020', 'Q021', 'Q022', 'Q023', 'Q024', 'Q025']
Variaveis_Atendimento_Especializado = ['IN_BAIXA_VISAO', 'IN_CEGUEIRA', 'IN_SURDEZ', 'IN_DEFICIENCIA_AUDITIVA', 'IN_SURDO_CEGUEIRA', 'IN_DEFICIENCIA_FISICA', 'IN_DEFICIENCIA_MENTAL', 'IN_DEFICIT_ATENCAO', 'IN_DISLEXIA', 'IN_DISCALCULIA', 'IN_AUTISMO', 'IN_VISAO_MONOCULAR', 'IN_OUTRA_DEF']
Variaveis_Cadastrais = ['NU_INSCRICAO', 'CO_MUNICIPIO_RESIDENCIA', 'NO_MUNICIPIO_RESIDENCIA', 'CO_UF_RESIDENCIA', 'SG_UF_RESIDENCIA', 'NU_IDADE', 'TP_SEXO', 'TP_ESTADO_CIVIL', 'TP_COR_RACA', 'TP_NACIONALIDADE', 'CO_MUNICIPIO_NASCIMENTO', 'NO_MUNICIPIO_NASCIMENTO', 'CO_UF_NASCIMENTO', 'SG_UF_NASCIMENTO', 'TP_ST_CONCLUSAO', 'TP_ANO_CONCLUIU', 'TP_ESCOLA', 'TP_ENSINO', 'IN_TREINEIRO']
# list of explanatory variables to use in the model
lista_vars_explicativas = ['Q001','Q002', 'Q003', 'Q004', 'Q005', 'Q006', 'Q007', 'Q008', 'Q009', 'Q010', 'Q011','Q012', 'Q013',
'Q014', 'Q015', 'Q016', 'Q017', 'Q018', 'Q019', 'Q020', 'Q021','Q022', 'Q023', 'Q024', 'Q025',
'IN_ACESSO', 'TP_ANO_CONCLUIU', 'TP_LINGUA', 'TP_SEXO', 'TP_DEPENDENCIA_ADM_ESC',
'NU_IDADE', 'TP_ESCOLA', 'TP_COR_RACA', 'TP_ST_CONCLUSAO', 'IN_LIBRAS',
'CO_MUNICIPIO_RESIDENCIA', 'CO_ESCOLA', 'CO_MUNICIPIO_PROVA', 'CO_MUNICIPIO_PROVA',
'TP_ENSINO', 'SG_UF_PROVA', 'TP_ESTADO_CIVIL', 'TP_NACIONALIDADE',
'IN_SEM_RECURSO', 'IN_SALA_ESPECIAL', 'SG_UF_NASCIMENTO', 'SG_UF_ESC',
'IN_TREINEIRO', 'IN_DEFICIT_ATENCAO', 'TP_SIT_FUNC_ESC',
'CO_MUNICIPIO_ESC', 'IN_LEDOR', 'IN_TEMPO_ADICIONAL',
'IN_DEFICIENCIA_AUDITIVA', 'TP_LOCALIZACAO_ESC', 'IN_DEFICIENCIA_MENTAL',
'IN_SURDEZ', 'IN_AUTISMO', 'IN_DEFICIENCIA_FISICA', 'IN_TRANSCRICAO',
'CO_MUNICIPIO_NASCIMENTO', 'CO_MUNICIPIO_ESC', 'CO_UF_NASCIMENTO', 'CO_UF_PROVA',
'IN_MAQUINA_BRAILE', 'TP_PRESENCA_MT', 'TP_PRESENCA_LC',
'TP_PRESENCA_CN', 'TP_PRESENCA_CH', 'TP_STATUS_REDACAO']
df_estados = geobr.read_state(year = 2019)
# + [markdown] id="dlu6udautIq2" jp-MarkdownHeadingCollapsed=true tags=[]
# ### Receive the Data
# + executionInfo={"elapsed": 80162, "status": "ok", "timestamp": 1645039033824, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gi7Zibf4EoA_dnczFenasKCgWHIeznw5gMIk2rimg=s64", "userId": "05586637033429412239"}, "user_tz": 180} id="uoLFUvHDtIq4" outputId="4a425033-64d9-4d96-cf52-eccabbc2c1b3" tags=[]
path_input_train= '../data/external/train.csv'
path_input_test= '../data/external/test.csv'
path_output_submission= '../data/external/sample_submission.csv'
# + tags=[]
# read the data (5% sample for the training set)
df_desenv, df_submit = leitura_dados(path_input_train, path_input_test, sample_frac=0.05)
print (df_desenv.shape, df_submit.shape)
# + [markdown] jp-MarkdownHeadingCollapsed=true tags=[]
# # 1.0. Step 01 - Data Description
# + tags=[]
df1 = df_desenv.copy()
# + executionInfo={"elapsed": 15, "status": "ok", "timestamp": 1645039033824, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gi7Zibf4EoA_dnczFenasKCgWHIeznw5gMIk2rimg=s64", "userId": "05586637033429412239"}, "user_tz": 180} id="iVlQ5_xntIq5" outputId="25be1659-2bde-46ea-f628-9c3bdc047eb7" tags=[]
# development dataframe
#df_desenv
# submission dataframe
#df_submit
# + [markdown] id="M329znaktIq6" tags=[]
# ## 1.1. Data Dimensions
# + executionInfo={"elapsed": 1302, "status": "ok", "timestamp": 1645039035639, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gi7Zibf4EoA_dnczFenasKCgWHIeznw5gMIk2rimg=s64", "userId": "05586637033429412239"}, "user_tz": 180} id="l0A25o8LtIq6" outputId="2782d6c4-7ddb-4548-9bca-d215ef732c91" tags=[]
# descriptive statistics of the dataset variables
df1.describe()
# + executionInfo={"elapsed": 25, "status": "ok", "timestamp": 1645039035639, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gi7Zibf4EoA_dnczFenasKCgWHIeznw5gMIk2rimg=s64", "userId": "05586637033429412239"}, "user_tz": 180} id="kTj7mfH2wrG2" outputId="ec26b4c9-8afd-463c-f03b-abc85805137c" tags=[]
# information about the dataset
df1.info()
# -
print(f'Total de linhas {df1.shape[0]} e Total de colunas {df1.shape[1]}.')
# + [markdown] tags=[]
# ## 1.2. Data Type
# + executionInfo={"elapsed": 18, "status": "ok", "timestamp": 1645039035640, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gi7Zibf4EoA_dnczFenasKCgWHIeznw5gMIk2rimg=s64", "userId": "05586637033429412239"}, "user_tz": 180} id="Oiora5TctIq7" outputId="cc3eb906-db6d-4fb2-ac51-0f99d3c91b6e" tags=[]
# data types of the dataset variables
df1.dtypes
# + executionInfo={"elapsed": 16, "status": "ok", "timestamp": 1645039035640, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gi7Zibf4EoA_dnczFenasKCgWHIeznw5gMIk2rimg=s64", "userId": "05586637033429412239"}, "user_tz": 180} id="XLZImXSGtIq7" outputId="34ccc9f0-cb35-40f1-d089-b0a716602502" tags=[]
# list of dataframe columns
df1.columns.tolist()
# + executionInfo={"elapsed": 13, "status": "ok", "timestamp": 1645039035641, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gi7Zibf4EoA_dnczFenasKCgWHIeznw5gMIk2rimg=s64", "userId": "05586637033429412239"}, "user_tz": 180} id="XJTE5x2otIq7" outputId="708e97b6-cfb5-430c-d941-70e18ca98be2" tags=[]
# target columns
df1[lista_targets].head()
# + [markdown] tags=[]
# ## 1.3. Check NA
# + executionInfo={"elapsed": 12, "status": "ok", "timestamp": 1645039035642, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gi7Zibf4EoA_dnczFenasKCgWHIeznw5gMIk2rimg=s64", "userId": "05586637033429412239"}, "user_tz": 180} id="435uQJJ1tIq8" outputId="663d7271-aa6a-404c-b18a-e8bb4df5f856" tags=[]
# percentage of missing data in the dataset
show_missings(df1)
# + tags=[]
# Total NAs per column
df1.isna().sum()
# + [markdown] jp-MarkdownHeadingCollapsed=true tags=[]
# # 2.0 EDA
# -
df2 = df1.copy()
# + executionInfo={"elapsed": 249, "status": "ok", "timestamp": 1645039035880, "user": {"displayName": "Yan Sym", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gi7Zibf4EoA_dnczFenasKCgWHIeznw5gMIk2rimg=s64", "userId": "05586637033429412239"}, "user_tz": 180} id="UklMCz4_w2AM" outputId="085d1ec0-da92-47ae-cd80-e003a8ff2df0" tags=[]
# number of unique values in each column
df2.nunique()
# -
# Analysis of metrics
num_attributes = df2.select_dtypes( include=['int8', 'int32', 'float32', 'float64'] )
cat_attributes = df2.select_dtypes( exclude=['int8', 'int32', 'float32', 'float64'] )
# + tags=[]
# Central tendency - mean and median
ct1 = pd.DataFrame( num_attributes.apply( np.mean )).T
ct2 = pd.DataFrame( num_attributes.apply( np.median )).T
# Dispersion - std, min, max, range, skew, kurtosis
d1 = pd.DataFrame( num_attributes.apply( np.std )).T
d2 = pd.DataFrame( num_attributes.apply( min )).T
d3 = pd.DataFrame( num_attributes.apply( max )).T
d4 = pd.DataFrame( num_attributes.apply( lambda x: x.max() - x.min() )).T
d5 = pd.DataFrame( num_attributes.apply( lambda x: x.skew() )).T
d6 = pd.DataFrame( num_attributes.apply( lambda x: x.kurtosis() )).T
# Concatenate
m = pd.concat( [d2, d3, d4, ct1, ct2, d1, d5, d6] ).T.reset_index()
# Rename columns
m.columns = ( ['attributes', 'min', 'max', 'mean', 'median', 'range', 'std', 'skew', 'kurtosis',] )
m
# + executionInfo={"elapsed": 30, "status": "ok", "timestamp": 1645039036408, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gi7Zibf4EoA_dnczFenasKCgWHIeznw5gMIk2rimg=s64", "userId": "05586637033429412239"}, "user_tz": 180} id="0WD6Fi9YY7Y7" outputId="f682b83e-c53c-4948-ca18-b51e0ed92855" tags=[]
# Outlier analysis across the Enem scores
fig, ax = plt.subplots(figsize = (35, 18))
sns.boxplot(data = df2[lista_targets], color='#6B62CE', ax = ax);
# + executionInfo={"elapsed": 529, "status": "ok", "timestamp": 1645039036407, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gi7Zibf4EoA_dnczFenasKCgWHIeznw5gMIk2rimg=s64", "userId": "05586637033429412239"}, "user_tz": 180} id="y0t2P-NHa9Cp" outputId="7e082524-aacf-434b-e116-dea3e3f2567c" tags=[]
# Correlation between the Enem exam scores
fig, ax = plt.subplots()
corr_provas = df2[lista_targets].corr()
sns.heatmap(corr_provas, annot=True, ax = ax);
# + executionInfo={"elapsed": 5, "status": "ok", "timestamp": 1645039035880, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gi7Zibf4EoA_dnczFenasKCgWHIeznw5gMIk2rimg=s64", "userId": "05586637033429412239"}, "user_tz": 180} id="Vgk0Aj55a1Da" outputId="ac999ac5-0104-4aa2-c7f3-1f9aa20baa83" tags=[]
# frequency of Enem registrants by state (UF)
df2['SG_UF_RESIDENCIA'].value_counts(dropna=False, ascending=False, normalize=True)
# + executionInfo={"elapsed": 1516, "status": "ok", "timestamp": 1645039038309, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gi7Zibf4EoA_dnczFenasKCgWHIeznw5gMIk2rimg=s64", "userId": "05586637033429412239"}, "user_tz": 180} id="CAxmqEqYY_L7" outputId="2ec6850d-56b4-4f99-aba7-588f63220d2f" tags=[]
# Analyzing scores by state (UF)
Analise_Target = df2[['SG_UF_RESIDENCIA', 'NU_NOTA_CN', 'NU_NOTA_CH', 'NU_NOTA_LC', 'NU_NOTA_MT', 'NU_NOTA_REDACAO']]
# Build the report figure
fig, axs = plt.subplots(3, 2, figsize=(15, 10))
# Plot the scores as histograms
sns.histplot(data=Analise_Target, x='NU_NOTA_CN', color='blue', bins=100, ax=axs[0, 0])
sns.histplot(data=Analise_Target, x='NU_NOTA_CH', color='olive', bins=100, ax=axs[0, 1])
sns.histplot(data=Analise_Target, x='NU_NOTA_LC', color='brown', bins=100, ax=axs[1, 0])
sns.histplot(data=Analise_Target, x='NU_NOTA_MT', color='red', bins=100, ax=axs[1, 1])
sns.histplot(data=Analise_Target, x='NU_NOTA_REDACAO', color='olive', bins=50, ax=axs[2, 0])
axs[2, 1].set_axis_off()
# Add the title to the figure
plt.suptitle('Distribuição das notas para cada prova', fontsize=22, color='#404040', fontweight=600);
# + executionInfo={"elapsed": 497, "status": "ok", "timestamp": 1645039038797, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gi7Zibf4EoA_dnczFenasKCgWHIeznw5gMIk2rimg=s64", "userId": "05586637033429412239"}, "user_tz": 180} id="yHEqzvPIQCGF" outputId="cc9471b7-6401-4ba4-f12d-f72595824cb3" tags=[]
# Age
print ('Idade média:', round(df2.query("NU_IDADE == NU_IDADE")['NU_IDADE'].mean(), 2))
print ('Idade mediana:', round(df2.query("NU_IDADE == NU_IDADE")['NU_IDADE'].median(), 2))
print ('Idade mais frequente (moda):', round(df2.query("NU_IDADE == NU_IDADE")['NU_IDADE'].mode()[0], 2))
plt.subplots(figsize=(10, 6))
plt.hist(df2['NU_IDADE'], color='#6B62CE', bins=50);
plt.title('Perfil etário dos inscritos no Enem');
# + executionInfo={"elapsed": 8, "status": "ok", "timestamp": 1645039038799, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gi7Zibf4EoA_dnczFenasKCgWHIeznw5gMIk2rimg=s64", "userId": "05586637033429412239"}, "user_tz": 180} id="Yf6pcrTfP5cu" outputId="01180109-f3e0-472c-de25-2311765af039" tags=[]
# Gender
plt.subplots(figsize=(10, 6))
sns.countplot(x='TP_SEXO', data=df2, palette="ch:.36")
plt.title('Distribuição de Gênero dos inscritos');
# + executionInfo={"elapsed": 459, "status": "ok", "timestamp": 1645039039252, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gi7Zibf4EoA_dnczFenasKCgWHIeznw5gMIk2rimg=s64", "userId": "05586637033429412239"}, "user_tz": 180} id="vdUTcE5DP5fm" outputId="f36169a8-1498-4df1-c19b-757c3e257067" tags=[]
# Skin color / race
df2['MAP_TP_COR_RACA'] = df2['TP_COR_RACA'].apply(map_cor_raca)
gerar_painel_barra(df2, 'MAP_TP_COR_RACA', 'TP_SEXO',
title = 'Perfil de cor e raça dos inscritos',
title_subplot_1 = 'Cor/raça',
title_subplot_2 = 'Cor/raça por gênero',
legend_subplot_2 = 'Gênero',
ylabel = 'Cor/raça');
# + executionInfo={"elapsed": 989, "status": "ok", "timestamp": 1645039040238, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gi7Zibf4EoA_dnczFenasKCgWHIeznw5gMIk2rimg=s64", "userId": "05586637033429412239"}, "user_tz": 180} id="PllgfBhQTj9k" outputId="fefadf75-105e-405b-ddb6-5853e8ae5e1d" tags=[]
# Marital status
df2['MAP_TP_ESTADO_CIVIL'] = df2['TP_ESTADO_CIVIL'].apply(map_estado_civil)
fig, ax = plt.subplots(1,2, figsize = (30, 13))
sns.countplot(data = df2, y = 'MAP_TP_ESTADO_CIVIL',palette="ch:.36", ax = ax[0])
sns.countplot(data = df2, y = 'MAP_TP_ESTADO_CIVIL',palette="ch:.36", hue = 'TP_SEXO', ax = ax[1])
ax[0].set(ylabel = 'Estado Civil', xlabel = 'Quantidade',title = 'Estado civil')
ax[1].set(ylabel = 'Estado Civil', xlabel = 'Quantidade',title = 'Estado civil por gênero')
ax[1].legend(title = 'Gênero')
fig.suptitle('Estado civil dos inscritos')
fig.tight_layout(pad = 3);
# + [markdown] jp-MarkdownHeadingCollapsed=true tags=[]
# # 3.0. Feature Engineering
# -
df3 = df2.copy()
# + executionInfo={"elapsed": 582, "status": "ok", "timestamp": 1645039040817, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gi7Zibf4EoA_dnczFenasKCgWHIeznw5gMIk2rimg=s64", "userId": "05586637033429412239"}, "user_tz": 180} id="K6VJlMMVuycT" outputId="d7cce0b9-7177-49da-9ccd-e4ebda6d7448" tags=[]
# Compute the number of registrants in each state (sample)
df_inscritos_por_estado = df3.groupby(by = 'SG_UF_RESIDENCIA')[['SG_UF_RESIDENCIA']].count()\
.rename(columns = {'SG_UF_RESIDENCIA': 'quantidade_inscritos'})\
.reset_index()\
.sort_values(by = 'quantidade_inscritos', ascending = False)
# + tags=[]
df_inscritos_por_estado.head(27)
| notebooks/c01_ml_olympiad_eda_feature_eng.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.9.1 64-bit
# name: python391jvsc74a57bd063fd5069d213b44bf678585dea6b12cceca9941eaf7f819626cde1f2670de90d
# ---
# ___
# # Stochastic Gradient Descent (SGD_v2)
# ## Loss function, optimizer, learning rate & batch size
# ## Fuel dataset
# ___
# ## Importing dependencies and transforming the data
# +
import numpy as np
import pandas as pd
from sklearn.preprocessing import StandardScaler, OneHotEncoder
from sklearn.compose import make_column_transformer, make_column_selector
from sklearn.model_selection import train_test_split
from tensorflow import keras
from tensorflow.keras import layers
data = 'C:/Users/crist/Desktop/Python_Stuff/fuel.csv'
fuel = pd.read_csv(data)
X = fuel.copy()
y = X.pop('FE')
preprocessor = make_column_transformer (
(StandardScaler(), make_column_selector(dtype_include = np.number)),
(OneHotEncoder(sparse = False),
make_column_selector(dtype_include = object))
)
X = preprocessor.fit_transform(X)
y = np.log(y)
input_shape = [X.shape[1]]
# -
# Original data
fuel.head()
# Processed features
pd.DataFrame(X[:5,:]).head()
# ## Defining the network for the model
model = keras.Sequential([
layers.Dense(128, activation = 'relu', input_shape = input_shape),
layers.Dense(128, activation = 'relu'),
layers.Dense(64, activation = 'relu'),
layers.Dense(1)
])
# ## Adding Loss and Optimizer
model.compile(
optimizer = 'adam',
loss = 'mae'
)
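# The learning rate mentioned in the title can also be set explicitly by passing an optimizer instance instead of the `'adam'` string; the sketch below is equivalent to the default (0.001 is Adam's default learning rate):

# +
opt = keras.optimizers.Adam(learning_rate=0.001)  # explicit learning rate, same as the default above
model.compile(optimizer=opt, loss='mae')
# -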
# ## Training model
history = model.fit(
X, y,
batch_size= 128,
epochs = 200)
# ## Checking the training loss curve
history_df = pd.DataFrame(history.history)
# Start at epoch 50; this can be changed to view other ranges of the loss curve
history_df.loc[50:, ['loss']].plot()
| Jupyter/SGD_v2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="YjKuNPUV6sAk" colab_type="code" colab={}
import pandas as pd
import numpy as np
from sklearn.tree import DecisionTreeRegressor
from sklearn.metrics import mean_absolute_error
from sklearn.model_selection import cross_val_score
# + id="oTgCvIUE7Mt6" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="979227a6-39aa-43e3-d5f6-df2cc2e31f7a" executionInfo={"status": "ok", "timestamp": 1581637315579, "user_tz": -60, "elapsed": 551, "user": {"displayName": "Petunia", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mClzTaaNHS18QfHz36hJJS-daXf-TrZecTz0EmXfQ=s64", "userId": "08423563849622157672"}}
# cd "/content/drive/My Drive/Colab Notebooks/dw_matrix"
# + id="-a_GZ3Jh7imL" colab_type="code" colab={}
# ls data
# + id="H1c6gFqG7jVm" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="a8d98c7e-81d0-4690-aae6-84f92b376281" executionInfo={"status": "ok", "timestamp": 1581637380273, "user_tz": -60, "elapsed": 2406, "user": {"displayName": "Petunia", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mClzTaaNHS18QfHz36hJJS-daXf-TrZecTz0EmXfQ=s64", "userId": "08423563849622157672"}}
# ls
# + id="cNG4fjXj7x2T" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="45f58fcb-9e3f-474d-90af-e76a85ccb7ee" executionInfo={"status": "ok", "timestamp": 1581638152903, "user_tz": -60, "elapsed": 2869, "user": {"displayName": "Petunia", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mClzTaaNHS18QfHz36hJJS-daXf-TrZecTz0EmXfQ=s64", "userId": "08423563849622157672"}}
# ls data
# + id="JIdEazs4-uYF" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="d3801bde-c652-48c5-85d3-32fcc18201cc" executionInfo={"status": "ok", "timestamp": 1581638347614, "user_tz": -60, "elapsed": 2152, "user": {"displayName": "Petunia", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mClzTaaNHS18QfHz36hJJS-daXf-TrZecTz0EmXfQ=s64", "userId": "08423563849622157672"}}
df=pd.read_csv('data/men_shoes.csv', low_memory=False)
df.shape
# + id="JnoNduD2_XQS" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 233} outputId="6a33d392-0ec6-4e60-a9c7-2a73edaaef63" executionInfo={"status": "ok", "timestamp": 1581638356777, "user_tz": -60, "elapsed": 1231, "user": {"displayName": "Petunia", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mClzTaaNHS18QfHz36hJJS-daXf-TrZecTz0EmXfQ=s64", "userId": "08423563849622157672"}}
df.columns
# + id="yAy_JDGA_gaA" colab_type="code" colab={}
mean_price = np.mean( df['prices_amountmin'])
# + id="5kOcRxIo_uJt" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="5f45c8e7-2b9a-4284-a6cb-5aef299eff8a" executionInfo={"status": "ok", "timestamp": 1581638416627, "user_tz": -60, "elapsed": 793, "user": {"displayName": "Petunia", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mClzTaaNHS18QfHz36hJJS-daXf-TrZecTz0EmXfQ=s64", "userId": "08423563849622157672"}}
mean_price
# + id="w7X5ntYD_vV8" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="e0de145d-71f2-4858-9013-d486729824c2" executionInfo={"status": "ok", "timestamp": 1581638473609, "user_tz": -60, "elapsed": 925, "user": {"displayName": "Petunia", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mClzTaaNHS18QfHz36hJJS-daXf-TrZecTz0EmXfQ=s64", "userId": "08423563849622157672"}}
[3]*5
# + id="X5SD0Ri2_6Mk" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 233} outputId="fcd75a71-323a-41a7-f3fd-b8f699251ad2" executionInfo={"status": "ok", "timestamp": 1581638613654, "user_tz": -60, "elapsed": 584, "user": {"displayName": "Petunia", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mClzTaaNHS18QfHz36hJJS-daXf-TrZecTz0EmXfQ=s64", "userId": "08423563849622157672"}}
df['prices_amountmin']
# + id="kfl8tYESAE-y" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="e7fa87b9-fa11-4927-8156-c8645c937e18" executionInfo={"status": "ok", "timestamp": 1581638645562, "user_tz": -60, "elapsed": 815, "user": {"displayName": "Petunia", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mClzTaaNHS18QfHz36hJJS-daXf-TrZecTz0EmXfQ=s64", "userId": "08423563849622157672"}}
y_true = df['prices_amountmin']
y_pred = [mean_price] * y_true.shape[0]
mean_absolute_error(y_true,y_pred)
# + id="s3DxJsClAnPI" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 283} outputId="c3313f49-a349-4789-81da-0bf6bff8f18f" executionInfo={"status": "ok", "timestamp": 1581638699524, "user_tz": -60, "elapsed": 920, "user": {"displayName": "Petunia", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mClzTaaNHS18QfHz36hJJS-daXf-TrZecTz0EmXfQ=s64", "userId": "08423563849622157672"}}
df['prices_amountmin'].hist(bins=100)
# + id="lI7nxGStA0Yw" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 283} outputId="eb662b37-b8d3-4204-d30c-0eab43f85855" executionInfo={"status": "ok", "timestamp": 1581638773021, "user_tz": -60, "elapsed": 926, "user": {"displayName": "Petunia", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mClzTaaNHS18QfHz36hJJS-daXf-TrZecTz0EmXfQ=s64", "userId": "08423563849622157672"}}
np.log1p( df['prices_amountmin'] ).hist(bins=100)
# + id="gA-LCSH7BGVB" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="ede8a246-01cd-49b2-bfb4-a28aed187bd5" executionInfo={"status": "ok", "timestamp": 1581638837254, "user_tz": -60, "elapsed": 716, "user": {"displayName": "Petunia", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mClzTaaNHS18QfHz36hJJS-daXf-TrZecTz0EmXfQ=s64", "userId": "08423563849622157672"}}
y_true = df['prices_amountmin']
y_pred = [np.median(y_true)] * y_true.shape[0]
mean_absolute_error(y_true,y_pred)
# + id="tSn3nKeIBWEA" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="50e69c4a-b9e3-42b6-bcad-59488fbd66c9" executionInfo={"status": "ok", "timestamp": 1581639302992, "user_tz": -60, "elapsed": 918, "user": {"displayName": "Petunia", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mClzTaaNHS18QfHz36hJJS-daXf-TrZecTz0EmXfQ=s64", "userId": "08423563849622157672"}}
y_true = df['prices_amountmin']
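# Back-transformed mean of log1p(prices): a baseline that is less sensitive to the
# long right tail of the price distribution than the plain arithmetic mean.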
price_log_mean = np.expm1(np.mean( np.log1p(y_true)))
y_pred = [price_log_mean] * y_true.shape[0]
mean_absolute_error(y_true,y_pred)
# + id="vMpEqNvxBxvz" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 233} outputId="dbd1b07d-2d4b-4de3-ec20-0960c84039f4" executionInfo={"status": "ok", "timestamp": 1581639334931, "user_tz": -60, "elapsed": 601, "user": {"displayName": "Petunia", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mClzTaaNHS18QfHz36hJJS-daXf-TrZecTz0EmXfQ=s64", "userId": "08423563849622157672"}}
df.columns
# + id="f9124pj1COPH" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 233} outputId="7db5614a-93e5-40b5-88e8-c0ef356f27ac" executionInfo={"status": "ok", "timestamp": 1581639413005, "user_tz": -60, "elapsed": 713, "user": {"displayName": "Petunia", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mClzTaaNHS18QfHz36hJJS-daXf-TrZecTz0EmXfQ=s64", "userId": "08423563849622157672"}}
df.brand.value_counts()
# + id="-iQrHfkvCXSt" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="5ab98ba6-10b3-49e7-aa7c-e8a20ca6f9e4" executionInfo={"status": "ok", "timestamp": 1581639499640, "user_tz": -60, "elapsed": 615, "user": {"displayName": "Petunia", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mClzTaaNHS18QfHz36hJJS-daXf-TrZecTz0EmXfQ=s64", "userId": "08423563849622157672"}}
df['brand'].factorize()[0]
# + id="fmzDHPK_Dyy_" colab_type="code" colab={}
df['brand_cat'] = df['brand'].factorize()[0]
# + id="pYEntWvqEIXe" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="9e1a7f61-9dfa-4c02-d678-8038d05cdb21" executionInfo={"status": "ok", "timestamp": 1581639935867, "user_tz": -60, "elapsed": 637, "user": {"displayName": "Petunia", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mClzTaaNHS18QfHz36hJJS-daXf-TrZecTz0EmXfQ=s64", "userId": "08423563849622157672"}}
feats = ['brand_cat']
X = df[ feats ].values
y = df['prices_amountmin'].values
model = DecisionTreeRegressor(max_depth=5)
scores = cross_val_score(model, X, y, scoring='neg_mean_absolute_error')
np.mean(scores), np.std(scores)
# + id="7wrG5DroFYqp" colab_type="code" colab={}
def run_model(feats):
X = df[ feats ].values
y = df['prices_amountmin'].values
model = DecisionTreeRegressor(max_depth=5)
scores = cross_val_score(model, X, y, scoring='neg_mean_absolute_error')
return np.mean(scores), np.std(scores)
# + id="tZuS8v3KGcji" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="473329d8-e4fc-4c4e-97d7-60097209110c" executionInfo={"status": "ok", "timestamp": 1581640247996, "user_tz": -60, "elapsed": 595, "user": {"displayName": "Petunia", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mClzTaaNHS18QfHz36hJJS-daXf-TrZecTz0EmXfQ=s64", "userId": "08423563849622157672"}}
run_model(['brand_cat'])
# + id="q7aZHRDGGhjp" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 233} outputId="f2b8c8f3-3270-41b6-fddc-e3270fb7ac4c" executionInfo={"status": "ok", "timestamp": 1581640518115, "user_tz": -60, "elapsed": 407, "user": {"displayName": "Petunia", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mClzTaaNHS18QfHz36hJJS-daXf-TrZecTz0EmXfQ=s64", "userId": "08423563849622157672"}}
df.prices_offer.value_counts()
# + id="dqHlP_IDHwgA" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="2e029e0d-ea25-4918-8a7f-e9b92379a688" executionInfo={"status": "ok", "timestamp": 1581640608335, "user_tz": -60, "elapsed": 697, "user": {"displayName": "Petunia", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mClzTaaNHS18QfHz36hJJS-daXf-TrZecTz0EmXfQ=s64", "userId": "08423563849622157672"}}
df['prices_offer'].factorize()[0]
# + id="9eNNMu90IGdQ" colab_type="code" colab={}
df['prices_offer_cat'] = df['prices_offer'].factorize()[0]
# + id="iF8d2dsEIS_M" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="63955400-078e-4f7f-9bb2-65ec02a6fd51" executionInfo={"status": "ok", "timestamp": 1581640755787, "user_tz": -60, "elapsed": 597, "user": {"displayName": "Petunia", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mClzTaaNHS18QfHz36hJJS-daXf-TrZecTz0EmXfQ=s64", "userId": "08423563849622157672"}}
run_model(['brand_cat','prices_offer_cat'])
# + id="PLNo8SvbIqev" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 253} outputId="4474c1ac-b1dc-4e87-f535-e5f52e2d2e38" executionInfo={"status": "ok", "timestamp": 1581640845027, "user_tz": -60, "elapsed": 517, "user": {"displayName": "Petunia", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mClzTaaNHS18QfHz36hJJS-daXf-TrZecTz0EmXfQ=s64", "userId": "08423563849622157672"}}
df.sizes.value_counts()
# + id="Qz884XL9JASg" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 71} outputId="082ed833-8411-4693-cc8a-51f0d15b3888" executionInfo={"status": "ok", "timestamp": 1581640873907, "user_tz": -60, "elapsed": 552, "user": {"displayName": "Petunia", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mClzTaaNHS18QfHz36hJJS-daXf-TrZecTz0EmXfQ=s64", "userId": "08423563849622157672"}}
df.prices_issale.value_counts()
# + id="uZ6Po4H7JHVM" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 253} outputId="1710a067-4a3b-4d0f-b928-659851c9a0ab" executionInfo={"status": "ok", "timestamp": 1581640898491, "user_tz": -60, "elapsed": 770, "user": {"displayName": "Petunia", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mClzTaaNHS18QfHz36hJJS-daXf-TrZecTz0EmXfQ=s64", "userId": "08423563849622157672"}}
df.reviews.value_counts()
# + id="WcxGfXnSJNR4" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 233} outputId="2a7ce6dc-2dd7-42f3-fbae-4d3b886a1009" executionInfo={"status": "ok", "timestamp": 1581640918581, "user_tz": -60, "elapsed": 567, "user": {"displayName": "Petunia", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mClzTaaNHS18QfHz36hJJS-daXf-TrZecTz0EmXfQ=s64", "userId": "08423563849622157672"}}
df.manufacturer.value_counts()
# + id="1q67uaB_JSO_" colab_type="code" colab={}
df['manufacturer_cat'] = df['manufacturer'].factorize()[0]
# + id="FNIRet3RJiIM" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="b6c926b2-86c2-442f-a0c6-8b78efad7f51" executionInfo={"status": "ok", "timestamp": 1581641014255, "user_tz": -60, "elapsed": 607, "user": {"displayName": "Petunia", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mClzTaaNHS18QfHz36hJJS-daXf-TrZecTz0EmXfQ=s64", "userId": "08423563849622157672"}}
run_model(['brand_cat','manufacturer_cat'])
# + id="V6SKqXggJplP" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="2548b437-18c2-4896-cf28-e625e336d63a" executionInfo={"status": "ok", "timestamp": 1581641084592, "user_tz": -60, "elapsed": 2709, "user": {"displayName": "Petunia", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mClzTaaNHS18QfHz36hJJS-daXf-TrZecTz0EmXfQ=s64", "userId": "08423563849622157672"}}
# ls
# + id="lCJ-bI7yJ6PD" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="9efa9888-c19a-408e-aedd-25a3a1387257" executionInfo={"status": "ok", "timestamp": 1581641169887, "user_tz": -60, "elapsed": 641, "user": {"displayName": "Petunia", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mClzTaaNHS18QfHz36hJJS-daXf-TrZecTz0EmXfQ=s64", "userId": "08423563849622157672"}}
# cd ..
# + id="BWJatIlLKPYX" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 53} outputId="c1b022df-3861-4a4e-9487-794d5581d35d" executionInfo={"status": "ok", "timestamp": 1581641174760, "user_tz": -60, "elapsed": 2298, "user": {"displayName": "Petunia", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mClzTaaNHS18QfHz36hJJS-daXf-TrZecTz0EmXfQ=s64", "userId": "08423563849622157672"}}
# ls
# + id="qbfZzPThKQWb" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 53} outputId="be4839f6-0cb3-477e-ad1b-1d77c655fba9" executionInfo={"status": "ok", "timestamp": 1581641451602, "user_tz": -60, "elapsed": 2605, "user": {"displayName": "Petunia", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mClzTaaNHS18QfHz36hJJS-daXf-TrZecTz0EmXfQ=s64", "userId": "08423563849622157672"}}
# !git add 'Colab Notebooks'/day4.ipynb
# + id="oM8wEnxRKV0n" colab_type="code" colab={}
#GITHUB_TOKEN = "<KEY>"
#GITHUB_URL = 'https://{0}@github.com/Rosenzweigova/dw_matrix.git'.format(GITHUB_TOKEN)
# + id="EN2sA2IPLYgu" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="5585c13d-a1d6-455d-ee4c-1741b14af0d7" executionInfo={"status": "ok", "timestamp": 1581641927869, "user_tz": -60, "elapsed": 777, "user": {"displayName": "Petunia", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mClzTaaNHS18QfHz36hJJS-daXf-TrZecTz0EmXfQ=s64", "userId": "08423563849622157672"}}
#GITHUB_URL
# + id="SJliRHmhNIlg" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 53} outputId="13b7d14e-5c25-4c3d-de4e-9257644a93db" executionInfo={"status": "ok", "timestamp": 1581642119009, "user_tz": -60, "elapsed": 2140, "user": {"displayName": "Petunia", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mClzTaaNHS18QfHz36hJJS-daXf-TrZecTz0EmXfQ=s64", "userId": "08423563849622157672"}}
# ls
# + id="cVjTm4UxNjtR" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="082ae419-9c10-4eab-f23e-5c5f0a001292" executionInfo={"status": "ok", "timestamp": 1581642144437, "user_tz": -60, "elapsed": 557, "user": {"displayName": "Petunia", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mClzTaaNHS18QfHz36hJJS-daXf-TrZecTz0EmXfQ=s64", "userId": "08423563849622157672"}}
# cd dw_matrix/
# + id="LuhVYuH4N9hB" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="4f28106b-6e73-4008-d429-23edf3ec81c0" executionInfo={"status": "ok", "timestamp": 1581642181409, "user_tz": -60, "elapsed": 3074, "user": {"displayName": "Petunia", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mClzTaaNHS18QfHz36hJJS-daXf-TrZecTz0EmXfQ=s64", "userId": "08423563849622157672"}}
# ls
# + id="O011SDewOF6J" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="51ba214a-14dc-4f75-c1d6-5d8248bfe667" executionInfo={"status": "ok", "timestamp": 1581642194567, "user_tz": -60, "elapsed": 554, "user": {"displayName": "Petunia", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mClzTaaNHS18QfHz36hJJS-daXf-TrZecTz0EmXfQ=s64", "userId": "08423563849622157672"}}
# cd ..
# + id="RNtZKoNkOJvc" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 53} outputId="e05eba12-5fdc-46a4-cf02-a3421f765cd1" executionInfo={"status": "ok", "timestamp": 1581642198550, "user_tz": -60, "elapsed": 2402, "user": {"displayName": "Petunia", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mClzTaaNHS18QfHz36hJJS-daXf-TrZecTz0EmXfQ=s64", "userId": "08423563849622157672"}}
# ls
# + id="57le1rqEOKJo" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 53} outputId="8e29c1f0-4fb0-48a0-ca13-8b474b0c17af" executionInfo={"status": "ok", "timestamp": 1581642215991, "user_tz": -60, "elapsed": 2185, "user": {"displayName": "Petunia", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mClzTaaNHS18QfHz36hJJS-daXf-TrZecTz0EmXfQ=s64", "userId": "08423563849622157672"}}
# !git add day4.ipynb
# + id="vk6_ASIOOOls" colab_type="code" colab={}
| day4.ipynb |
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .sh
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Bash
# language: bash
# name: bash
# ---
# # Pangenome Construction using Roary
#
# ## Introduction
# Given a set of genomes, the pan genome is the collection of all genes the set contains. Roary, the pan genome pipeline, takes closely related annotated genomes in GFF3 file format and calculates the pan genome.
#
# For more in-depth information about Roary, please feel free to have a look at the paper:
#
# > **Roary: Rapid large-scale prokaryote pan genome analysis**
# > <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>
# > _Bioinformatics, 2015;31(22):3691-3693 doi:[10.1093/bioinformatics/btv421](http://bioinformatics.oxfordjournals.org/content/31/22/3691)_
#
# or visit the [Roary manual](http://sanger-pathogens.github.io/Roary/).
#
# ## Learning outcomes
# By the end of this tutorial you can expect to be able to:
#
# * Describe what a pangenome is
# * Prepare data for input to Roary
# * Perform QC on input data and understand why QC is important
# * Run Roary to create a pangenome with and without a core alignment
# * Understand the different output files produced by Roary
# * Draw a basic tree from the core gene alignment produced by Roary
# * Query the pangenome results produced by Roary
# * Use Phandango to visualise the results produced by Roary
#
# ## Tutorial sections
# This tutorial comprises the following sections:
# 1. [What is a pan genome](pan_genome.ipynb)
# 2. [Preparing the input data](prepare_data.ipynb)
# 3. [Performing QC on your data](qc.ipynb)
# 4. [Running Roary](run_roary.ipynb)
# 5. [Exploring the results](results.ipynb)
# 6. [Visualising the results with Phandango](phandango.ipynb)
#
# ## Authors
# This tutorial was created by [<NAME>](https://github.com/ssjunnebo).
#
# ## Running the commands from this tutorial
# You can run the commands in this tutorial either directly from the Jupyter notebook (if using Jupyter), or by typing the commands in your terminal window.
#
# ### Running commands on Jupyter
# If you are using Jupyter, command cells (like the one below) can be run by selecting the cell and clicking _Cell -> Run_ from the menu above or using _ctrl Enter_ to run the command. Let's give this a try by printing our working directory using the _pwd_ command and listing the files within it. Run the commands in the two cells below.
pwd
ls -l
# ### Running commands in the terminal
# You can also follow this tutorial by typing all the commands you see into a terminal window. This is similar to the "Command Prompt" window on MS Windows systems, which allows the user to type DOS commands to manage files.
#
# To get started, select the cell below with the mouse and then either press control and enter or choose Cell -> Run in the menu at the top of the page.
echo cd $PWD
# Now open a new terminal on your computer and type the command that was output by the previous cell followed by the enter key. The command will look similar to this:
#
# cd /home/manager/pathogen-informatics-training/Notebooks/ROARY/
#
# Now you can follow the instructions in the tutorial from here.
#
# ## Let’s get started!
# This tutorial assumes that you have Roary and Prokka installed on your computer. For download and installation instructions, please see:
#
# * The [Roary GitHub-page](https://github.com/sanger-pathogens/roary)
# * The [Prokka GitHub-page](https://github.com/tseemann/prokka)
#
# To check that you have installed Roary correctly, you can run the following command:
roary --help
# This should return the help message for Roary.
#
# Similarly, to check that you have installed Prokka correctly, you can run:
prokka --help
# This should return the help message for Prokka.
# To get started with the tutorial, head to the first section: [What is a pan genome](pan_genome.ipynb)
# The answers to all questions in the tutorial can be found [here](answers.ipynb).
| ROARY/index.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .ps1
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: PowerShell
# name: powershell
# ---
# + [markdown] azdata_cell_guid="5aa4391d-6f25-4169-9417-045034dae8b5"
# # Using the ImportExcel module to move data
# The ImportExcel module is maintained by PowerShell MVP <NAME> @dfinke on twitter.
#
# You will need to install the module from the PowerShell Gallery, if you haven't done so already.
# + azdata_cell_guid="113a36b9-4caa-45f4-b1ff-b85554f65639"
Install-Module ImportExcel
# + [markdown] azdata_cell_guid="6923c817-2c47-43a9-ad6a-65056b6a1b5b"
# Before we get started importing data from Excel into our SQL Server, we can use the `Get-ExcelSheetInfo` cmdlet below to inspect the multiple worksheets inside an Excel file.
# + azdata_cell_guid="c704e584-d2ec-4e5c-abea-1ae73db339ab"
Get-ExcelSheetInfo -Path C:\temp\AW\AdventureWorksDW2017_Dims.xlsx
# + [markdown] azdata_cell_guid="faaa225e-d284-44b7-a411-b1cd3a18c0ef"
# Use the technique below to import multiple worksheets from an Excel file, and create a new table in SQL Server for each Worksheet.
# + azdata_cell_guid="c4ea4da6-1074-4165-a3f3-3d209f372a56"
foreach ($Worksheet in Get-ExcelSheetInfo -Path C:\temp\AW\AdventureWorksDW2017_Dims.xlsx)
{
"$($Worksheet.Path) and $($Worksheet.Name)"
}
# + azdata_cell_guid="3fcd7f54-cc60-4df0-a8d8-eb4bfd6707c1"
$Worksheet
# + azdata_cell_guid="0fcd0cb2-5e91-47ca-8134-d1188d48a5e0"
,(Import-Excel -Path $Worksheet.Path -WorksheetName $Worksheet.Name) |
Write-SqlTableData -ServerInstance localhost\SQL2017 -Database BlankDB -SchemaName Excel -TableName $Worksheet.Name -Force;
# + azdata_cell_guid="e138384c-b383-419a-8e37-8f6b1a97eb39" tags=[]
foreach ($Worksheet in Get-ExcelSheetInfo -Path C:\temp\AW\AdventureWorksDW2017_Dims.xlsx)
{
,(Import-Excel -Path $Worksheet.Path -WorksheetName $Worksheet.Name) |
Write-SqlTableData -ServerInstance localhost\SQL2017 -Database BlankDB -SchemaName Excel -TableName $Worksheet.Name -Force;
}
# + azdata_cell_guid="b158be22-39f0-4f69-a4be-f94ad57f2376"
dir SQLSERVER:\SQL\localhost\SQL2017\Databases\BlankDB\Tables
# + azdata_cell_guid="07212c8b-17bf-4906-9e6a-bdc442f9633f"
dir C:\temp\AW -Filter Fact*.xlsx;
#,(Import-Excel -Path C:\temp\Excel\SysColumns_AdventureWorks2014.xlsx) |
#Write-SqlTableData -ServerInstance localhost\SQL2017 -Database BlankDB -SchemaName dbo -TableName MyOtherNewTable_fromExcel -Force
| Presentations/PowerShell-for-PowerBI/ImportData_FromExcel.ipynb |
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .jl
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Julia 1.6.1
# language: julia
# name: julia-1.6
# ---
# # Reverse time migration with COFII and AzureClusterlessHPC
#
# This notebook demonstrates how to run reverse time migration (RTM) using Chevron's COFII framework and AzureClusterlessHPC. The original notebook based on scale sets can be found [here](https://github.com/ChevronETC/Examples/blob/main/60_rtm/02_rtm_DynamicParallel.ipynb). In the following version, we use AzureClusterlessHPC's macros to offload computations to Azure Batch.
#
#
# ## Set up
#
# We start by setting the environment variables that point to our credentials and parameter files, which specifies our batch pool:
# +
# Install required packages for this example
using Pkg
Pkg.add(["DistributedArrays", "DistributedOperations", "Schedulers", "PyPlot"])
Pkg.add(["Jets", "DistributedJets", "JetPack", "JetPackDSP", "WaveFD", "JetPackWaveFD"])
# Set paths to credentials + parameters
ENV["CREDENTIALS"] = joinpath(pwd(), "../..", "credentials.json")
ENV["PARAMETERS"] = joinpath(pwd(), "parameters.json")
# Load AzureClusterlessHPC
using AzureClusterlessHPC
batch_clear();
# -
# We start the batch pool and pass it the startup script that installs all the required COFII-related packages, as listed [here](https://github.com/ChevronETC/Examples/blob/main/00_add_packages/00_add_packages.ipynb). The packages are installed on all nodes in the batch pool.
startup_script = "pool_startup_script_cofii.sh"
create_pool_and_resource_file(startup_script);
# ## Prepare velocity models
#
# Before diving into the imaging part, we load the true velocity model and create the migration velocity model by smoothing it slightly:
using Distributed, PyPlot, Jets, JetPack, JetPackDSP, Printf
# Read true velocity for data generation and migration velocity model
v_true = read!("../../data/marmousi_vp_20m_176x851.bin", Array{Float32}(undef, 176, 851))
nz,nx = 176,851
dz,dx = 10.0,10.0;
# +
ns = 21
P = JopPad(JetSpace(Float32,nz,nx), -ns:nz+ns, -ns:nx+ns, extend=true)
M = JopMix(range(P), (7,7))
R = JopPad(JetSpace(Float32,nz,nx), -ns:nz+ns, -ns:nx+ns, extend=false)
s1 = R' ∘ M ∘ P * (1 ./ v_true)
v_smooth = 1 ./(s1);
# -
# The true seismic image is obtained as the vertical difference of the true model:
D = JopDifference(JetSpace(Float32,nz,nx), 1)
r1 = D * v_smooth;
# +
vmin,vmax = extrema(v_smooth)
rmax = maximum(abs,r1)
@show vmin,vmax,rmax;
figure(figsize=(8,6)); clf()
subplot(2,1,1); imshow(v_smooth,aspect="auto",cmap="jet");
colorbar(orientation="vertical");clim(vmin,vmax);
title("True Velocity");
subplot(2,1,2); imshow(2 .* r1 ./ rmax,aspect="auto",cmap="gray");
colorbar(orientation="vertical");clim(-1,+1);
title("True Reflectivity");
tight_layout()
# -
# ## Data modeling
#
# With our true velocity model for generating seismic shot data and our migration velocity model in place, we now define the function that models the observed data, which is subsequently used for imaging. First, we load all required packages and tag the expression with the `@batchdef` macro to specify that all packages, variables and functions are also defined on the remote batch workers.
@batchdef using Distributed, DistributedArrays, DistributedJets, DistributedOperations
@batchdef using Jets, JetPack, WaveFD, JetPackWaveFD, Random, LinearAlgebra;
# We enable thread pinning and set the thread count on the batch nodes through OpenMP environment variables.
# Set OMP environment variables
@batchdef begin
nthread = Sys.CPU_THREADS
ENV["OMP_DISPLAY_ENV"] = "true"
ENV["OMP_PROC_BIND"] = "close"
ENV["OMP_NUM_THREADS"] = "$(div(nthread,2))"
end;
# Next, we create a set of source locations for which the seismic data will be generated. Here, we create 20 source locations:
# Create list of shots
nshots = 20
sx = round.(Int,collect(range(0,stop=(nx-1)*dx,length=nshots)))
@show nshots
@show sx;
# For the modeling and imaging part, we have to supply a directory in which temporary wavefield files will be written to disk:
# Scratch space for temp files
@batchdef scratch = pwd()
@batchdef isdir(scratch);
# We now specify the number of time steps and sampling intervals for the data and for finite-difference modeling:
# Modeling parameters
@batchdef begin
ntrec = 2001
dtrec = 0.002
dtmod = 0.001
end;
# Next, we implement our forward modeling function, which takes the source number `isrc`, as well as grid parameters and a batch future `_v` of the velocity model as input arguments. The batch future can be retrieved by calling the `fetch` function on the future:
# Modeling function
@batchdef function modelshot(isrc,nz,nx,dz,dx,_v,sx)
@info "modeling shot $(isrc) on $(gethostname()) with id $(myid())..."
F = JopNlProp2DAcoIsoDenQ_DEO2_FDTD(;
b = ones(Float32,nz,nx),
nthreads = div(Sys.CPU_THREADS,2),
ntrec = ntrec,
dtrec = dtrec,
dtmod = dtmod,
dz = dz,
dx = dx,
wavelet = WaveletCausalRicker(f=10.0),
sx = sx[isrc],
sz = dz,
rx = dx*[0:1:nx-1;],
rz = 2*dz*ones(length(0:1:nx-1)),
nbz_cache = nz,
nbx_cache = 16,
comptype = UInt32,
srcfieldfile = joinpath(scratch, "field-$isrc-$(randstring()).bin"),
reportinterval=0)
d = F*fetch(_v) # read model into memory via fetch
close(F) # delete scratch files that we don't need anymore
return d
end;
# We now broadcast the true velocity model to the batch workers. The `@bcast` macro uploads the model to blob storage, creates a batch resource file and returns a batch future that can be passed to the modeling function (and avoids repeatedly copying the model to every task). We then execute the modeling function as a multi-task batch job by using the `@batchexec` macro in combination with `pmap`. The function call returns a batch controller with basic job parameters, as well as futures to the results (stored in blob).
# Generate data
_v_true = @bcast(v_true)
bctrl = @batchexec pmap(isrc -> modelshot(isrc, nz, nx, dz, dx, _v_true, sx), 1:nshots);
# We wait for all tasks to finish. Rather than fetching the result, we collect the results in the form of a `BlobFuture`, which we can subsequently pass to our RTM function. This avoids moving the modeled data between the blob storage and the local machine.
# Wait for tasks to finish
wait_for_tasks_to_complete(bctrl)
d_futures = bctrl.output
delete_job(bctrl);
# ## Reverse time migration
#
# In this section, we define our RTM function and apply it to the seismic data that we modeled in the previous section. Before imaging the data, we apply a topmute function to mute the direct wave, which is specified as follows:
# Data topmute
@batchdef function timemute!(F, d, watervel, tmute)
for i = 1:length(state(F, :rx))
rx = state(F, :rx)
rz = state(F, :rz)
sx = state(F, :sx)
sz = state(F, :sz)
dist = sqrt((sx[1] - rx[i])^2 + (sz[1] - rz[i])^2)
time = dist / watervel
tbeg = 1
tend = round(Int, (time + tmute) / state(F,:dtrec))
tend = clamp(tend,1,size(d,1))
d[tbeg:tend,i] .= 0
end
nothing
end;
# Finally, we implement our `migrateshot` function, which sets up the modeling operator for a given source index `isrc`, fetches the corresponding observed shot record, and then images it. The function applies the time mute to the data before imaging and returns both the image and an array for illumination compensation:
# Migrate shot function
@batchdef function migrateshot(isrc,nz,nx,dz,dx,_d,_v,sx)
@info "migrating shot $(isrc) on $(gethostname()) with id $(myid())..."
F = JopNlProp2DAcoIsoDenQ_DEO2_FDTD(;
b = ones(Float32,nz,nx),
nthreads = div(Sys.CPU_THREADS,2),
ntrec = ntrec,
dtrec = dtrec,
dtmod = dtmod,
dz = dz,
dx = dx,
wavelet = WaveletCausalRicker(f=10.0),
sx = sx[isrc],
sz = dz,
rx = dx*[0:1:nx-1;],
rz = 2*dz*ones(length(0:1:nx-1)),
nbz_cache = nz,
nbx_cache = 16,
comptype = UInt32,
srcfieldfile = joinpath(scratch, "field-$isrc-$(randstring()).bin"),
reportinterval=0)
d = fetch(_d)
v = reshape(fetch(_v), nz, nx, 1)
timemute!(F, d, 1500, 2/16) # mute out the direct and diving waves
J = jacobian!(F, v)
illum = srcillum(J)
m = J'*d
close(F) # delete scratch files that we don't need anymore
return m, illum
end;
# Before evaluating our function, we broadcast the migration velocity model to the batch workers:
_v_smooth = @bcast(v_smooth);
# As before, we use the `@batchexec` macro in combination with Julia's `pmap` function, to run our RTM function as a multi-task Azure Batch job:
# Run RTM
bctrl = @batchexec pmap(isrc -> migrateshot(isrc, nz, nx, dz, dx, d_futures[isrc], _v_smooth, sx), 1:nshots);
# Next, we wait for the batch job to finish and collect the results. We can fetch and sum the output images by using the `fetchreduce` function, which applies a specified operation to the return arguments of the executed function:
# +
m, illum = fetchreduce(bctrl; op=+); # Blocking
# Remove singleton dimension
m = m[:,:,1]
illum = illum[:,:,1];
# -
# Before plotting the result, we apply some basic post processing to the image. This includes a Laplacian filter and a depth-squared gain, as well as illumination compensation and muting of the water column.
L = JopLaplacian(JetSpace(Float32,nz,nx));
# Illumination compensation
g = ([0:(nz-1);]*dz).^2 * ones(1,nx)
img = g .* (L * m) ./ (illum .+ 1e-8 * maximum(abs, illum))
@show extrema(img);
# Mute water column
img[v_true.==1500.0] .= 0
mrms1 = 3.5 * sqrt(norm(img)^2 / length(img));
# Finally, we plot the migrated image and compare it to the true reflectivity:
# +
# Plot
figure(figsize=(8,6)); clf()
subplot(2,1,1); imshow(img,aspect="auto",cmap="gray")
colorbar(orientation="vertical");clim(-mrms1,+mrms1)
title("Migration in True Velocity")
subplot(2,1,2); imshow(2 .* r1 ./ rmax,aspect="auto",cmap="gray")
colorbar(orientation="vertical");clim(-1,+1)
title("True Reflectivity")
tight_layout();
# -
# The last step is the clean up step, in which we delete the job, the temporary blob container and the batch pool:
destroy!(bctrl);
# ## Copyright
#
# Copyright (c) Microsoft Corporation. All rights reserved.
#
# Licensed under the MIT License (MIT). See LICENSE in the repo root for license information.
| examples/cofii/cofii_rtm_batch.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/lwhluvdemo/mydeeplearning/blob/master/Deep_Architectures.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + id="f3YWYg936LPq" colab_type="code" colab={}
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline
# + id="B6ZrTfkk6cLR" colab_type="code" colab={}
NUM_INPUTS=100
HIDDEN_SIZE=1024
NUM_OUTPUTS=20
# + [markdown] id="OqD5-YhaMl-s" colab_type="text"
# ### 1. Logistic Regression
# + id="NcwnkeQw6SAC" colab_type="code" colab={}
lor = nn.Sequential(
nn.Linear(NUM_INPUTS, 1),
nn.Sigmoid()
)
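# Illustrative shape check (an added sketch, following the pattern used later in this
# notebook): a dummy batch of 10 inputs maps to one probability in (0, 1) per sample.
x = torch.randn(10, NUM_INPUTS)
y = lor(x)
print('Logistic regression output size: ', y.size())  # torch.Size([10, 1])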
# + [markdown] id="AqnLN0ruMto0" colab_type="text"
# ### 2. Linear Regression
# + id="D0Bsumng65VV" colab_type="code" colab={}
lir = nn.Sequential(
nn.Linear(NUM_INPUTS, 1)
)
# + [markdown] id="iWPTUONOMxp6" colab_type="text"
# ### 3. Softmax classifier
# + id="QnaTs6dZ8BMe" colab_type="code" colab={}
smx = nn.Sequential(
nn.Linear(NUM_INPUTS, NUM_OUTPUTS),
nn.LogSoftmax(dim=1)
)
# + [markdown] id="550SLZ4eM1pu" colab_type="text"
# ### 4. MultiLayer Perceptron
# + id="2lTm0pcd_UzE" colab_type="code" colab={}
NUM_INPUTS=100
HIDDEN_SIZE=1024
NUM_OUTPUTS=20
mlp = nn.Sequential(
nn.Linear(NUM_INPUTS, HIDDEN_SIZE),
nn.Tanh(),
nn.Linear(HIDDEN_SIZE, HIDDEN_SIZE),
nn.Tanh(),
nn.Linear(HIDDEN_SIZE, NUM_OUTPUTS),
nn.LogSoftmax(dim=1)
)
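# Illustrative shape check (an added sketch): a dummy batch of 8 inputs yields
# log-probabilities over NUM_OUTPUTS classes, the format expected by nn.NLLLoss.
x = torch.randn(8, NUM_INPUTS)
y = mlp(x)
print('MLP output size: ', y.size())  # torch.Size([8, NUM_OUTPUTS])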
# + [markdown] id="hYHX1T_iM4IN" colab_type="text"
# ### 5. Embedding with fully connected layer
# + id="nFtBPGJ7Cc0j" colab_type="code" colab={}
VOCAB_SIZE = 10000
HIDDEN_SIZE=100
# mapping a vocabulary of size 10,000 to HIDDEN_SIZE-dimensional projections
emb_1 = nn.Linear(VOCAB_SIZE, HIDDEN_SIZE)
# + id="Cj_EhCFcFrYw" colab_type="code" outputId="a8f8b072-e23f-4e86-dbf7-b46b6fddae3d" colab={"base_uri": "https://localhost:8080/", "height": 54}
# forward example [10, 10000] tensor
code = [1] + [0] * 9999
# copy the same one-hot code [1 0 0 0 ... 0] 10 times
x = torch.FloatTensor([code] * 10)
print('Input x tensor size: ', x.size())
y = emb_1(x)
print('Output y embedding size: ', y.size())
# + [markdown] id="XxmC8kTWM-wV" colab_type="text"
# ### 6. Embedding with Embedding layer
# + id="U8090BmHER9b" colab_type="code" colab={}
VOCAB_SIZE = 10000
HIDDEN_SIZE=100
# mapping a vocabulary of size 10,000 to HIDDEN_SIZE-dimensional projections
emb_2 = nn.Embedding(VOCAB_SIZE, HIDDEN_SIZE)
# + id="xIQmFOqUFCM_" colab_type="code" outputId="a537d62f-e28a-4dcc-c158-1f979c5be241" colab={"base_uri": "https://localhost:8080/", "height": 54}
# Just make a LongTensor of zero indices (each row selects embedding index 0)
x = torch.zeros(10, 1).long()
print('Input x tensor size: ', x.size())
y = emb_2(x)
print('Output y embedding size: ', y.size())
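# Note: nn.Embedding looks up rows of its weight matrix by integer index, which matches
# the one-hot-input-times-Linear approach above (up to the Linear layer's bias term)
# while avoiding materializing the 10,000-dimensional one-hot vectors.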
# + [markdown] id="8XyCuG_1NAj3" colab_type="text"
# ### 7. Recurrent Neural Network
# + id="Ir2jGezQFHzC" colab_type="code" colab={}
NUM_INPUTS = 100
HIDDEN_SIZE = 512
NUM_LAYERS = 1
# define a recurrent layer
rnn = nn.RNN(NUM_INPUTS, HIDDEN_SIZE, num_layers=NUM_LAYERS)
# + id="QwrtfxJXVxpR" colab_type="code" outputId="bd16be2b-350e-47de-fb17-217d3f54d82d" colab={"base_uri": "https://localhost:8080/", "height": 54}
SEQ_LEN = 100
x = torch.randn(SEQ_LEN, 1, NUM_INPUTS)
print('Input tensor size [seq_len, bsize, hidden_size]: ', x.size())
ht, state = rnn(x, None)
print('Output tensor h[t] size [seq_len, bsize, hidden_size]: ', ht.size())
# + id="eZxdVYlqWRcH" colab_type="code" colab={}
NUM_INPUTS = 100
HIDDEN_SIZE = 512
NUM_LAYERS = 1
# define a recurrent layer, swapping batch and time axis
rnn = nn.RNN(NUM_INPUTS, HIDDEN_SIZE, num_layers=NUM_LAYERS,
batch_first=True)
# + id="gbiNHH0zWrr-" colab_type="code" outputId="3b480387-0d9e-441a-8c53-ce171faab8a9" colab={"base_uri": "https://localhost:8080/", "height": 54}
SEQ_LEN = 100
x = torch.randn(1, SEQ_LEN, NUM_INPUTS)
print('Input tensor size [bsize, seq_len, hidden_size]: ', x.size())
ht, state = rnn(x, None)
print('Output tensor h[t] size [bsize, seq_len, hidden_size]: ', ht.size())
# + id="7TZ0bj9wWw0Z" colab_type="code" outputId="ac1aebfa-da0f-4e14-c8bb-cad4f6ccefc1" colab={"base_uri": "https://localhost:8080/", "height": 54}
# let's check ht and state sizes
print('ht size: ', ht.size())
print('state size: ', state.size())
# + id="95ioy0LAXMUN" colab_type="code" outputId="78c2d160-be2c-46e6-cd43-dd7e8a68cbf0" colab={"base_uri": "https://localhost:8080/", "height": 72}
NUM_INPUTS = 100
NUM_OUTPUTS = 10
HIDDEN_SIZE = 512
SEQ_LEN = 100
NUM_LAYERS = 1
# define a recurrent layer, swapping batch and time axis and connect
# an FC layer as an output layer to build a full network
rnn = nn.RNN(NUM_INPUTS, HIDDEN_SIZE, num_layers=NUM_LAYERS,
batch_first=True)
fc = nn.Sequential(
nn.Linear(HIDDEN_SIZE, NUM_OUTPUTS),
nn.LogSoftmax(dim=2)
)
x = torch.randn(1, SEQ_LEN, NUM_INPUTS)
print('Input tensor size x: ', x.size())
ht, state = rnn(x, None)
print('Hidden tensor size ht: ', ht.size())
y = fc(ht)
print('Output tensor y size: ', y.size())
# + [markdown] id="shvFLclDNFDt" colab_type="text"
# ### 8. LSTM Recurrent Neural Network
# + id="DDzAWGL5ZEQ5" colab_type="code" outputId="5eaadaac-4dd3-42b8-d210-0cbc2cce9aab" colab={"base_uri": "https://localhost:8080/", "height": 90}
lstm = nn.LSTM(NUM_INPUTS, HIDDEN_SIZE, num_layers=NUM_LAYERS,
batch_first=True)
x = torch.randn(1, SEQ_LEN, NUM_INPUTS)
print('Input tensor size x: ', x.size())
ht, states = lstm(x, None)
hT, cT = states[0], states[1]
print('Output tensor ht size: ', ht.size())
print('Last state h[T]: ', hT.size())
print('Cell state c[T]: ', cT.size())
# + [markdown] id="-G8eHj4bNIt9" colab_type="text"
# ### 9. Convolutional Neural Network
# + id="hVv1Q-yvcvNj" colab_type="code" colab={}
NUM_CHANNELS_IN = 1
HIDDEN_SIZE = 1024
KERNEL_WIDTH = 3
# Build a one-dimensional convolutional neural layer
conv1d = nn.Conv1d(NUM_CHANNELS_IN, HIDDEN_SIZE, KERNEL_WIDTH)
# + id="UNInGheJef65" colab_type="code" outputId="b9435d6a-d09b-4479-b542-a5e25b68cd0a" colab={"base_uri": "https://localhost:8080/", "height": 54}
SEQ_LEN = 8
x = torch.randn(1, NUM_CHANNELS_IN, SEQ_LEN)
print('Input tensor size x: ', x.size())
y = conv1d(x)
print('Output tensor y size: ', y.size())
# + id="EmJ5o59BetZb" colab_type="code" outputId="d1ee1101-fbd5-48a0-8bf1-b2991d43b85a" colab={"base_uri": "https://localhost:8080/", "height": 54}
NUM_CHANNELS_IN = 1
HIDDEN_SIZE = 1024
KERNEL_WIDTH = 3
PADDING = KERNEL_WIDTH // 2 # = 1
# Build a one-dimensional convolutional neural layer
conv1d = nn.Conv1d(NUM_CHANNELS_IN, HIDDEN_SIZE, KERNEL_WIDTH,
padding=PADDING)
SEQ_LEN = 8
x = torch.randn(1, NUM_CHANNELS_IN, SEQ_LEN)
print('Input tensor size x: ', x.size())
y = conv1d(x)
print('Output tensor y size: ', y.size())
# + id="uZnAIecIfr3H" colab_type="code" outputId="affa12f5-7ca6-4b63-8423-e402953bba42" colab={"base_uri": "https://localhost:8080/", "height": 72}
NUM_CHANNELS_IN = 1
HIDDEN_SIZE = 1024
KERNEL_WIDTH = 3
# Build a one-dimensional convolutional neural layer
conv1d = nn.Conv1d(NUM_CHANNELS_IN, HIDDEN_SIZE, KERNEL_WIDTH)
SEQ_LEN = 8
PADDING = KERNEL_WIDTH - 1 # = 2
x = torch.randn(1, NUM_CHANNELS_IN, SEQ_LEN)
print('Input tensor x size: ', x.size())
xpad = F.pad(x, (PADDING, 0))
print('Input tensor after padding xpad size: ', xpad.size())
y = conv1d(xpad)
print('Output tensor y size: ', y.size())
# + [markdown] id="cz2NAKOTNOMV" colab_type="text"
# ### 10. Convolutional Neural Network as an MLP
# + id="QdAP5a4qgnLV" colab_type="code" outputId="3f3129bf-60b3-4593-ae14-58800d166ce5" colab={"base_uri": "https://localhost:8080/", "height": 54}
NUM_INPUTS = 100
HIDDEN_SIZE = 1024
NUM_OUTPUTS= 20
# MLP as a CNN
mlp = nn.Sequential(
nn.Conv1d(NUM_INPUTS, HIDDEN_SIZE, 1),
nn.Tanh(),
nn.Conv1d(HIDDEN_SIZE, HIDDEN_SIZE, 1),
nn.Tanh(),
nn.Conv1d(HIDDEN_SIZE, NUM_OUTPUTS, 1),
nn.LogSoftmax(dim=1)
)
x = torch.randn(1, 100, 1)
print('Input tensor x size: ', x.size())
y = mlp(x)
print('Output tensor y size: ', y.size())
# + [markdown] id="31XMMrLzNQoV" colab_type="text"
# ### 11. Deconvolutional Neural Network
# + id="_yOUif_eiNhF" colab_type="code" outputId="715a1cf3-5a1f-4c61-dcdf-b2f77677e137" colab={"base_uri": "https://localhost:8080/", "height": 54}
NUM_CHANNELS_IN = 1
HIDDEN_SIZE = 1
KERNEL_WIDTH = 8
STRIDE = 4
deconv = nn.ConvTranspose1d(NUM_CHANNELS_IN, HIDDEN_SIZE, KERNEL_WIDTH,
stride=STRIDE)
SEQ_LEN = 2
y = torch.randn(1, NUM_CHANNELS_IN, SEQ_LEN)
print('Input tensor y size: ', y.size())
x = deconv(y)
print('Output (interpolated) tensor x size: ', x.size())
# + [markdown] id="2KAdi7kwNbxt" colab_type="text"
# ### 12. Quasi Recurrent Neural Network
# + id="HlnG0POXlfTP" colab_type="code" outputId="a7900e10-d6f4-4265-a67c-4f0b22e84d41" colab={"base_uri": "https://localhost:8080/", "height": 54}
class fQRNNLayer(nn.Module):
def __init__(self, num_inputs, num_outputs,
kwidth=2):
super().__init__()
self.num_inputs = num_inputs
self.num_outputs = num_outputs
self.kwidth = kwidth
# double feature maps for zt and ft predictions with same conv layer
self.conv = nn.Conv1d(num_inputs, num_outputs * 2, kwidth)
def forward(self, x, state=None):
# x is [bsz, seq_len, num_inputs]
# state is [bsz, num_outputs] dimensional
# ---------- FEED FORWARD PART
# inference convolutional part
# transpose x axis first to work with CNN layer
x = x.transpose(1, 2)
pad = self.kwidth - 1
xp = F.pad(x, (pad, 0))
conv_h = self.conv(xp)
# split convolutional layer feature maps into zt (new state
# candidate) and forget activation ft
zt, ft = torch.chunk(conv_h, 2, dim=1)
# Convert forget gate into actual forget
ft = torch.sigmoid(ft)
# Convert zt into actual non-linear response
zt = torch.tanh(zt)
# ---------- SEQUENTIAL PART
# iterate through time now to make pooling
seqlen = ft.size(2)
if state is None:
# create the zero state
ht_1 = torch.zeros(ft.size(0), self.num_outputs, 1)
else:
# add the dim=2 to match 3D tensor shape
ht_1 = state.unsqueeze(2)
zts = torch.chunk(zt, zt.size(2), dim=2)
fts = torch.chunk(ft, ft.size(2), dim=2)
hts = []
for t in range(seqlen):
ht = ht_1 * fts[t] + (1 - fts[t]) * zts[t]
# transpose time, channels dims again to match RNN-like shape
hts.append(ht.transpose(1, 2))
# re-assign h[t-1] now
ht_1 = ht
# convert hts list into a 3D tensor [bsz, seq_len, num_outputs]
hts = torch.cat(hts, dim=1)
return hts, ht_1.squeeze(2)
fqrnn = fQRNNLayer(1, 100, 2)
x = torch.randn(1, 10, 1)
ht, state = fqrnn(x)
print('ht size: ', ht.size())
print('state size: ', state.size())
# + [markdown] id="jyks-SaXNgTl" colab_type="text"
# ### 13. AlexNet classifier
# + id="kI18xwc2pjnk" colab_type="code" colab={}
class AlexNet(nn.Module):
def __init__(self, num_classes=1000):
super(AlexNet, self).__init__()
self.features = nn.Sequential(
nn.Conv2d(3, 64, kernel_size=11, stride=4, padding=2),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=3, stride=2),
nn.Conv2d(64, 192, kernel_size=5, padding=2),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=3, stride=2),
nn.Conv2d(192, 384, kernel_size=3, padding=1),
nn.ReLU(inplace=True),
nn.Conv2d(384, 256, kernel_size=3, padding=1),
nn.ReLU(inplace=True),
nn.Conv2d(256, 256, kernel_size=3, padding=1),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=3, stride=2),
)
self.avgpool = nn.AdaptiveAvgPool2d((6, 6))
self.classifier = nn.Sequential(
nn.Dropout(),
nn.Linear(256 * 6 * 6, 4096),
nn.ReLU(inplace=True),
nn.Dropout(),
nn.Linear(4096, 4096),
nn.ReLU(inplace=True),
nn.Linear(4096, num_classes),
)
def forward(self, x):
x = self.features(x)
x = self.avgpool(x)
x = x.view(x.size(0), 256 * 6 * 6)
x = self.classifier(x)
return x
# + id="sM8v8GrFvwmw" colab_type="code" outputId="f65b90c5-538c-43f1-df2c-a0a5983f8a4f" colab={"base_uri": "https://localhost:8080/", "height": 54}
alexnet = AlexNet()
x = torch.randn(1, 3, 224, 224)
print('Input tensor x size: ', x.size())
y = alexnet(x)
print('Output tensor y size: ', y.size())
# + [markdown] id="PlRjHUfSNkVH" colab_type="text"
# ### 14. Residual connections
# + id="5kGF6Ysqwiit" colab_type="code" outputId="7ce59005-df36-44b4-a860-e2c340475ce2" colab={"base_uri": "https://localhost:8080/", "height": 54}
class ResLayer(nn.Module):
def __init__(self, num_inputs):
super().__init__()
self.num_inputs = num_inputs
num_outputs = num_inputs
self.num_outputs = num_outputs
self.conv1 = nn.Sequential(
nn.Conv2d(num_inputs, num_outputs, 3, padding=1),
nn.BatchNorm2d(num_outputs),
nn.ReLU(inplace=True)
)
self.conv2 = nn.Sequential(
nn.Conv2d(num_outputs, num_outputs, 3, padding=1),
nn.BatchNorm2d(num_outputs),
nn.ReLU(inplace=True)
)
self.out_relu = nn.ReLU(inplace=True)
def forward(self, x):
# non-linear processing trunk
conv1_h = self.conv1(x)
conv2_h = self.conv2(conv1_h)
# output is result of res connection + non-linear processing
y = self.out_relu(x + conv2_h)
return y
x = torch.randn(1, 64, 100, 100)
print('Input tensor x size: ', x.size())
reslayer = ResLayer(64)
y = reslayer(x)
print('Output tensor y size: ', y.size())
# + [markdown] id="__E0NQb9N2lW" colab_type="text"
# ### 15. Auto-Encoder Network
# + id="eF7gYhpNz3Q8" colab_type="code" outputId="ba362673-ce4a-4f15-d44b-478c8498798c" colab={"base_uri": "https://localhost:8080/", "height": 54}
class AE(nn.Module):
def __init__(self, num_inputs=784):
super().__init__()
self.encoder = nn.Sequential(
nn.Linear(num_inputs, 400),
nn.ReLU(inplace=True),
nn.Linear(400, 400),
nn.ReLU(inplace=True),
nn.Linear(400, 20)
)
self.decoder = nn.Sequential(
nn.Linear(20, 400),
nn.ReLU(inplace=True),
nn.Linear(400, 400),
nn.ReLU(inplace=True),
nn.Linear(400, num_inputs)
)
def forward(self, x):
return self.decoder(self.encoder(x))
ae = AE(784)
x = torch.randn(10, 784)
print('Input tensor x size: ', x.size())
y = ae(x)
print('Output tensor y size: ', y.size())
# + [markdown] id="anEbOUKTN-1H" colab_type="text"
# ### 16. Variational Auto-Encoder Network
# + id="drIFisw02OqR" colab_type="code" outputId="3c5cb817-0109-49b8-f02f-c20e28a36ddc" colab={"base_uri": "https://localhost:8080/", "height": 90}
# from https://github.com/pytorch/examples/blob/master/vae/main.py
class VAE(nn.Module):
def __init__(self):
super(VAE, self).__init__()
self.fc1 = nn.Linear(784, 400)
self.fc21 = nn.Linear(400, 20)
self.fc22 = nn.Linear(400, 20)
self.fc3 = nn.Linear(20, 400)
self.fc4 = nn.Linear(400, 784)
def encode(self, x):
h1 = F.relu(self.fc1(x))
return self.fc21(h1), self.fc22(h1)
def reparameterize(self, mu, logvar):
std = torch.exp(0.5*logvar)
eps = torch.randn_like(std)
return mu + eps*std
def decode(self, z):
h3 = F.relu(self.fc3(z))
return torch.sigmoid(self.fc4(h3))
def forward(self, x):
mu, logvar = self.encode(x.view(-1, 784))
z = self.reparameterize(mu, logvar)
return self.decode(z), mu, logvar
vae = VAE()
x = torch.randn(10, 784)
print('Input tensor x size: ', x.size())
y, mu, logvar = vae(x)
print('Input tensor y size: ', y.size())
print('Mean tensor mu size: ', mu.size())
print('Covariance tensor logvar size: ', logvar.size())
# + [markdown] id="pxGOvdLVOBKk" colab_type="text"
# ### 17. Deep Convolutional Auto-Encoder with skip connections (SEGAN G)
# + id="rnHMfxwKA65l" colab_type="code" outputId="6f86b706-0fa6-4d89-e113-f77f70bd5ccb" colab={"base_uri": "https://localhost:8080/", "height": 54}
class DownConv1dBlock(nn.Module):
def __init__(self, ninp, fmap, kwidth, stride):
super().__init__()
assert stride > 1, stride
self.kwidth = kwidth
self.conv = nn.Conv1d(ninp, fmap, kwidth, stride=stride)
self.act = nn.ReLU(inplace=True)
def forward(self, x):
# calculate padding with stride > 1
pad_left = self.kwidth // 2 - 1
pad_right = self.kwidth // 2
xp = F.pad(x, (pad_left, pad_right))
y = self.act(self.conv(xp))
return y
block = DownConv1dBlock(1, 1, 31, 4)
x = torch.randn(1, 1, 4000)
print('Input tensor x size: ', x.size())
y = block(x)
print('Output tensor y size: ', y.size())
# + id="aJHX1W_VA7v0" colab_type="code" outputId="318582ba-7256-4d10-b544-47d137c4d3bd" colab={"base_uri": "https://localhost:8080/", "height": 54}
class UpConv1dBlock(nn.Module):
def __init__(self, ninp, fmap, kwidth, stride, act=True):
super().__init__()
assert stride > 1, stride
self.kwidth = kwidth
pad = max(0, (stride - kwidth) // -2)
self.deconv = nn.ConvTranspose1d(ninp, fmap, kwidth,
stride=stride,
padding=pad)
if act:
self.act = nn.ReLU(inplace=True)
def forward(self, x):
h = self.deconv(x)
if self.kwidth % 2 != 0:
# drop last item for shape compatibility with TensorFlow deconvs
h = h[:, :, :-1]
if hasattr(self, 'act'):
y = self.act(h)
else:
y = h
return y
block = UpConv1dBlock(1, 1, 31, 4)
x = torch.randn(1, 1, 1000)
print('Input tensor x size: ', x.size())
y = block(x)
print('Output tensor y size: ', y.size())
# + id="rYw0SPRd3GRs" colab_type="code" outputId="c5b8b87e-c006-4bd2-ce2c-2ff90c036534" colab={"base_uri": "https://localhost:8080/", "height": 54}
class Conv1dGenerator(nn.Module):
def __init__(self, enc_fmaps=[64, 128, 256, 512], kwidth=31,
pooling=4):
super().__init__()
self.enc = nn.ModuleList()
ninp = 1
for enc_fmap in enc_fmaps:
self.enc.append(DownConv1dBlock(ninp, enc_fmap, kwidth, pooling))
ninp = enc_fmap
self.dec = nn.ModuleList()
# revert encoder feature maps
dec_fmaps = enc_fmaps[::-1][1:] + [1]
act = True
for di, dec_fmap in enumerate(dec_fmaps, start=1):
if di >= len(dec_fmaps):
# last decoder layer has no activation
act = False
self.dec.append(UpConv1dBlock(ninp, dec_fmap, kwidth, pooling, act=act))
ninp = dec_fmap
def forward(self, x):
skips = []
h = x
for ei, enc_layer in enumerate(self.enc, start=1):
h = enc_layer(h)
if ei < len(self.enc):
skips.append(h)
# now decode
for di, dec_layer in enumerate(self.dec, start=1):
if di > 1:
# sum skip connection
skip_h = skips.pop(-1)
h = h + skip_h
h = dec_layer(h)
y = h
return y
G = Conv1dGenerator()
x = torch.randn(1, 1, 8192)
print('Input tensor x size: ', x.size())
y = G(x)
print('Output tensor y size: ', y.size())
# + [markdown] id="R0yipK1wOKIE" colab_type="text"
# ### 18. DCGAN G and D
# + id="dbXceSoO6g3l" colab_type="code" outputId="0802ee5d-25df-488f-aeea-f73c468f51ab" colab={"base_uri": "https://localhost:8080/", "height": 54}
# from https://github.com/pytorch/examples/blob/master/dcgan/main.py
class Generator(nn.Module):
def __init__(self, nc=3):
super().__init__()
nz = 100
ngf = 64
self.main = nn.Sequential(
# input is Z, going into a convolution
nn.ConvTranspose2d(nz, ngf * 8, 4, 1, 0, bias=False),
nn.BatchNorm2d(ngf * 8),
nn.ReLU(True),
# state size. (ngf*8) x 4 x 4
nn.ConvTranspose2d(ngf * 8, ngf * 4, 4, 2, 1, bias=False),
nn.BatchNorm2d(ngf * 4),
nn.ReLU(True),
# state size. (ngf*4) x 8 x 8
nn.ConvTranspose2d(ngf * 4, ngf * 2, 4, 2, 1, bias=False),
nn.BatchNorm2d(ngf * 2),
nn.ReLU(True),
# state size. (ngf*2) x 16 x 16
nn.ConvTranspose2d(ngf * 2, ngf, 4, 2, 1, bias=False),
nn.BatchNorm2d(ngf),
nn.ReLU(True),
# state size. (ngf) x 32 x 32
nn.ConvTranspose2d( ngf, nc, 4, 2, 1, bias=False),
nn.Tanh()
# state size. (nc) x 64 x 64
)
def forward(self, input):
return self.main(input)
z = torch.randn(1, 100, 1, 1)
print('Input tensor z size: ', z.size())
G = Generator()
x = G(z)
print('Output tensor x size: ', x.size())
# + id="7xuh9ww3Inea" colab_type="code" outputId="4ea7940e-af66-48d9-dca0-c21f54131150" colab={"base_uri": "https://localhost:8080/", "height": 54}
class Discriminator(nn.Module):
def __init__(self, nc=3):
super(Discriminator, self).__init__()
ndf = 64
self.main = nn.Sequential(
# input is (nc) x 64 x 64
nn.Conv2d(nc, ndf, 4, 2, 1, bias=False),
nn.LeakyReLU(0.2, inplace=True),
# state size. (ndf) x 32 x 32
nn.Conv2d(ndf, ndf * 2, 4, 2, 1, bias=False),
nn.BatchNorm2d(ndf * 2),
nn.LeakyReLU(0.2, inplace=True),
# state size. (ndf*2) x 16 x 16
nn.Conv2d(ndf * 2, ndf * 4, 4, 2, 1, bias=False),
nn.BatchNorm2d(ndf * 4),
nn.LeakyReLU(0.2, inplace=True),
# state size. (ndf*4) x 8 x 8
nn.Conv2d(ndf * 4, ndf * 8, 4, 2, 1, bias=False),
nn.BatchNorm2d(ndf * 8),
nn.LeakyReLU(0.2, inplace=True),
# state size. (ndf*8) x 4 x 4
nn.Conv2d(ndf * 8, 1, 4, 1, 0, bias=False),
nn.Sigmoid()
)
def forward(self, input):
return self.main(input)
x = torch.randn(1, 3, 64, 64)
print('Input tensor x size: ', x.size())
D = Discriminator()
y = D(x)
print('Output tensor y size: ', y.size())
# + id="0IsQDwR5J1Ey" colab_type="code" colab={}
| Deep_Architectures.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# checking the current directory
import os
print(os.getcwd())
list(range(1,5))
# # -
#Setting the working directory
path = 'E:\\PYTHON'
os.chdir(path)
print(os.getcwd())
a,b,c = 5,4,'Goni'
print(a+b)
a=5
print(a, "is a type of", type(a))
# +
# Find the sum of all the multiples of 3 or 5 below 1000
sum = 0
for i in range(1,1000):
if(i % 3 ==0 or i% 5 ==0):
sum = sum + i
print(sum)
# +
# Warm-up for the next problem: iterate through the Fibonacci sequence
# 1, 2, 3, 5, 8, 13, 21, 34, 55, 89
fibo = 1
prev = 0
for i in range(11):
fibo = fibo + prev
prev= fibo - prev
print(fibo)
# +
# By considering the terms in the Fibonacci sequence whose values do not exceed four million,
# find the sum of the even-valued terms.
x=1
y=2
sum=0
while(y<=4000000):
if y%2==0:
sum+=y
x,y=y,x+y
print(sum)
# -
# python
# sets
a = {5,2,4,7,9}
a.__class__
import pandas as pd
mydata = pd.read_csv('Ins.csv')
mydata
# +
import pandas as pd
mydata = pd.read_csv('Ins.csv')
# check the number of rows in the dataframe
print(len(mydata))
# -
# check the dimensions (shape) of the dataframe
mydata.shape
print(mydata.columns)
md= mydata
mydata.head()
print(mydata.describe())
#sorting the data
test_data = mydata.sort_values(by = 'eq_site_limit', ascending = False)
#test sorted data
test_data.head(6)
# +
kk = mydata.groupby(['statecode'])
kk.first()
# -
test_data['statecode'][1:3]
# count the records with statecode 'FL'
test_data['statecode'][test_data["statecode"]=='FL'].count()
test_data['policyID']
mydata.info()
import pandas as pd
ABC_consumption_data = pd.read_csv('ABC_company_phone_data.csv')
TRAI_data = pd.read_csv('TRAI_data.csv')
ABC_consumption_data
TRAI_data
# +
Merged_data = pd.merge(ABC_consumption_data, TRAI_data, on= 'network', how='left')
print(len(Merged_data))
# -
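# A quick sanity check after the left join (an illustrative sketch): count missing values
# per column; NaNs in the columns that came from TRAI_data indicate 'network' values
# with no match (or gaps already present in the source data).
Merged_data.isna().sum()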
# checking the no of records
ABC_consumption_data.shape
print(len(ABC_consumption_data))
merged_data_stat = Merged_data.describe()
print(merged_data_stat)
print(Merged_data['network'].value_counts())
Merged_data.head()
# + active=""
# pd.DataFrame(Merged_data['duration']).describe(percentiles=(1, 0.99, 0.9, 0.75, 0.5, 0.3, 0.1, 0.01))
# -
duration_col_df = pd.DataFrame(Merged_data['duration'])
duration_col_df
# +
duration_mean = int(duration_col_df.mean())
IQR_duration_P75 = duration_col_df.quantile(q=0.75)
IQR_duration_P25 = duration_col_df.quantile(q=0.25)
IQR_duration = IQR_duration_P75 - IQR_duration_P25
IQR_LL = int(IQR_duration_P25 - 10*IQR_duration)
IQR_UL = int(IQR_duration_P75 + 10*IQR_duration)
# -
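# Using the bounds (an illustrative sketch; 'duration' is the only column of duration_col_df):
# flag calls whose duration falls outside the wide IQR fence computed above.
outliers = duration_col_df[(duration_col_df['duration'] < IQR_LL) |
                           (duration_col_df['duration'] > IQR_UL)]
print(len(outliers), 'potential outliers out of', len(duration_col_df), 'records')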
list(range(0,4))
# ## loc
md.loc()
md.loc[0:2,:]
md.loc[0:2,'county']
md.loc[:,['statecode','fl_site_limit']]
md.loc[1:5,'statecode':'fl_site_limit']
md.head().drop('hu_site_limit', axis=1)
md.loc[md['eq_site_limit'] == 0]
md.loc[md.eq_site_limit == 0, :]
md.loc[md.eq_site_limit == 0, 'county']
# ## iloc
md.iloc[:,[0,2]]
md.iloc[:,0:3]
list(range(1,99,12))
md.iloc[0:2,:]
# ## ix
# Note: .ix is deprecated (and removed in recent pandas versions); use label-based .loc instead
md.ix[0:2,'statecode':'hu_site_limit']
md.loc[0:2, 'statecode':'hu_site_limit']
# + active=""
# mydata.head()
# -
#
#
# # ****************PyCon 2019****************
# +
# Introduction to the datasets
# -
#Setting working directory
import os
path = 'E:\\PyCon-2019'
os.chdir(path)
print(os.getcwd())
import pandas as pd
import matplotlib.pyplot as plt
# %matplotlib inline
pd.__version__
td = pd.read_csv('ted.csv')
td.head(5)
td.shape
td.dtypes
td.isna().sum()
# # Which talks provoke the most online discussion?
td.columns
td.sort_values('comments').tail()
td['comment_per_view'] = td.comments / td.views
td.sort_values('comment_per_view').tail(1)
# +
# it isn't readable enough, so let's look at views_per_comment instead
# -
td['views_per_comment'] = td.views / td.comments
td.sort_values('views_per_comment').head()
# ==> 1. Consider the limitations and biases of the data when analysing it.
# ==> 2. Make your results understandable and readable on their own.
# ## Visualize the distribution of comments in the dataset provided
td.comments.plot(kind = 'hist')
td[td.comments >= 1000].shape
# +
# There are different ways you can produce this same plot
# -
td[td.comments < 1000].comments.plot(kind = 'hist', bins = 20)
# +
# There are other ways to plot the same histogram
# -
td.query('comments < 1000').comments.plot(kind = 'hist')
# +
# A third way to plot the graph, which is more readable and is the preferred way
# -
td.loc[td.comments < 1000, 'comments'].plot(kind = 'hist', bins = 20)
# +
# LESSONS LEARNED
# ==> Choose your plot type based on the questions you're answering and the data types you're working with:
# ==> Histograms are good for distributions
# ==> Bar plots are good for comparing categories
# ==> Line plots are good for time-series data
# ==> Scatter plots are good for comparing multiple numeric variables
# So we have to choose based on the type of data we're working with
# ==> Use pandas one-liners to iterate through plots quickly
# Pandas is calling Matplotlib under the hood, and it is generally faster and easier to write pandas plots than matplotlib
# plots. The downside is that this advice mainly applies to exploratory plots, because there are limits to how much you can
# customize them without using matplotlib directly
# ==> Try modifying the plot defaults,
# because it's rare that the default settings give exactly the plot we're looking for, i.e. the most informative
# plot
# ==> Creating plots involves decision making:
# you can't show everything in one visualization; a visualization is basically a summary
# -
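# A small illustration of the scatter-plot advice above (an added sketch using columns
# already in this dataset): two numeric variables plotted against each other.
td.plot(kind='scatter', x='views', y='comments')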
# # Plot the number of talks that took place each year
td.head()
td.event
td.event.sample(10)
# Note: not every event name includes the year, so we'll use film_date instead
td.film_date.head()
# Unix timestamp (seconds since the epoch)
pd.to_datetime(td.film_date ).head()
pd.to_datetime(td.film_date , unit='s').sample(6)
# +
# Looks like a correct datetime now
# -
# Let's create a new column
td['film_datetime'] = pd.to_datetime(td.film_date , unit='s')
td[['event', 'film_datetime']].sample(6)
# +
# Let's check whether the new column has the correct dtype
# -
td.dtypes
td.film_datetime.dt.year
# Working with dates via the .dt accessor is similar to transforming strings using the .str methods
td.event.str.lower()
td.film_datetime.dt.year.value_counts()
td.film_datetime.dt.year.value_counts().plot(kind = 'line')
# +
# Plots always draw the data in the order we give it to them. Here the order is arbitrary; that's why we're getting this jumbled plot.
# -
td.film_datetime.dt.year.value_counts().sort_index().plot(kind = 'line')
td.film_datetime.max()
# +
## Takeaways from the problem
# ==> Read the documentation (both the dataset's and pandas')
# ==> Use datetime datatypes for dates and times (1. they work well with plots, 2. they provide convenient attributes, 3. pandas has
# extensive functionality for working with time-series data)
# ==> Check your work as you go (use random sampling to spot-check whether conversions
# are working properly, rather than just looking at head or tail)
# ==> Consider excluding data that might not be relevant to you.
# -
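# A small illustration of the convenient datetime attributes mentioned above (an added
# sketch): the .dt accessor exposes components such as the month and the day of week.
td.film_datetime.dt.month.head()
td.film_datetime.dt.dayofweek.value_counts()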
#
# # What were the "best" events in TED history to attend?
#
#
td.dtypes
pd.to_datetime(td.film_date , unit='s').dt.year
td['event_year'] = td.film_datetime.dt.year
td.event_year.sample(8)
# +
# One idea: the "best" event is the one with the most talks, since more talks means more variety. So let's see
td.event.value_counts()
# But that's not the whole story.
# +
# Have a look at the talk quality
td.groupby('event').views.mean().sort_values().tail()
# But some of these events only have a few talks in them, so their means may not be meaningful;
# I want to know how many talks were given at each of the events
# +
# This chunk might help (after aggregating with 'count' and 'mean' the result is a DataFrame, so we need to pass 'mean' to sort_values)
td.groupby('event').views.agg(['count', 'mean']).sort_values('mean').tail()
# Since TEDxPuget Sound has only one talk, its mean is based on a single data point; it's still not clear this is what we're looking for
# -
# How about looking for the event with the highest total number of views?
td.groupby('event').views.agg(['count', 'mean','sum']).sort_values('sum').tail()
# +
# But again, views are biased in favor of older talks that have had a lot of time to accumulate views.
# Of course, talks with a lot of views aren't necessarily good talks. Maybe they're bad talks and people are sharing them to say how bad they are.
# More likely they're probably good talks, but we don't know that; the ratings data might be able to help us.
# +
## Lessons to take away from this
# Think creatively about how you're going to use the data you have to answer your questions. (When you do that, always acknowledge
# the weaknesses of the approach, so explicitly say: "I summed based on views, here is the weakness of that approach,
# but it's the best approach I could come up with.")
# Watch out for small sample sizes (i.e. use 'count' alongside 'mean' to look for meaningless means). So if you're considering
# using groupby and then selecting a mean, make sure you have a lot of data that you're taking the mean of (otherwise the mean
# doesn't tell you much); a sketch of that check follows below.
# -
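# +
# A minimal sketch of guarding against small samples, as suggested above: only look at
# mean views for events that have at least a handful of talks (the threshold of 5 is arbitrary).
event_stats = td.groupby('event').views.agg(['count', 'mean'])
event_stats[event_stats['count'] >= 5].sort_values('mean').tail()
# -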
# # Unpack the ratings data
td.ratings.head(8)
td.loc[0,'ratings']
td.ratings[0]
type(td.ratings[0])
# So each rating is basically a stringified list of dictionaries;
# we can parse it safely with the abstract syntax tree module (ast)
import ast
# Let's Test it...
ast.literal_eval('[1,2,3]')
type(ast.literal_eval('[1,2,3]'))
ast.literal_eval(td.loc[0, 'ratings'])
#OR
ast.literal_eval(td.ratings[0])
# Create a function
def str_to_list(ratings_str):
return ast.literal_eval(ratings_str)
str_to_list(td.ratings[0])
# Now, Let's apply it to every rating series
td.ratings.apply(str_to_list).head()
# Even simpler: pass ast.literal_eval directly, with no wrapper function
td.ratings.apply(ast.literal_eval).head()
# Create rating_list as a new feature
td['ratings_list'] = td.ratings.apply(lambda x: ast.literal_eval(x))
td.ratings_list[0]
td.dtypes
# +
## LESSONS LEARNED
# ==> Use apply anytime it's necessary: apply is often discouraged because it's slow compared to built-in pandas operations (sometimes
# you don't care about performance, so why not use something that works, but I would reach for it last rather than first). Don't use
# apply() when there is a built-in function: built-ins are going to be faster, more reliable, and probably
# better designed, documented and tested.
# ** The confusing thing about apply() is that there is a Series apply() method, a Series map() method which has some of the
# same functionality, a DataFrame apply method which is different, and a DataFrame applymap which is also different (see the short comparison below).
# ==> Pay attention to datatypes
# Datatypes impact every aspect of pandas functionality; you have to know what datatypes you're working with all the time,
# or you might end up using the wrong functions, or miss out on functions you could use.
# -
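# +
# A quick side-by-side of the four related methods mentioned above, using columns that
# already exist in `td` (the transformations themselves are just for illustration):
td.views.apply(lambda v: v / 1_000_000).head()          # Series.apply: element-wise function on a Series
td.event_year.map(lambda y: y - 2000).head()            # Series.map: element-wise, also accepts dicts/Series
td[['views', 'event_year']].apply(max)                  # DataFrame.apply: operates column-wise by default
td[['views', 'event_year']].applymap(float).head()      # DataFrame.applymap: element-wise over the whole frame
# -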
# # Count the total number of ratings received by each talk
# - new column named 'num_ratings'
#
#
#
td.ratings_list[0]
def get_num_rating(list_of_dicts):
num = 0
for d in list_of_dicts:
num = num + d['count']
return num
get_num_rating(td.ratings_list[0])
# Apply it to the whole dataset
td['num_ratings'] = td.ratings_list.apply(get_num_rating)
td['num_ratings']
pd.DataFrame(td.ratings_list[0])
pd.DataFrame(td.ratings_list[0])['count']
pd.DataFrame(td.ratings_list.index)
# Another way
pd.DataFrame(td.ratings_list[0])['count'].sum()
# +
#td.ratings_list.apply(lambda x: pd.DataFrame(td.ratings_list[x])['count'].sum() for x in range(pd.DataFrame(td.ratings_list.index)))
# -
td.num_ratings.describe()
td.ratings_list.apply(get_num_rating).to_csv('num_ratings.csv')
# Save the results
# # Which occupations delivered the funniest TED talks on average?
# - for each talk, calculate the share of "Funny" ratings
# - clean the occupation data so that there is only one occupation per talk
# ## Step 1: Count the number of funny ratings
td.ratings_list.head()
td.ratings.str.contains('Funny').value_counts()
# +
# So every record has a 'Funny' rating in it.
# -
def get_funny_ratings(list_of_dicts):
for d in list_of_dicts:
if d['name'] == 'Funny':
return d['count']
td['funny_ratings']= td.ratings_list.apply(get_funny_ratings)
td['funny_ratings']
# ## Step 2: Calculate the funny rate
td['funny_rate'] = td.funny_ratings / td.num_ratings
td['funny_rate'].head()
td.sort_values('funny_rate').speaker_occupation.tail(20)
# ## Step 3: Analyse the funny rate by occupation
td.groupby('speaker_occupation').funny_rate.mean().sort_values().tail()
td.speaker_occupation.describe()
# ## Step 4: Focus on occupations that are well represented in the data
td.speaker_occupation.value_counts()
occupation_counts = td.speaker_occupation.value_counts()
occupation_counts[occupation_counts >= 5]
top_occupations = occupation_counts[occupation_counts >= 5].index
type(top_occupations)
td[td.speaker_occupation.isin(top_occupations)]
ted_top_occupations = td[td.speaker_occupation.isin(top_occupations)]
ted_top_occupations.shape
ted_top_occupations.groupby('speaker_occupation').funny_rate.mean().sort_values()
# ### Weaknesses of this approach
# - Five is still a pretty small sample size.
# - Here we have a "Performance poet, multimedia artist" who has done at least 5 TED talks, but they all happen to be by the same person, and that
#   person is funny, so it doesn't tell us that performance poets / multimedia artists in general are funny
# - Having more than one profession in a single field is another issue here; those could be separated out and looked at (a sketch follows below).
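# +
# A hedged sketch of one way to address the multiple-profession issue noted above:
# split speaker_occupation on '/' and keep only the first listed occupation.
# (The column name 'primary_occupation' is made up for this example.)
td['primary_occupation'] = td.speaker_occupation.str.split('/').str[0].str.strip()
td.primary_occupation.value_counts().head()
# -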
# +
## Lessons takeaway
# ==> Check your assumptions about the data {like checking whether every talk has a 'Funny' rating or not}
# ==> Check for reasonable results {e.g. after creating the funny-rate calculation, look at whether it makes sense and matches our human
#     intuition}
# ==> Take advantage of the fact that pandas operations often output a Series or a DataFrame {because of that, we can chain operations,
#     i.e. apply further operations not just to the input but to the output too}
# ==> Watch out for small sample sizes
# ==> Consider the impact of missing data {pandas generally ignores missing values by default, so most calculations won't fail due to
#     missing values, but we need to be cognizant of them because pandas generally won't raise an error; we
#     need to be aware that it's doing the calculations on the non-missing data only (a quick check is shown below)}
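# +
# A quick check related to the missing-data point above: pandas skips NaNs in most
# aggregations by default, so it helps to count the missing values explicitly first.
td.isna().sum().sort_values(ascending=False).head()
# -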
| PyCon2019Playground.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import base64
import requests
import datetime
from urllib.parse import urlencode
client_id = ''
client_secret = ''
class SpotifyAPI(object):
access_token = None
access_token_expires = datetime.datetime.now()
access_token_did_expire = True
client_id = None
client_secret = None
token_url = "https://accounts.spotify.com/api/token"
def __init__(self, client_id, client_secret, *args, **kwargs):
super().__init__(*args, **kwargs)
self.client_id = client_id
self.client_secret = client_secret
def get_client_credentials(self):
"""
        RETURNS A BASE64-ENCODED STRING
"""
client_id = self.client_id
client_secret = self.client_secret
if client_id == None or client_secret == None:
raise Exception("TOU MUST SET client_id AND client_secret")
client_creds= f"{client_id}:{client_secret}"
client_creds_b64 = base64.b64encode(client_creds.encode())
return client_creds_b64.decode()
def get_token_headers(self):
client_creds_b64 = self.get_client_credentials()
return {
"Authorization": f"Basic {client_creds_b64}"
}
def get_token_data(self):
return {
"grant_type": "client_credentials"
}
def perform_auth(self):
token_url = self.token_url
token_data = self.get_token_data()
token_headers = self.get_token_headers()
r = requests.post(token_url, data=token_data, headers=token_headers)
# print(r.json())
if r.status_code not in range(200, 299):
raise Exception("COULD NOT AUTHENTICATE CLIENTE")
# return False
data = r.json()
now = datetime.datetime.now()
access_token = data['access_token']
expires_in = data['expires_in']
expires = now + datetime.timedelta(seconds=expires_in)
self.access_token = access_token
self.access_token_expires = expires
self.access_token_did_expire = expires < now
# print(f"access_token -> {access_token}")
# print(f"now -> {now}")
# print(f"expires_in -> {expires_in}")
# print(f"expires -> {expires}")
return True
def get_access_token(self):
token = self.access_token
expires = self.access_token_expires
now = datetime.datetime.now()
if expires < now:
self.perform_auth()
return self.get_access_token()
elif token == None:
self.perform_auth()
return self.get_access_token()
return token
def get_resource_header(self):
access_token = self.get_access_token()
headers = {
"Authorization": f"Bearer {access_token}"
}
return headers
def get_resource(self, lookup_id, resource_type="albums", version="v1"):
endpoint = f"https://api.spotify.com/{version}/{resource_type}/{lookup_id}"
print(f"endpoint -> {endpoint}")
headers = self.get_resource_header()
print(f"headers -> {headers}")
r = requests.get(endpoint, headers=headers)
if r.status_code not in range(200, 299):
print(r.status_code)
return {}
print(r.status_code)
return r.json()
def get_album(self, _id):
return self.get_resource(_id, resource_type="albums")
def get_artist(self, _id):
return self.get_resource(_id, resource_type="artists")
def base_search(self, query_params):
headers = self.get_resource_header()
endpoint_search = "https://api.spotify.com/v1/search"
lookup_url = f"{endpoint_search}?{query_params}"
print(lookup_url)
r = requests.get(lookup_url, headers=headers)
if r.status_code not in range(200, 299):
return {}
print(r.status_code)
return r.json()
def search(self, query=None, operator=None, operator_query=None, search_type='artist' ):
if query == None:
raise Exception("A query is required")
if isinstance(query, dict):
query = " ".join([f"{k}:{v}" for k,v in query.items()])
if operator != None and operator_query != None:
if operator.lower() == "or" or operator.lower() == "not":
operator = operator.upper()
if isinstance(operator_query, str):
query = f"{query} {operator} {operator_query}"
query_params = urlencode({"q": query, "type": search_type.lower()})
print(query_params)
return self.base_search(query_params)
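# +
# A hypothetical usage sketch of the client defined above; it only does anything if real
# credentials have been filled in, since client_id and client_secret are empty strings here.
if client_id and client_secret:
    spotify = SpotifyAPI(client_id, client_secret)
    print(spotify.search({"track": "Time"}, search_type="track"))
# -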
| n_day_python/the_spotify_api/notebooks/6 - Duplicate client.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# Python Imports (duplicates removed)
import json
import logging
import pickle
import re
import sys
from ast import literal_eval
from collections import defaultdict
from optparse import OptionParser
from time import time
import numpy as np
import pandas as pd
from pandas.io.json import json_normalize
from pymongo import MongoClient
from sklearn import metrics
from sklearn.cluster import KMeans, MiniBatchKMeans
from sklearn.datasets import fetch_20newsgroups
from sklearn.decomposition import TruncatedSVD
from sklearn.feature_extraction.text import CountVectorizer, HashingVectorizer, TfidfTransformer, TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity, linear_kernel
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import Normalizer
from surprise import SVD, Dataset, Reader, accuracy
from surprise.model_selection import cross_validate, train_test_split
from surprise.prediction_algorithms.knns import KNNWithZScore, KNNBaseline
from surprise.prediction_algorithms.matrix_factorization import NMF
# +
def convert_ids(ids_in_csv):
return pd.to_numeric(ids_in_csv, errors='coerce').astype('int64')
def convert_to_float(ids_in_csv):
return pd.to_numeric(ids_in_csv, errors='coerce').astype('float64')
def to_json(csv_entry):
return json.loads(re.sub('\'', '"', csv_entry))
def get_top_n(predictions, n=200):
'''SUPRISE API
Return the top-N recommendation for each user from a set of predictions.
Args:
predictions(list of Prediction objects): The list of predictions, as
returned by the test method of an algorithm.
n(int): The number of recommendation to output for each user. Default
is 10.
Returns:
A dict where keys are user (raw) ids and values are lists of tuples:
[(raw item id, rating estimation), ...] of size n.
'''
# First map the predictions to each user.
top_n = defaultdict(list)
for uid, iid, true_r, est, _ in predictions:
top_n[uid].append((iid, est))
# Then sort the predictions for each user and retrieve the k highest ones.
for uid, user_ratings in top_n.items():
user_ratings.sort(key=lambda x: x[1], reverse=True)
top_n[uid] = user_ratings[:n]
return top_n
def get_movie_name(movie_id):
return ratings_with_movie_names[ratings_with_movie_names.id == movie_id]['title'].iloc[0]
def print_user_prediction(userId, predictions_dict, meta_df):
users_viewed_movies = ratings_with_movie_names[ratings_with_movie_names['userId'] == userId][
['rating', 'original_title']]
print(f'User {userId} has viewed the following movies:\n')
for row in users_viewed_movies.itertuples():
rating = row[1]
original_title = row[2]
print(f'\t{original_title}, Rating: {rating}')
print(f'\nThe following movies are recommended for User {userId}\n')
recommended_movies = [get_movie_name(mov_id[0], meta_df) for mov_id in predictions_dict[userId]]
for movie in recommended_movies:
print(f'\t{movie}')
def get_movie_name(movie_id, movie_meta_df):
return movie_meta_df[movie_meta_df.id == movie_id]['title'].iloc[0]
def get_movie_id(title, movie_meta_df):
return movie_meta_df[movie_meta_df.title == title]['id'].iloc[0]
def get_all_movies_in_cluster(cluster_number, cluster_dict, meta_df):
movies = cluster_dict[cluster_number]
return [get_movie_name(mov, meta_df) for mov in movies]
def get_cluster_number(movie, cluster_zip):
for cluster, movie_id in cluster_zip:
if movie_id == movie:
return cluster
raise Exception('Movie not found in cluster')
# +
movies_metadata_df1 = pd.read_csv('the-movies-dataset/movies_metadata.csv'
, converters={ 'id': lambda x: convert_ids(x)
, 'imdb_id': lambda x: convert_ids(x)
,'popularity': lambda x: convert_to_float(x)
,'genres': lambda x: to_json(x)}
, usecols=['id', 'original_title'
, 'genres' #'homepage'
, 'overview', 'popularity', 'poster_path'
, 'release_date', 'revenue', 'runtime'
, 'spoken_languages', 'title'
, 'vote_average', 'vote_count']
, dtype={'populariy': np.float64}
, parse_dates=True)
movies_lookup_df = pd.read_csv('the-movies-dataset/movies_metadata.csv'
, converters={'id': lambda x: convert_ids(x), 'imdb_id': lambda x: convert_ids(x)}
,usecols=['id', 'title'])
#####################################
##SVD DATA SET
movies_df = pd.read_csv('the-movies-dataset/movies_metadata.csv'
, converters={'id': lambda x: convert_ids(x), 'imdb_id': lambda x: convert_ids(x)}
,usecols=['id', 'original_title', 'belongs_to_collection'
, 'budget', 'genres', 'homepage'
,'imdb_id', 'overview', 'popularity', 'poster_path'
, 'production_companies','release_date', 'revenue', 'runtime',
'spoken_languages', 'status', 'tagline', 'title', 'video',
'vote_average', 'vote_count'])
#####################################
ratings_df = pd.read_csv('the-movies-dataset/ratings_small.csv')
# content_filter_df = pd.read_pickle('content_filter_df.pkl')
# content_filter_df = content_filter_df[['id',
# 'popularity',
# #'release_date',
# 'vote_average',
# 'release_year',
# 0,1,2,3,4,5, 6, 7, 8,9,10,11,12,13,14,15,16,17,18,19]]
# content_filter_df = content_filter_df.dropna()
# idx = pd.Index(content_filter_df['id'])
# idx
# content_filter_df.index = idx
# -
###May need Fuzzy matching, but for now:
movies_df = movies_df[movies_df.spoken_languages == """[{'iso_639_1': 'en', 'name': 'English'}]"""]
ratings_with_movie_names = ratings_df.merge(movies_df[['id', 'original_title']], how='left', left_on='movieId', right_on='id')
ratings_with_movie_names = ratings_with_movie_names[ratings_with_movie_names.original_title.isnull() == False]
reader = Reader(rating_scale=(0, 5))
data = Dataset.load_from_df(ratings_with_movie_names[['userId', 'movieId', 'rating']], reader)
trainset = data.build_full_trainset()
testset = trainset.build_anti_testset()
# # SVD : Collaborative Filtering
# +
algo = SVD(verbose=True)
algo.fit(trainset)
cross_validate(algo, data, measures=['RMSE', 'MAE'], cv=5, n_jobs=-1, verbose=True)
# +
predictions = algo.test(testset)
### Tune this value to get fewer results faster, but less options to choose from
top_n = get_top_n(predictions)
predicted_movies_by_name = defaultdict(list)
### This builds the dictionary of predicted movies for all users
for key, value in top_n.items():
predicted_movies_by_name[key] = [get_movie_name(mov_id[0], movies_metadata_df1) for mov_id in value]
# +
from collections import namedtuple
UserFavoriteRating = namedtuple('UserFavoriteRating', ['title', 'rating'])
def users_top_n_movies(n, userId, predictions_dict, meta_df):
users_viewed_movies = ratings_with_movie_names[ratings_with_movie_names['userId'] == userId][['rating', 'original_title']]
viewed_movies = []
for row in users_viewed_movies.itertuples():
rating = row[1]
original_title = row[2]
film = UserFavoriteRating(original_title, rating)
viewed_movies.append(film)
sorted(viewed_movies, key=lambda film: film[1])
return viewed_movies[0:n]
# -
# # GET A USERS TOP RATED MOVIES
users_top_n_movies(6, 10, predicted_movies_by_name, movies_metadata_df1)
# # At this point, you should have user personas to get a pool of movies to choose from, not simply pre-made users:
print_user_prediction(47, top_n, movies_metadata_df1)
UserFavoriteRating = namedtuple('UserFavoriteRating', ['title', 'rating'])
def collab_filter_recommendations(user, top_ns, movie_meta_df):
predictions = top_ns[user]
return [UserFavoriteRating(get_movie_name(pred[0], movie_meta_df), pred[1]) for pred in predictions]
collab_filter_recommendations(47, top_n, movies_metadata_df1)
# # CONTENT FILTERING
movies_metadata_df = pd.read_csv('the-movies-dataset/movies_metadata.csv'
, converters={ 'id': lambda x: convert_ids(x)
, 'imdb_id': lambda x: convert_ids(x)
,'popularity': lambda x: convert_to_float(x)
,'genres': lambda x: to_json(x)}
, usecols=['id', 'original_title'
, 'genres' #'homepage'
, 'overview', 'popularity', 'poster_path'
, 'release_date', 'revenue', 'runtime'
, 'spoken_languages', 'title'
, 'vote_average', 'vote_count']
, dtype={'populariy': np.float64}
, parse_dates=True)
movies_metadata_df = movies_metadata_df.drop_duplicates(subset=['id'])
def get_recommendations(title, tfidf_matrix):
# Get the index of the movie that matches the title
idx = movies_df[title]
cosine_sim = linear_kernel(tfidf_matrix[idx], tfidf_matrix)
# Get the pairwsie similarity scores of all movies with that movie
sim_scores = list(enumerate(cosine_sim[0]))
pickle.dump(tfidf_matrix ,open("feature.pkl","wb"))
# Sort the movies based on the similarity scores
sim_scores = sorted(sim_scores, key=lambda x: x[1], reverse=True)
# Get the scores of the 10 most similar movies
sim_scores = sim_scores[0:21]
# Get the movie indices
movie_indices = [i[0] for i in sim_scores]
# Return the top 10 most similar movies
#print(idx, title , movies_metadata_df['title'].iloc[idx],movies_metadata_df['genres'].iloc[idx])
return movies_metadata_df['title'].iloc[movie_indices]
movies_metadata_df.shape
# +
# Load keywords and credits
credits = pd.read_csv('the-movies-dataset/credits.csv')
keywords = pd.read_csv('the-movies-dataset/keywords.csv')
credits = credits.drop_duplicates(subset=['id'])
keywords = keywords.drop_duplicates(subset=['id'])
# Convert IDs to int. Required for merging
keywords['id'] = keywords['id'].astype('int')
credits['id'] = credits['id'].astype('int')
movies_metadata_df['id'] = movies_metadata_df['id'].astype('int')
# Merge keywords and credits into your main metadata dataframe
movies_metadata_df = credits.merge(movies_metadata_df, on='id')
movies_metadata_df = movies_metadata_df.merge(keywords, on='id')
# -
movies_df = pd.Series(movies_metadata_df.index, index=movies_metadata_df['title']).drop_duplicates()
features = ['cast', 'crew', 'keywords', 'genres']
for feature in features:
movies_metadata_df[feature] = movies_metadata_df[feature].apply(literal_eval)
# +
def get_director(x):
for i in x:
if i['job'] == 'Director':
return i['name']
return np.nan
# Returns the list top 3 elements or entire list; whichever is more.
def get_list(x):
if isinstance(x, list):
names = [i['name'] for i in x]
#Check if more than 3 elements exist. If yes, return only first three. If no, return entire list.
if len(names) > 3:
names = names[:3]
return names
#Return empty list in case of missing/malformed data
return []
def clean_data(x):
if isinstance(x, list):
return [str.lower(i.replace(" ", "")) for i in x]
else:
#Check if director exists. If not, return empty string
if isinstance(x, str):
return str.lower(x.replace(" ", ""))
else:
return ''
def create_soup(x):
return ' '.join(x['keywords']) + ' ' + ' '.join(x['cast']) + ' ' + x['director'] + ' ' + ' '.join(x['genres'])
# +
# Define new director, cast, genres and keywords features that are in a suitable form.
movies_metadata_df['director'] = movies_metadata_df['crew'].apply(get_director)
features = ['cast', 'keywords', 'genres']
for feature in features:
movies_metadata_df[feature] = movies_metadata_df[feature].apply(get_list)
# +
# Apply clean_data function to your features.
features = ['cast', 'keywords', 'director', 'genres']
for feature in features:
movies_metadata_df[feature] = movies_metadata_df[feature].apply(clean_data)
# Create a new soup feature
movies_metadata_df['soup'] = movies_metadata_df.apply(create_soup, axis=1)
count = CountVectorizer(stop_words='english')
count_matrix = count.fit_transform(movies_metadata_df['soup'])
#count_matrix
cosine_sim = linear_kernel(count_matrix[0], count_matrix)
cosine_sim
# -
print(get_recommendations('Toy Story',count_matrix))
print_user_prediction(47, top_n, movies_metadata_df)
# # Graph Filter
# !pip install neo4j
# !pip install neomodel
# +
from neo4j import GraphDatabase
uri = "bolt://localhost:11002"
driver = GraphDatabase.driver(uri, auth=('neo4j', 'ayush'))
# -
session = driver.session()
query = "MATCH (n) RETURN count(n)"
no_count = session.run(query)
for i in no_count:
print(i)
print(no_count)
import json
from datetime import datetime
from pprint import pprint
import codecs
import os
import json
import logging
import random
from py2neo import Graph, Path, Node, Relationship, cypher
# +
#walk_limit defines the number of nodes it can hop
walk_limit = 5
#All types of relationships possible, will be helpful in defining probabilities
relationships = ["FAVORITE","GENRES","SIMILAR","WATCHED"]
# +
def generateRandomPath():
#query to select a random node to start with
query = '''
START t=node(*)
MATCH (a)-[]->(t)
RETURN a
SKIP {n} LIMIT 1'''
#get total number of nodes
node_count_query = "MATCH (n) RETURN count(n)"
node_count = 10371
session = driver.session()
#select a random node
#random_node = (session.run(query,{"n":random.randint(0,node_count - 1), "m":1}))
random_node = (session.run(query,{"n":862, "m":1}))
#print(type(random_node))
#x = random_node.single()[0]
# print(x)
# print(x.id)
#print(random_node.single()[0])
# for i in random_node:
# print (i)
r_node = random_node.single()[0]
return walk(r_node, 0) # start random walk and return the final node
# -
path = []
def walk(start_node, current_length, prev_node=None, max_length = walk_limit):
#print(type(start_node.labels))
if (start_node.labels == {'Movies'}):
#print(start_node)
path.append(start_node)
else:
current_length-=1
#return if path length exceed limit
if (current_length >= max_length):
return {start_node:current_length}
#query to get all incoming and outgoing paths from a node
query = """match p=(n)-[]-() where id(n)={id} return p"""
session = driver.session()
rels = session.run(query,{"id":start_node.id})
# for i in rels:
# print(i)
#Convert the output to a list
relationships = rels.data()
#print(relationships)
size = len(relationships)
#select a random node
node = random.randint(0,size-1)
next_node = relationships[node]["p"].end_node
#print(relationships[node]["p"].end_node)
current_node = relationships[node]["p"].start_node
#If next random node is similar to previous node, choose another
if(next_node.id == current_node.id and size > 1):
node = random.randint(0,size-1)
next_node = relationships[node]["p"].end_node
current_node = relationships[node]["p"].start_node
#if previous node is the only possible path, terminate
elif(next_node.id == current_node.id and size == 1):
return {current_node:current_length}
return walk(next_node, current_length + 1, current_node)
# +
print(generateRandomPath())
#for i in range(5):
for i in path:
print(i)
# -
# # GRAPH CODE HERE
# +
def get_count_first_degree_films_of(tx, title):
for record in tx.run("MATCH (origin:MOVIE)-[:APPEARED_IN]-(actor)-[:APPEARED_IN]-(first_movie:MOVIE)"
"WHERE origin.title = {title} "
"RETURN count(*)", title=title):
print(record[0])
def get_first_degree_films_of(tx, title):
nodes = []
for record in tx.run("MATCH (origin:MOVIE)-[:APPEARED_IN]-(actor)-[:APPEARED_IN]-(first_movie:MOVIE)"
"WHERE origin.title = {title} "
"RETURN first_movie", title=title):
nodes.append(record.data())
return nodes
first_degree_away = None
with driver.session() as session:
first_degree_away = session.read_transaction(get_first_degree_films_of, 'Superman')
# +
GraphMember = namedtuple('GraphMember', ['title', 'movie_id'])
def neo4j_results_to_tuples(results):
return [GraphMember(node['first_movie'].get('title'), node['first_movie'].get('movie_id')) for node in results]
# -
neo4j_results_to_tuples(first_degree_away)
def get_first_degree_films_of(tx, title):
nodes = []
for record in tx.run("MATCH (origin:MOVIE)-[:APPEARED_IN]-(actor)-[:APPEARED_IN]-(first_movie:MOVIE)"
"WHERE origin.title = {title} "
"RETURN first_movie", title=title):
nodes.append(record.data())
return nodes
# # Get First Degree Away Films of Sleepless in Seattle
first_degree_away = None
with driver.session() as session:
first_degree_away = session.read_transaction(get_first_degree_films_of, 'Sleepless in Seattle')
def neo4j_results_to_tuples(results):
return [GraphMember(node['first_movie'].get('title'), node['first_movie'].get('movie_id')) for node in results]
def get_connected_movies(list_favorite_movies, driver):
list_connected_movies = []
with driver.session() as session:
for movie in list_favorite_movies:
first_degree_away_films = session.read_transaction(get_first_degree_films_of, movie)
film_tups = neo4j_results_to_tuples(first_degree_away_films)
list_connected_movies.extend(film_tups)
return list_connected_movies
# ## Run a test with a user
user_number = 321
predictions_dict = predicted_movies_by_name
get_graph_on = users_top_n_movies(10, user_number, predictions_dict, movies_metadata_df)
get_graph_on
def get_top_three_favs(user_id):
favorite_seen_movie_array = users_top_n_movies(200, user_id, predicted_movies_by_name, movies_metadata_df1)
sorted_seen_movies = sorted(favorite_seen_movie_array, key=lambda k: k[1], reverse=True)
return sorted_seen_movies[0:3]
get_top_three_favs(47)
# # Collab Filter List
collab_filtered_movies = sorted(collab_filter_recommendations(47, top_n, movies_metadata_df1), key=lambda k: k[1], reverse=True)
collab_filtered_movies
# ## Content Filtered
# +
# #get_movie_id('The Endless Summer', movies_metadata_df1)
# content_filtered_tups = top_n_closest_content_filtered(50, 321, content_filter_df)
# content_filtered_tups
# -
content_filtered_tups = get_recommendations('<NAME>',count_matrix)
content_filtered_tups
favorite_list = get_top_three_favs(321)
movie_names = [movie[0] for movie in favorite_list]
first_degree_away_films = get_connected_movies(movie_names, driver)
graph_id_content_filter = set({movie[1] for movie in first_degree_away_films})
graph_name_collab_filter = set({movie[0] for movie in first_degree_away_films})
content_id_set = set(movie[0] for movie in content_filtered_tups)
collab_movie_name_set = set(movie[0] for movie in collab_filtered_movies)
graph_name_collab_filter & collab_movie_name_set
content_id_set & graph_id_content_filter
get_movie_name(550, movies_metadata_df1)
| notebook/Hybrid Filtering.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
# # Parameter selection, Validation, and Testing
# Most models have parameters that influence how complex a model they can learn. Remember using `KNeighborsRegressor`.
# If we change the number of neighbors we consider, we get a smoother and smoother prediction:
# <img src="figures/plot_kneigbors_regularization.png" width="100%">
# In the above figure, we see fits for three different values of ``n_neighbors``.
# For ``n_neighbors=2``, the data is overfit, the model is too flexible and can adjust too much to the noise in the training data. For ``n_neighbors=20``, the model is not flexible enough, and can not model the variation in the data appropriately.
#
# In the middle, for ``n_neighbors = 5``, we have found a good mid-point. It fits
# the data fairly well, and does not suffer from the overfit or underfit
# problems seen in the figures on either side. What we would like is a
# way to quantitatively identify overfit and underfit, and optimize the
# hyperparameters (in this case, the number of neighbors) in order to
# determine the best model (a short sketch reproducing the figure above appears a little further down).
#
# We trade off remembering too much about the particularities and noise of the training data vs. not modeling enough of the variability. This is a trade-off that needs to be made in basically every machine learning application and is a central concept, called bias-variance-tradeoff or "overfitting vs underfitting".
# <img src="figures/overfitting_underfitting_cartoon.svg" width="100%">
#
# ## Hyperparameters, Over-fitting, and Under-fitting
#
# Unfortunately, there is no general rule how to find the sweet spot, and so machine learning practitioners have to find the best trade-off of model-complexity and generalization by trying several hyperparameter settings. Hyperparameters are the internal knobs or tuning parameters of a machine learning algorithm (in contrast to model parameters that the algorithm learns from the training data -- for example, the weight coefficients of a linear regression model); the number of *k* in K-nearest neighbors is such a hyperparameter.
#
# Most commonly this "hyperparameter tuning" is done using a brute force search, for example over multiple values of ``n_neighbors``:
#
# +
from sklearn.model_selection import cross_val_score, KFold
from sklearn.neighbors import KNeighborsRegressor
# generate toy dataset:
x = np.linspace(-3, 3, 100)
rng = np.random.RandomState(42)
y = np.sin(4 * x) + x + rng.normal(size=len(x))
X = x[:, np.newaxis]
cv = KFold(shuffle=True)
# for each parameter setting do cross-validation:
for n_neighbors in [1, 3, 5, 10, 20]:
scores = cross_val_score(KNeighborsRegressor(n_neighbors=n_neighbors), X, y, cv=cv)
print("n_neighbors: %d, average score: %f" % (n_neighbors, np.mean(scores)))
# +
rng = np.random.RandomState(seed=1234)
x = np.linspace(-3,3,100)
y = np.sin(4*x) + x + rng.normal(size=len(x))
cv = KFold(shuffle=True)
for n in [1,3,5,10]:
clf = KNeighborsRegressor(n)
score = cross_val_score(clf,X,y,cv=cv)
print("for neigh: %d score %f"%(n,np.mean(score)))
# -
# There is a function in scikit-learn, called ``validation_curve``, to reproduce the cartoon figure above. It plots one parameter, such as the number of neighbors, against training and validation error (using cross-validation):
from sklearn.model_selection import validation_curve
n_neighbors = [1, 3, 5, 10, 20, 50]
train_scores, test_scores = validation_curve(KNeighborsRegressor(), X, y, param_name="n_neighbors",
param_range=n_neighbors, cv=cv)
plt.plot(n_neighbors, train_scores.mean(axis=1), 'b', label="train accuracy")
plt.plot(n_neighbors, test_scores.mean(axis=1), 'g', label="test accuracy")
plt.ylabel('Accuracy')
plt.xlabel('Number of neighbors')
plt.xlim([50, 0])
plt.legend(loc="best");
# <div class="alert alert-warning">
# Note that many neighbors mean a "smooth" or "simple" model, so the plot uses a reversed x-axis.
# </div>
# If multiple parameters are important, like the parameters ``C`` and ``gamma`` in an ``SVM`` (more about that later), all possible combinations are tried:
# +
from sklearn.model_selection import cross_val_score, KFold
from sklearn.svm import SVR
# for each parameter setting do cross-validation:
for C in [0.001, 0.01, 0.1, 1, 10]:
for gamma in [0.001, 0.01, 0.1, 1]:
scores = cross_val_score(SVR(C=C, gamma=gamma), X, y, cv=cv)
print("C: %f, gamma: %f, average score: %f" % (C, gamma, np.mean(scores)))
# -
# As this is such a very common pattern, there is a built-in class for this in scikit-learn, ``GridSearchCV``. ``GridSearchCV`` takes a dictionary that describes the parameters that should be tried and a model to train.
#
# The grid of parameters is defined as a dictionary, where the keys are the parameters and the values are the settings to be tested.
# +
from sklearn.model_selection import GridSearchCV
param_grid = {'C': [0.001, 0.01, 0.1, 1, 10], 'gamma': [0.001, 0.01, 0.1, 1]}
grid = GridSearchCV(SVR(), param_grid=param_grid, cv=cv,verbose=3)
# -
# One of the great things about GridSearchCV is that it is a *meta-estimator*. It takes an estimator like SVR above, and creates a new estimator, that behaves exactly the same - in this case, like a regressor.
# So we can call ``fit`` on it, to train it:
grid.fit(X, y)
# What ``fit`` does is a bit more involved than what we did above. First, it runs the same loop with cross-validation, to find the best parameter combination.
# Once it has the best combination, it runs fit again on all data passed to fit (without cross-validation), to build a single new model using the best parameter setting.
# Then, as with all models, we can use ``predict`` or ``score``:
#
grid.predict(X)
# You can inspect the best parameters found by ``GridSearchCV`` in the ``best_params_`` attribute, and the best score in the ``best_score_`` attribute:
print(grid.best_score_)
print(grid.best_params_)
# But you can investigate the performance and much more for each set of parameter values by accessing the `cv_results_` attribute. The `cv_results_` attribute is a dictionary where each key is a string and each value is an array. It can therefore be used to make a pandas DataFrame.
type(grid.cv_results_)
print(grid.cv_results_.keys())
# +
import pandas as pd
cv_results = pd.DataFrame(grid.cv_results_)
cv_results.head()
# -
cv_results_tiny = cv_results[['param_C', 'param_gamma', 'mean_test_score']]
cv_results_tiny.sort_values(by='mean_test_score', ascending=False).head()
# There is a problem with using this score for evaluation, however. You might be making what is called a multiple hypothesis testing error. If you try very many parameter settings, some of them will work better just by chance, and the score that you obtained might not reflect how your model would perform on new unseen data.
# Therefore, it is good to split off a separate test-set before performing grid-search. This pattern can be seen as a training-validation-test split, and is common in machine learning:
# <img src="figures/grid_search_cross_validation.svg" width="100%">
# We can do this very easily by splitting off some test data using ``train_test_split``, training ``GridSearchCV`` on the training set, and applying the ``score`` method to the test set:
# +
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=1)
param_grid = {'C': [0.001, 0.01, 0.1, 1, 10], 'gamma': [0.001, 0.01, 0.1, 1]}
cv = KFold(n_splits=10, shuffle=True)
grid = GridSearchCV(SVR(), param_grid=param_grid, cv=cv)
grid.fit(X_train, y_train)
grid.score(X_test, y_test)
# -
# We can also look at the parameters that were selected:
grid.best_params_
# Some practitioners go for an easier scheme, splitting the data simply into three parts: training, validation and testing. This is a possible alternative if your training set is very large, or if it is infeasible to train many models using cross-validation because training a model takes very long.
# You can do this with scikit-learn, for example, by splitting off a test-set and then applying GridSearchCV with ShuffleSplit cross-validation with a single iteration:
#
# <img src="figures/train_validation_test2.svg" width="100%">
# +
from sklearn.model_selection import train_test_split, ShuffleSplit
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=1)
param_grid = {'C': [0.001, 0.01, 0.1, 1, 10], 'gamma': [0.001, 0.01, 0.1, 1]}
single_split_cv = ShuffleSplit(n_splits=1)
grid = GridSearchCV(SVR(), param_grid=param_grid, cv=single_split_cv, verbose=3)
grid.fit(X_train, y_train)
grid.score(X_test, y_test)
# -
# This is much faster, but might result in worse hyperparameters and therefore worse results.
clf = GridSearchCV(SVR(), param_grid=param_grid)
clf.fit(X_train, y_train)
clf.score(X_test, y_test)
# <div class="alert alert-success">
# <b>EXERCISE</b>:
# <ul>
# <li>
# Apply grid-search to find the best setting for the number of neighbors in ``KNeighborsClassifier``, and apply it to the digits dataset.
# </li>
# </ul>
# </div>
# +
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import GridSearchCV
from sklearn.datasets import load_digits
digits = load_digits()
X,y = digits.data, digits.target
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=1234)
# -
params = {'n_neighbors':[1,2,5,8,12,20]}
clf = GridSearchCV(KNeighborsClassifier(),params,cv=5,verbose=3)
clf.fit(X_train,y_train)
print(clf.best_params_, clf.best_score_)
# +
# # %load solutions/14_grid_search.py
from sklearn.datasets import load_digits
from sklearn.neighbors import KNeighborsClassifier
digits = load_digits()
X_train, X_test, y_train, y_test = train_test_split(digits.data, digits.target, random_state=0)
param_grid = {'n_neighbors': [1, 3, 5, 10, 50]}
gs = GridSearchCV(KNeighborsClassifier(), param_grid=param_grid, cv=5, verbose=3)
gs.fit(X_train, y_train)
print("Score on test set: %f" % gs.score(X_test, y_test))
print("Best parameters: %s" % gs.best_params_)
# -
print(gs.best_params_, gs.best_score_)
| notebooks/14.Model_Complexity_and_GridSearchCV.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/grzegorzkwolek/DS-Unit-1-Sprint-1-Dealing-With-Data/blob/master/GKwol_LS_DSPT3_111_A_First_Look_at_Data.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="Okfr_uhwhS1X" colab_type="text"
# # Lambda School Data Science - A First Look at Data
#
#
# + [markdown] id="h-NQkrR98dYQ" colab_type="text"
# Datasets:
# https://archive.ics.uci.edu/ml/index.php
# www.quandl.com
#
# df[condition] (e.g. df[df['col'] > 0]) creates a subset DataFrame of the rows meeting the condition (small example below)
#
# plt.scatter
# plt.plot
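# +
# A small, self-contained illustration of the notes above (the toy data is made up for
# this example): boolean-mask filtering, then plt.scatter and plt.plot.
import pandas as pd
import matplotlib.pyplot as plt
toy = pd.DataFrame({'x': [1, 2, 3, 4, 5], 'y': [2, 4, 1, 8, 7]})
subset = toy[toy['y'] > 3]            # keep only rows where the condition holds
plt.scatter(toy['x'], toy['y'])       # plt.scatter draws individual points
plt.plot(subset['x'], subset['y'])    # plt.plot draws a connected line
plt.show()
# -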
# + [markdown] id="9dtJETFRhnOG" colab_type="text"
# ## Lecture - let's explore Python DS libraries and examples!
#
# The Python Data Science ecosystem is huge. You've seen some of the big pieces - pandas, scikit-learn, matplotlib. What parts do you want to see more of?
# + id="3WGsmJsN9QCL" colab_type="code" colab={}
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# + id="WiBkgmPJhmhE" colab_type="code" outputId="471fba89-aa50-426d-879f-31fab1fed4d8" colab={"base_uri": "https://localhost:8080/", "height": 34}
2 + 2
# + [markdown] id="lOqaPds9huME" colab_type="text"
# ## Assignment - now it's your turn
#
# Pick at least one Python DS library, and using documentation/examples reproduce in this notebook something cool. It's OK if you don't fully understand it or get it 100% working, but do put in effort and look things up.
# + id="TGUS79cOhPWj" colab_type="code" colab={}
# TODO - your code here
# Use what we did live in lecture as an example
# + [markdown] id="BT9gdS7viJZa" colab_type="text"
# ### Assignment questions
#
# After you've worked on some code, answer the following questions in this text block:
#
# 1. Describe in a paragraph of text what you did and why, as if you were writing an email to somebody interested but nontechnical.
#
# 2. What was the most challenging part of what you did?
#
# 3. What was the most interesting thing you learned?
#
# 4. What area would you like to explore with more time?
#
#
#
# + [markdown] id="_XXg2crAipwP" colab_type="text"
# ## Stretch goals and resources
#
# Following are *optional* things for you to take a look at. Focus on the above assignment first, and make sure to commit and push your changes to GitHub (and since this is the first assignment of the sprint, open a PR as well).
#
# - [pandas documentation](https://pandas.pydata.org/pandas-docs/stable/)
# - [scikit-learn documentation](http://scikit-learn.org/stable/documentation.html)
# - [matplotlib documentation](https://matplotlib.org/contents.html)
# - [Awesome Data Science](https://github.com/bulutyazilim/awesome-datascience) - a list of many types of DS resources
#
# Stretch goals:
#
# - Find and read blogs, walkthroughs, and other examples of people working through cool things with data science - and share with your classmates!
# - Write a blog post (Medium is a popular place to publish) introducing yourself as somebody learning data science, and talking about what you've learned already and what you're excited to learn more about.
| module1-afirstlookatdata/GKwol_LS_DSPT3_111_A_First_Look_at_Data.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="yitlha1UimxW"
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
# + [markdown] id="dDjOKyerisZe"
# ### Building a dataset
# + colab={"base_uri": "https://localhost:8080/", "height": 296} id="3FKdszaVirdC" outputId="eaa3073f-b85a-4186-f755-0a1888bc2090"
Sx = np.array([0, 1, 2.5, 3, 4, 5])
Sy = np.array([0.6, 0, 2, 2.2, 4.7, 5])
# Plotting in graph
plt.scatter(Sx, Sy)
# Graph axis names and grids
plt.grid(True)
plt.xlabel('Sx')
plt.ylabel('Sy')
# + [markdown] id="HyQCP2XokTqq"
# Let's assume a line
#
# $$y = mx + c$$
#
# Where $m$ and $c$ are unknown, which we are trying to find.
#
# We assume a random value for $m$ and $c$ ($m = 2$ and $c = 2$)
# + id="TXXK2p4Gi9-0"
m = tf.Variable(2, dtype=tf.float32)
c = tf.Variable(2, dtype=tf.float32)
def line_fn(x):
return m*x + c
# + colab={"base_uri": "https://localhost:8080/", "height": 296} id="fOmpTrMpoTPV" outputId="650ae5a6-123c-4a17-ca47-0747d46f6c50"
p = np.arange(0, 5, 0.1)
plt.plot(p, line_fn(p).numpy())
# Plotting in graph
plt.scatter(Sx, Sy)
# Graph axis names and grids
plt.grid(True)
plt.xlabel('Sx')
plt.ylabel('Sy')
# + [markdown] id="uIYUfoD88l3F"
# ## Gradient descending algorithm:
# $$m_{t} = m_{t-1} - lr \; \frac{\partial \;\; loss(l(x), y)}{\partial m} $$
#
# $$loss(l(x), y) = (l(x) - y)^2$$
#
# #### Here,
#
# * $t$ = Time step
# * $x$ = Input
# * $y$ = Output
# * $m$ = Updatable variable
# * $loss(\cdot, \cdot)$ = Loss function
# * $lr$ = Learning rate
# * $l(\cdot)$ = Line function
# + [markdown] id="c5xNkNpTAVm0"
# #### Partial derivatives:
#
# $\frac{\partial \;\; loss(l(x), y)}{\partial m} = \frac{\partial}{\partial m}(l(x) - y)^2$
# $ = \frac{\partial}{\partial m}(mx+c-y)^2$
# $ = 2(mx+c-y)\,x$
#
# $\frac{\partial \;\; loss(l(x), y)}{\partial c} = \frac{\partial}{\partial c}(l(x) - y)^2$
# $ = \frac{\partial}{\partial c}(mx+c-y)^2$
# $ = 2(mx+c-y)$
#
# A NumPy-only version of the resulting update rule is sketched below, before the TensorFlow loop.
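# +
# A minimal NumPy-only sketch of the same update rule, mirroring the TensorFlow loop
# below: tape.gradient of the element-wise loss sums the per-point gradients, so the
# np.sum calls here match that behaviour (the exact numbers are illustrative only).
m_np, c_np, lr_np = 2.0, 2.0, 0.01
for _ in range(100):
    diff = (m_np * Sx + c_np) - Sy
    m_np -= lr_np * np.sum(2 * diff * Sx)
    c_np -= lr_np * np.sum(2 * diff)
print(m_np, c_np)
# -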
# + colab={"base_uri": "https://localhost:8080/"} id="n3HPcI68kPXS" outputId="86c6a274-fc0b-4aeb-cd04-a92c75b2ff6a"
# learning rate
lr = 0.01
total_steps = 100
for step in range(total_steps):
print(f"Step {step+1:2}:")
print("-"*30)
with tf.GradientTape() as tape:
# Printing value of the variables
print(f"M: {m.numpy():.4f}, C: {c.numpy():.4f}")
# Stating what variables need to be partially differentiated and calibrated
tape.watch([m, c])
# Passing the points to the line function
pred_y = line_fn(Sx)
# Calculating the difference/loss of the output (pred_y) of the function
# w.r.t. the known output (Sy)
loss = (pred_y - Sy) * (pred_y - Sy)
# Calculating the gradients w.r.t. the partially diff. parameters
# and the generated output loss
grads = tape.gradient(loss, [m, c])
    # Showing the output just for educational purposes
print(f"M_grad:, {grads[0].numpy():.2f}, C_grad: {grads[1].numpy():.2f}")
# Updating the gradients
m = m - lr * grads[0]
c = c - lr * grads[1]
print()
# + [markdown] id="TxHoUb4VBARF"
# ## Lets check the final result
# + colab={"base_uri": "https://localhost:8080/", "height": 296} id="rh5HNbytmd6W" outputId="df383f61-eba2-4a76-ab0e-4e029036e00b"
p = np.arange(0, 5, 0.1)
plt.plot(p, line_fn(p).numpy())
# Plotting in graph
plt.scatter(Sx, Sy)
# Graph axis names and grids
plt.grid(True)
plt.xlabel('Sx')
plt.ylabel('Sy')
# + id="HDsw4mslo2CC"
| Day 1/LineAssumption.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="zX4Kg8DUTKWO"
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# + id="ioLbtB3uGKPX" executionInfo={"status": "ok", "timestamp": 1604593151781, "user_tz": -60, "elapsed": 1017, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "08618602917479372870"}}
try:
# # %tensorflow_version only exists in Colab.
# %tensorflow_version 2.x
except Exception:
pass
# + id="iSq4t32ZHHpt" executionInfo={"status": "ok", "timestamp": 1604593233562, "user_tz": -60, "elapsed": 67039, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "08618602917479372870"}} outputId="754d8d81-f1fb-49c9-fca0-34a9d6aeb57b" colab={"base_uri": "https://localhost:8080/", "height": 641, "referenced_widgets": ["77d82689fb7b459d88c75c7b7cbdb916", "1d59461cf4564a51a5aa777faa5a4416", "a7dba674adc34647aa29c3e62293062a", "df986f45c0b64358a7e10c37ac31cd73", "b7066432bb0e4d54bf6a0e0d473b2deb", "<KEY>", "c272f254efe94e8e895608b5043faed1", "<KEY>", "b030e0aa7494479bb12e584a0c450b64", "<KEY>", "d5a6fca30307419cacdeda588f1224d8", "d3b8bf05ccc847ba9595d942cf746c6c", "f7bfad247b2041a18cc3a9ab07ab6a8b", "ebb6c91ef5364283826681256ed523b4", "<KEY>", "659948afcdcf4b7eab8908d2fff9c4cf", "c31f8374d69c40b69e2617be14c07d73", "<KEY>", "ef6ff1631b1d4a87af7ad064113ecb7a", "5db4e501e0d140fe8c51a09752750fbb", "<KEY>", "6a4098031e0d4168977dbecc3ae85fd3", "<KEY>", "f751e99a207a443294c4267e75d1d3d8", "<KEY>", "9614f847fa5b4cadb6b0f1172c4c475c", "<KEY>", "<KEY>", "ffb00651ea984828972b243e3e388d57", "49c51a1efeea4cd7bba6488f6ab250be", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "face3e669ce146de84bb6fc71d2d4c0c", "bf927d9b2e24426a9de53ae602cb5073", "3b26da40e6ba4f53a2f169a882b77821", "ba745db8f562411082e8868116a5781b"]}
import tensorflow as tf
import tensorflow_datasets as tfds
import tensorflow_addons as tfa
data = tfds.load('horses_or_humans', split='train', as_supervised=True)
val_data = tfds.load('horses_or_humans', split='test', as_supervised=True)
train_batches = data.shuffle(100).batch(32)
validation_batches = val_data.batch(32)
model = tf.keras.models.Sequential([
tf.keras.layers.Conv2D(16, (3,3), activation='relu',
input_shape=(300, 300, 3)),
tf.keras.layers.MaxPooling2D(2, 2),
tf.keras.layers.Conv2D(32, (3,3), activation='relu'),
tf.keras.layers.MaxPooling2D(2,2),
tf.keras.layers.Conv2D(64, (3,3), activation='relu'),
tf.keras.layers.MaxPooling2D(2,2),
tf.keras.layers.Conv2D(64, (3,3), activation='relu'),
tf.keras.layers.MaxPooling2D(2,2),
tf.keras.layers.Conv2D(64, (3,3), activation='relu'),
tf.keras.layers.MaxPooling2D(2,2),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(512, activation='relu'),
tf.keras.layers.Dense(1, activation='sigmoid')
])
model.compile(optimizer='Adam', loss='binary_crossentropy', metrics=['accuracy'])
history = model.fit(train_batches, epochs=10, validation_data=validation_batches, validation_steps=1)
# + id="1cxYOtd_NKiJ"
| Copia di horse-or-human.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## List Slicing
# *Suggested Answers follow (usually there are multiple ways to solve a problem in Python).*
Numbers = [15, 40, 50, 100, 115, 140]
# Using list slicing, obtain the numbers 100 and 115.
Numbers[3:5]
# Using slicing, extract the first four elements from the list.
Numbers[:4]
# Using slicing, extract all the elements from the list from the 3rd position onwards.
Numbers[3:]
# Using slicing, extract the last 4 elements from the list.
Numbers[-4:]
# What is the position of the value 15?
Numbers.index(15)
# Create a list, called "Two_Numbers". Let its elements be the values 1 and 2. Then, create a new one, named "All_Numbers", that will containt both the "Numbers" and the "Two_Numbers" lists.
Two_Numbers = [1,2]
All_Numbers = [Two_Numbers, Numbers]
All_Numbers
# Sort all the numbers in the "Numbers" list from the largest to the smallest.
Numbers.sort(reverse=True)
Numbers
| 11 - Introduction to Python/7_Sequences/3_List Slicing (4:30)/List Slicing - Solution_Py3.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python
# language: python
# name: conda-env-python-py
# ---
# <a href="https://www.bigdatauniversity.com"><img src="https://ibm.box.com/shared/static/cw2c7r3o20w9zn8gkecaeyjhgw3xdgbj.png" width="400" align="center"></a>
#
# <h1 align=center><font size="5"> SVM (Support Vector Machines)</font></h1>
# In this notebook, you will use SVM (Support Vector Machines) to build and train a model using human cell records, and classify cells according to whether the samples are benign or malignant.
#
# SVM works by mapping data to a high-dimensional feature space so that data points can be categorized, even when the data are not otherwise linearly separable. A separator between the categories is found, then the data is transformed in such a way that the separator could be drawn as a hyperplane. Following this, characteristics of new data can be used to predict the group to which a new record should belong.
# <h1>Table of contents</h1>
#
# <div class="alert alert-block alert-info" style="margin-top: 20px">
# <ol>
# <li><a href="#load_dataset">Load the Cancer data</a></li>
# <li><a href="#modeling">Modeling</a></li>
# <li><a href="#evaluation">Evaluation</a></li>
# <li><a href="#practice">Practice</a></li>
# </ol>
# </div>
# <br>
# <hr>
import pandas as pd
import pylab as pl
import numpy as np
import scipy.optimize as opt
from sklearn import preprocessing
from sklearn.model_selection import train_test_split
# %matplotlib inline
import matplotlib.pyplot as plt
# + [markdown] button=false new_sheet=false run_control={"read_only": false}
# <h2 id="load_dataset">Load the Cancer data</h2>
# The example is based on a dataset that is publicly available from the UCI Machine Learning Repository (Asuncion and Newman, 2007)[http://mlearn.ics.uci.edu/MLRepository.html]. The dataset consists of several hundred human cell sample records, each of which contains the values of a set of cell characteristics. The fields in each record are:
#
# |Field name|Description|
# |--- |--- |
# |ID|Patient identifier|
# |Clump|Clump thickness|
# |UnifSize|Uniformity of cell size|
# |UnifShape|Uniformity of cell shape|
# |MargAdh|Marginal adhesion|
# |SingEpiSize|Single epithelial cell size|
# |BareNuc|Bare nuclei|
# |BlandChrom|Bland chromatin|
# |NormNucl|Normal nucleoli|
# |Mit|Mitoses|
# |Class|Benign or malignant|
#
# <br>
# <br>
#
# For the purposes of this example, we're using a dataset that has a relatively small number of predictors in each record. To download the data, we will use `!wget` to download it from IBM Object Storage.
# __Did you know?__ When it comes to Machine Learning, you will likely be working with large datasets. As a business, where can you host your data? IBM is offering a unique opportunity for businesses, with 10 Tb of IBM Cloud Object Storage: [Sign up now for free](http://cocl.us/ML0101EN-IBM-Offer-CC)
# + button=false new_sheet=false run_control={"read_only": false}
#Click here and press Shift+Enter
# !wget -O cell_samples.csv https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/ML0101ENv3/labs/cell_samples.csv
# + [markdown] button=false new_sheet=false run_control={"read_only": false}
# ### Load Data From CSV File
# + button=false new_sheet=false run_control={"read_only": false}
cell_df = pd.read_csv("cell_samples.csv")
cell_df.head()
# -
# The ID field contains the patient identifiers. The characteristics of the cell samples from each patient are contained in fields Clump to Mit. The values are graded from 1 to 10, with 1 being the closest to benign.
#
# The Class field contains the diagnosis, as confirmed by separate medical procedures, as to whether the samples are benign (value = 2) or malignant (value = 4).
#
# Let's look at the distribution of the classes based on Clump thickness and Uniformity of cell size:
ax = cell_df[cell_df['Class'] == 4][0:50].plot(kind='scatter', x='Clump', y='UnifSize', color='DarkBlue', label='malignant');
cell_df[cell_df['Class'] == 2][0:50].plot(kind='scatter', x='Clump', y='UnifSize', color='Yellow', label='benign', ax=ax);
plt.show()
# ## Data pre-processing and selection
# Let's first look at the column data types:
cell_df.dtypes
# It looks like the __BareNuc__ column includes some values that are not numerical. We can drop those rows:
cell_df = cell_df[pd.to_numeric(cell_df['BareNuc'], errors='coerce').notnull()]
cell_df['BareNuc'] = cell_df['BareNuc'].astype('int')
cell_df.dtypes
feature_df = cell_df[['Clump', 'UnifSize', 'UnifShape', 'MargAdh', 'SingEpiSize', 'BareNuc', 'BlandChrom', 'NormNucl', 'Mit']]
X = np.asarray(feature_df)
X[0:5]
# We want the model to predict the value of Class (that is, benign (=2) or malignant (=4)). As this field can have one of only two possible values, we need to change its measurement level to reflect this.
cell_df['Class'] = cell_df['Class'].astype('int')
y = np.asarray(cell_df['Class'])
y [0:5]
# ## Train/Test dataset
# Okay, we split our dataset into train and test set:
X_train, X_test, y_train, y_test = train_test_split( X, y, test_size=0.2, random_state=4)
print ('Train set:', X_train.shape, y_train.shape)
print ('Test set:', X_test.shape, y_test.shape)
# <h2 id="modeling">Modeling (SVM with Scikit-learn)</h2>
# The SVM algorithm offers a choice of kernel functions for performing its processing. Basically, mapping data into a higher-dimensional space is called kernelling. The mathematical function used for the transformation is known as the kernel function, and can be of different types, such as:
#
# 1. Linear
# 2. Polynomial
# 3. Radial basis function (RBF)
# 4. Sigmoid
#
# Each of these functions has its characteristics, its pros and cons, and its equation, but as there's no easy way of knowing which function performs best with any given dataset, we usually choose different functions in turn and compare the results (a quick comparison sketch follows). Let's just use the default, RBF (Radial Basis Function), for this lab.
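# +
# A hedged sketch of the "try each kernel in turn and compare" idea described above, using
# the train/test split created earlier (scores depend on the split and default parameters):
from sklearn import svm
for kernel_name in ['linear', 'poly', 'rbf', 'sigmoid']:
    kernel_clf = svm.SVC(kernel=kernel_name)
    kernel_clf.fit(X_train, y_train)
    print(kernel_name, "test accuracy:", kernel_clf.score(X_test, y_test))
# -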
from sklearn import svm
clf = svm.SVC(kernel='rbf')
clf.fit(X_train, y_train)
# After being fitted, the model can then be used to predict new values:
yhat = clf.predict(X_test)
yhat [0:5]
# <h2 id="evaluation">Evaluation</h2>
from sklearn.metrics import classification_report, confusion_matrix
import itertools
def plot_confusion_matrix(cm, classes,
normalize=False,
title='Confusion matrix',
cmap=plt.cm.Blues):
"""
This function prints and plots the confusion matrix.
Normalization can be applied by setting `normalize=True`.
"""
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
print("Normalized confusion matrix")
else:
print('Confusion matrix, without normalization')
print(cm)
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar()
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes, rotation=45)
plt.yticks(tick_marks, classes)
fmt = '.2f' if normalize else 'd'
thresh = cm.max() / 2.
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
plt.text(j, i, format(cm[i, j], fmt),
horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black")
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
# +
# Compute confusion matrix
cnf_matrix = confusion_matrix(y_test, yhat, labels=[2,4])
np.set_printoptions(precision=2)
print (classification_report(y_test, yhat))
# Plot non-normalized confusion matrix
plt.figure()
plot_confusion_matrix(cnf_matrix, classes=['Benign(2)','Malignant(4)'],normalize= False, title='Confusion matrix')
# -
# You can also easily use the __f1_score__ from the sklearn library:
from sklearn.metrics import f1_score
f1_score(y_test, yhat, average='weighted')
# Let's try the Jaccard index for accuracy:
from sklearn.metrics import jaccard_similarity_score
jaccard_similarity_score(y_test, yhat)
# <h2 id="practice">Practice</h2>
# Can you rebuild the model, but this time with a __linear__ kernel? You can use the __kernel='linear'__ option when you define the SVM. How does the accuracy change with the new kernel function?
# write your code here
# Double-click __here__ for the solution.
#
# <!-- Your answer is below:
#
# clf2 = svm.SVC(kernel='linear')
# clf2.fit(X_train, y_train)
# yhat2 = clf2.predict(X_test)
# print("Avg F1-score: %.4f" % f1_score(y_test, yhat2, average='weighted'))
# print("Jaccard score: %.4f" % jaccard_similarity_score(y_test, yhat2))
#
# -->
# + [markdown] button=false new_sheet=false run_control={"read_only": false}
# <h2>Want to learn more?</h2>
#
# IBM SPSS Modeler is a comprehensive analytics platform that has many machine learning algorithms. It has been designed to bring predictive intelligence to decisions made by individuals, by groups, by systems – by your enterprise as a whole. A free trial is available through this course, available here: <a href="http://cocl.us/ML0101EN-SPSSModeler">SPSS Modeler</a>
#
# Also, you can use Watson Studio to run these notebooks faster with bigger datasets. Watson Studio is IBM's leading cloud solution for data scientists, built by data scientists. With Jupyter notebooks, RStudio, Apache Spark and popular libraries pre-packaged in the cloud, Watson Studio enables data scientists to collaborate on their projects without having to install anything. Join the fast-growing community of Watson Studio users today with a free account at <a href="https://cocl.us/ML0101EN_DSX">Watson Studio</a>
#
# <h3>Thanks for completing this lesson!</h3>
#
# <h4>Author: <a href="https://ca.linkedin.com/in/saeedaghabozorgi"><NAME></a></h4>
# <p><a href="https://ca.linkedin.com/in/saeedaghabozorgi"><NAME></a>, PhD is a Data Scientist in IBM with a track record of developing enterprise level applications that substantially increases clients’ ability to turn data into actionable knowledge. He is a researcher in data mining field and expert in developing advanced analytic methods like machine learning and statistical modelling on large datasets.</p>
#
# <hr>
#
# <p>Copyright © 2018 <a href="https://cocl.us/DX0108EN_CC">Cognitive Class</a>. This notebook and its source code are released under the terms of the <a href="https://bigdatauniversity.com/mit-license/">MIT License</a>.</p>
| SVM-cancer.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Crawl all the images in an article's content
#
# 1. You may run into the "Are you over 18?" confirmation page
# 2. Parse the structure of an article on ptt.cc/bbs
# 3. Crawl the article
# 4. Parse and verify the image format
# 5. Download the images
#
# URL https://www.ptt.cc/bbs/Gossiping/M.1538373690.A.72D.html
#
# BACKUP https://afuntw.github.io/Test-Crawling-Website/pages/ptt/M.1538373690.A.72D.html
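# Note: the crawl below simply sends the `over18` cookie. An alternative (shown here only as a sketch; the form action and field names are assumptions based on the page at the time of writing) is to submit the age-confirmation form with a `requests.Session`:
# +
import requests
session = requests.Session()
session.post('https://www.ptt.cc/ask/over18',
             data={'from': '/bbs/Gossiping/index.html', 'yes': 'yes'})
print(session.get('https://www.ptt.cc/bbs/Gossiping/index.html').status_code)
# -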
# +
import requests
import re
import json
import os
from PIL import Image
from bs4 import BeautifulSoup, NavigableString
from pprint import pprint
# -
ARTICLE_URL = 'https://www.ptt.cc/bbs/Gossiping/M.1538373690.A.72D.html'
# ## Crawl the article
resp = requests.get(ARTICLE_URL, cookies={'over18': '1'})
assert resp.status_code == 200
soup = BeautifulSoup(resp.text, 'lxml')
main_content = soup.find(id = 'main-content')
img_link = main_content.findAll('a', recursive=False)
pprint(img_link)
# ## Check and download the images
def check_and_download_img(url, savedir='download_img'):
image_resp = requests.get(url, stream=True)
image = Image.open(image_resp.raw)
filename = os.path.basename(url)
# check format
real_filename = '{}.{}'.format(
filename.split('.')[0],
image.format.lower()
)
print('check and fixed filename {} -> {}'.format(filename, real_filename))
# download
if not os.path.exists(savedir):
os.makedirs(savedir)
savepath = os.path.join(savedir, real_filename)
image.save(savepath)
    print('save image - {}'.format(savepath))
for tag in img_link:
check_and_download_img(tag['href'])
| appendix_ptt/03_crawl_image.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Directional Analysis of Dynamic LISAs
#
# This notebook demonstrates how to use Rose diagram based inference for directional LISAs.
import pysal.lib
import numpy as np
from pysal.explore.giddy.directional import Rose
# %matplotlib inline
f = open(pysal.lib.examples.get_path('spi_download.csv'), 'r')
lines = f.readlines()
f.close()
lines = [line.strip().split(",") for line in lines]
names = [line[2] for line in lines[1:-5]]
data = np.array([list(map(int, line[3:])) for line in lines[1:-5]])
sids = list(range(60))
out = ['"United States 3/"',
'"Alaska 3/"',
'"District of Columbia"',
'"Hawaii 3/"',
'"New England"','"Mideast"',
'"Great Lakes"',
'"Plains"',
'"Southeast"',
'"Southwest"',
'"Rocky Mountain"',
'"Far West 3/"']
snames = [name for name in names if name not in out]
sids = [names.index(name) for name in snames]
states = data[sids,:]
us = data[0]
years = np.arange(1969, 2009)
rel = states/(us*1.)
gal = pysal.lib.io.open(pysal.lib.examples.get_path('states48.gal'))
w = gal.read()
w.transform = 'r'
Y = rel[:, [0, -1]]
Y.shape
Y
np.random.seed(100)
r4 = Rose(Y, w, k=4)
# ## Visualization
r4.plot()
r4.plot(Y[:,0]) # condition on starting relative income
r4.plot(attribute=r4.lag[:,0]) # condition on the spatial lag of starting relative income
r4.plot_vectors() # lisa vectors
r4.plot_vectors(arrows=False)
r4.plot_origin() # origin standardized
# ## Inference
#
# The Rose class contains methods to carry out inference on the circular distribution of the LISA vectors. The first approach is based on a two-sided alternative where the null is that the distribution of the vectors across the segments reflects independence in the movements of the focal unit and its spatial lag. Inference is based on random spatial permutations under the null.
r4.cuts
r4.counts
np.random.seed(1234)
r4.permute(permutations=999)
r4.p
# Here all the four sector counts are significantly different from their expectation under the null.
# A directional test can also be implemented. Here the direction of the departure from the null due to positive co-movement of a focal unit and its spatial lag over the time period results in two general cases. For sectors in the positive quadrants (I and III), the observed counts are considered extreme if they are larger than expected, while for the negative quadrants (II, IV) the observed counts are considered extreme if they are smaller than the expected counts under the null.
r4.permute(alternative='positive', permutations=999)
r4.p
r4.expected_perm
# Finally, a directional alternative reflecting negative association between the movement of the focal unit and its lag has the complementary interpretation to the positive alternative: lower counts in I and III, and higher counts in II and IV relative to the null.
r4.permute(alternative='negative', permutations=999)
r4.p
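# As a small convenience (a sketch that reuses the objects created above), we can list which sector counts are extreme at the 5% level under the most recent call to `permute`:
for i, (count, p) in enumerate(zip(r4.counts, r4.p), start=1):
    flag = 'significant' if p < 0.05 else 'not significant'
    print('sector {}: count={}, p-value={:.3f} ({})'.format(i, count, p, flag))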
| notebooks/explore/giddy/directional.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Introduction
# Loading files into FSLeyes and adjusting settings manually is cumbersome and error prone. Luckily, with FSLeyes it is possible to script the display of files, which saves time and makes the resulting figures more reproducible.
#
# There are different ways in which we can interact with FSLeyes programmatically:
# * We can open a Python shell in the FSLeyes GUI from `View` > `Python shell`
# * We can open FSLeyes from the command line and run a script at the same time: `fsleyes -r myscript.py`
# * for an interactive mode, one can also use a Jupyter Notebook `fsleyes --notebookFile my_notebook.ipynb`
#
# Before we continue, here are two links to the relevant User Guides:
# * [FSLeyes documentation](https://users.fmrib.ox.ac.uk/~paulmc/fsleyes/userdoc/latest/index.html)
# * [FSLeyes Python API documentation](https://users.fmrib.ox.ac.uk/~paulmc/fsleyes/apidoc/latest/)
#
# Please note that I put together this tutorial based on my own understanding of the software, and this might or might not be the best way to use it. If in doubt, please refer to these User Guides or contact the FSLeyes developers for advice.
# # Installation
# If you installed FSLeyes as part of FSL (prior to version 6), you have a standalone version. If you want to use it via the interactive mode, however, it is recommended to use it as a Python package. From FSL version 6 onwards, this is automatically included (I think...).
#
# Here are the steps to install FSLeyes for this tutorial:
# * Download and install miniconda (https://docs.conda.io/en/latest/miniconda.html)
#
# * In your terminal create a new environment:
#
# `$ conda create -n fsleyes_tutorial python=3.7`
#
# * When the environment is installed, activate it:
#
# `$ conda activate fsleyes_tutorial`
#
#
# * Install the FSLeyes Python package to the new environment:
#
# `$ conda install -c conda-forge fsleyes`
#
#
# * Check that the path to FSLeyes is inside your environment
#
# `$ which fsleyes`
#
#
# # Start tutorial
#
# If you haven't done so already, load the fsleyes_tutorial conda environment (see above)
# Then navigate to the tutorial folder:
#
# `cd ~/myPath/tutorial`
#
# Launch the tutorial notebook together with FSLeyes:
#
# `fsleyes --notebookFile scripts/fsleyes_tutorial.ipynb`
# # Basic example
# As a first example to see how the interactive mode works, we will load the standard human MNI brain using the command below. You might notice that you don't need to import the 'load' function, because some useful packages are already imported when FSLeyes is launched. You might also notice that the variable `FSLDIR` is accessible within FSLeyes, but only if it was defined in the terminal session from which we launched FSLeyes.
import os
load(os.path.expandvars('$FSLDIR/data/standard/MNI152_T1_2mm'))
# All files that we load are stored automatically in a list called `overlayList`, which holds all the Image objects. We can access the first element, which is the MNI brain, using regular indexing:
# first element of list:
overlayList[0]
# As a simple manipulation we can change the colour map from greyscale to `Render3`. Note that the default colour maps are directly accessible in this way, but other custom colour maps can also be included (see more below).
displayCtx.getOpts(overlayList[0]).cmap = 'Render3'
# We can remove this file again, because we don't need it for now:
overlayList.remove(overlayList[0])
# # Goal for this session
# The goal of this session is to have a script that automatically creates the display of a structural brain scan together with tractography results from two tracts. The cursor will be centered on the voxel of maximal probability of one tract. The script will contain one variable that defines which subject group we want to display (for example controls, patient group 1, patient group 2, etc.). Changing this variable will adapt all the settings so that a comparable display is created.
# # Input files and settings
# We need to provide the filenames of interest in an organized way, where Pandas data frames can be handy.
# In the tutorial example the filenames are very convenient and organized in a neat way, which will most likely not be the case in a real-life example.
# +
import pandas as pd
# folder where data is stored
mydir = os.path.join('mypath', 'tutorial', 'data')
# filenames
df = pd.DataFrame(columns=['subject_group', 'structural', 'CST', 'MDLF'])
df.loc[len(df)] = ['control', 'structural', 'cst', 'mdlf']
df.loc[len(df)] = ['patient-group1', 'structural', 'cst', 'mdlf']
df.loc[len(df)] = ['patient-group2', 'structural', 'cst', 'mdlf']
# -
# Here we define the variable for 'subject-group':
subject_group = 'control'
# In the following lines we define two different colours for the two tracts that we will display. We will later access the correct colour based on the index of the tract. In a similar way, we could define settings that differ for the three subject groups.
# +
# colour for the two tracts
my_colours = np.array([[0. , 0.6 , 1. ], # blue
[1. , 0.33, 0.68]]) # pink
# -
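# In the same spirit as the tract-colour lookup above, a per-group settings dictionary could be used. The sketch below is only illustrative; the keys mirror the `subject_group` values and the numeric values are made-up placeholders, not part of the tutorial data.
# +
group_display_ranges = {
    'control': (0.2, 1),
    'patient-group1': (0.15, 1),
    'patient-group2': (0.25, 1),
}
# e.g. display_range = group_display_ranges[subject_group]
# -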
# The tractography results that we load have been normalized, with intensities ranging from 0 to 1. Therefore, we can apply a comparable threshold of 0.2 to both tracts. In a different situation we might want to make this setting variable depending on tract type or subject group.
# display range for thresholding the tracts
display_range = (0.2, 1)
# # Generate Display
# +
# import package for colour map (should be at top of script)
from matplotlib.colors import LinearSegmentedColormap
# make sure all previous overlays are removed
overlayList.clear()
# load structural
structural_fname = f'{df[df.subject_group == subject_group].structural.values[0]}.nii.gz'
load(os.path.join(os.sep, mydir, structural_fname))
# load tractograms
# loop over hemispheres
for hemi in ['l', 'r']:
for i_t, tract in enumerate(['CST', 'MDLF']):
tract_fname = os.path.join(os.sep, mydir, f'{df[df.subject_group == subject_group][tract].values[0]}_{hemi}.nii.gz')
load(os.path.join(os.sep, mydir, tract_fname))
# set display range and clipping range
displayCtx.getOpts(overlayList[-1]).clippingRange = display_range
displayCtx.getOpts(overlayList[-1]).displayRange = display_range
# set colour map specific for tract type
# use a colour map where luminance linearly increases from black to white
displayCtx.getOpts(overlayList[-1]).cmap = LinearSegmentedColormap.from_list('mycmap', ['black', my_colours[i_t], 'white'])
# determine max voxel for MDLF tractogram in left hemisphere
if (hemi == 'l') & (tract == 'MDLF'):
max_voxel = np.unravel_index(np.argmax(overlayList[-1].data, axis=None), overlayList[-1].data.shape)
# place cross hair on maximal voxel for MDLF_L
displayCtx = frame.viewPanels[0].displayCtx
displayCtx.location = displayCtx.getOpts(overlayList[-1]).transformCoords(max_voxel, 'voxel', 'display')
# -
# # Importing Atlases
# It is possible to add custom atlases to FSLeyes, and they will then appear in the Atlas panel in the GUI.
# Note that any custom atlas files must be described by an XML specification file as outlined [here](https://users.fmrib.ox.ac.uk/~paulmc/fsleyes/userdoc/latest/customising.html#atlases).
#
# It's just a single line of code:
# + pycharm={"name": "#%%\n"}
import fsl.data.atlases
fsl.data.atlases.addAtlas('/myPath/tutorial/myatlas.xml')
| data_visualization/fsleyes_tutorial.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Watch Me Code 2: Exam Grades
#
# A simple example of reading and writing multiple lines of data to a file.
#
# +
from os import path
filename = path.join("relativefolder", "WMC2-Grades.txt")
print(filename)
# Let's input a bunch of grades and write them to a file:
count = 0
with open(filename, 'w', encoding="utf-8") as out_file:
    while True:
        grade = input("Please enter a grade or type 'q' to quit: ")
        if grade == 'q':
            break
        out_file.write(grade + "\n")
        count = count + 1
print("%d grades entered" % count)
with open(filename, 'r', encoding="utf-8") as in_file:
    grades = []
    for line in in_file:
        grades.append(int(line))
average = sum(grades)/len(grades)
print("Average is %d" % average)
# -
# read them in
count = 0
total = 0
with open(filename,'r') as inf:
for line in inf.readlines():
grade = float(line)
total = total + grade
count = count + 1
print("Read in %s grades. Average is %.2f" % (count, total/count))
# NOTE: Show this is really persistent.
#
# - open WMC2-Grades.txt from outside Jupyter.
# - edit the grades. Re-run the 2nd example.
| content/lessons/08/Watch-Me-Code/WMC2-Exam-Grades.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Kernel Shape Example
# Spring 2019 AME-70790 Final Project
#
# <NAME> (<EMAIL>)
#
# Reference: <NAME>., & <NAME>. (1994). Kernel smoothing. Chapman and Hall/CRC.
# ___
# In the previous example we looked at how the bandwidth of a kernel can significantly influence the kernel smoother prediction.
# Now let us look at the impact of the shape of the kernel function $K$.
# Again consider this arbitrary density:
# $$f_{1}(x)=\frac{3}{4}\phi\left(x | 0,1\right) + \frac{1}{4}\phi\left(x | 3/2, 1/3\right),$$
# where $\phi(x)$ is the normal PDF making this a mixture of two Gaussians. We will use 1000 training data points to approximate this density function with the kernel density estimator.
import numpy as np
from scipy.stats import norm
import matplotlib.pyplot as plt
# +
plt.close("all")
np.random.seed(123)
ntrain = 1000
h = 0.25 # band width
# Target data
x_test = np.linspace(-3,3,200)
y_test = 0.75*norm.pdf(x_test, loc=0, scale=1) + 0.25*norm.pdf(x_test, loc=3/2, scale=1/3)
# Training data
c1 = np.sum(np.random.rand(ntrain) < 0.75)
c2 = ntrain - c1
x_train0 = np.concatenate([np.random.randn(c1), (1/3.)*np.random.randn(c2)+1.5], axis=0)
# -
# Here we will consider three different kernels.
# For kernels to be comparable, three constraints are imposed:
# $$\int K(x)dx = 1, \quad \int xK(x)dx=0, \quad \int x^{2}K(x)dx=a^{2}<\infty,$$
# which impose normalization, symmetry and the variance respectively.
# Although kernel symmetry is not required, symmetric kernels are commonly used since they are easier to interpret.
# It's important to note that the variance is **not** always the bandwidth.
# To illustrate this point and the impact of kernel shape on the KDE prediction, we will compare kernels using the same *bandwidth* but different standard deviation versus kernels using the same *standard deviation* (a=h).
#
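# As a quick sanity check (a sketch assuming `scipy.integrate` is available), these three integrals can be evaluated numerically, for instance for a normal kernel with standard deviation $a$ equal to the bandwidth value used above:
# +
import numpy as np
from scipy.integrate import quad
from scipy.stats import norm
a = 0.25
mass = quad(lambda x: norm.pdf(x, scale=a), -np.inf, np.inf)[0]
mean = quad(lambda x: x*norm.pdf(x, scale=a), -np.inf, np.inf)[0]
var = quad(lambda x: x**2*norm.pdf(x, scale=a), -np.inf, np.inf)[0]
print('mass = %.3f, mean = %.3f, variance = %.4f (a^2 = %.4f)' % (mass, mean, var, a**2))
# -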
# The first will be the standard Gaussian kernel we used in the previous example:
# $$f(x,h)=(nh)^{-1}\sum_{i=1}^{n}K\left(\frac{x-x_{i}}{h}\right), \quad K(x)=\frac{1}{\sqrt{2\pi} a}\exp\left\{\frac{-x^{2}}{2 a^{2}}\right\},$$
# where $a$ is the standard deviation.
# The normal kernel is unique in the sense that the bandwidth and standard deviation are interchangeable.
# +
# Set-up prediction points
x_pred0 = np.linspace(-3, 3, 500)
# Expand array dims and repeat
x_pred = np.expand_dims(x_pred0, axis=1).repeat(x_train0.shape[0], axis=1)
x_train = np.expand_dims(x_train0, axis=0).repeat(x_pred0.shape[0], axis=0)
x0 = x_pred-x_train
normal_pred = []
normal_mass = []
# Compute normal kernel using set bandwidth
x0_scaled = x0/h
y_pred0 = (1/(ntrain*h))*np.sum(norm.pdf(x0_scaled), axis=1)
normal_pred.append(y_pred0)
# Compute normal kernel using set variance
y_pred0 = (1/ntrain)*np.sum(norm.pdf(x0, scale=h), axis=1)
normal_pred.append(y_pred0)
normal_mass.append(norm.pdf(x_pred0/h))
normal_mass.append(norm.pdf(x_pred0, scale=h))
# -
# For the second kernel we will use the *Epanechnikov kernel*, which is given by:
# $$K(x)=\frac{3}{4}\left[\frac{1-x^{2}/(5a^{2})}{a\sqrt{5}}\right]\mathbf{1}_{|x|<a\sqrt{5}},$$
# where $\mathbf{1}_{A}$ is the indicator function and $a$ is its scale parameter.
# The most common choice is $a^2=1/5$ (i.e. $a=1/\sqrt{5}$), as this clearly simplifies the kernel.
# This kernel is notable in that there are theoretical arguments that the Epanechnikov kernel is the optimal kernel in terms of efficiency, although the Gaussian is more frequently used.
# Additionally, we note that both the Gaussian kernel and Epanechnikov kernel are derived from the same family:
# $$K(x,p)=\left[2^{2p+1}B(p+1,p+1)\right]^{-1}\left(1-x^{2}\right)^{p}\mathbf{1}_{|x|<1},$$
# where $B(a,b)$ is the beta function.
# The Gaussian kernel and Epanechnikov kernel can be recovered when $p\rightarrow \infty$ and $p=1$ respectively.
# Additional kernels in this family include the Bi-weight and Tri-weight which are when $p=2$ and $p=3$ respectively.
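# As a small sketch of this family (using `scipy.special.beta`), we can check numerically that $p=1$ reproduces the Epanechnikov kernel $\frac{3}{4}(1-x^{2})$ on $|x|<1$:
# +
import numpy as np
from scipy.special import beta
def kernel_family(x, p):
    norm_const = 2.0**(2*p + 1)*beta(p + 1, p + 1)
    return (np.abs(x) < 1)*(1 - x**2)**p/norm_const
xs = np.array([0.0, 0.5])
print(kernel_family(xs, 1))  # -> [0.75, 0.5625]
print(0.75*(1 - xs**2))      # same values
# -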
# +
epan_pred = []
epan_mass = []
# Compute Epanechnikov kernel using set bandwidth (a^2 = 1/5)
x0_scaled = x0/h
y_pred0 = (1/(ntrain*h))*np.sum(0.75*(1-x0_scaled**2)*(np.abs(x0_scaled) < 1), axis=1)
epan_pred.append(y_pred0)
# Compute Epanechnikov kernel using set variance
y_pred0 = (1/ntrain)*np.sum(0.75*((1-x0**2/(5*h**2))/(h*np.sqrt(5)))*(np.abs(x0) < h*np.sqrt(5)), axis=1)
epan_pred.append(y_pred0)
epan_mass.append(0.75*(1-(x_pred0/h)**2)*(np.abs(x_pred0/h) < 1))
epan_mass.append(0.75*((1-x_pred0**2/(5*h**2))/(h*np.sqrt(5)))*(np.abs(x_pred0) < h*np.sqrt(5)))
# -
# The third kernel we will consider is the *triangular kernel* which is defined as:
# $$K(x)=\frac{1}{a\sqrt{6}}\left(1-\frac{|x|}{a\sqrt{6}}\right)\mathbf{1}_{|x|<a\sqrt{6}}.$$
# Similar to the Epanechnikov kernel, the most common choice is $a^2 = 1/6$ due to the obvious simplifications.
# These and other kernels can be reference on [Wikipedia](https://en.wikipedia.org/wiki/Kernel_%28statistics%29).
# +
tri_pred = []
tri_mass = []
# Compute Triangular kernel using set bandwidth (a^2 = 1/6)
x0_scaled = x0/h
y_pred0 = (1/(ntrain*h))*np.sum((1-np.abs(x0_scaled)) * (np.abs(x0_scaled) < 1), axis=1)
tri_pred.append(y_pred0)
# Compute Triangular kernel using set variance
h0 = h*np.sqrt(6)
y_pred0 = (1/ntrain)*np.sum((1-np.abs(x0)/h0)/h0 * (np.abs(x0) < h0), axis=1)
tri_pred.append(y_pred0)
tri_mass.append((1-np.abs(x_pred0/h)) * (np.abs(x_pred0/h) < 1))
tri_mass.append((1-np.abs(x_pred0)/h0)/h0 * (np.abs(x_pred0) < h0))
# +
fig = plt.figure(figsize=(15,10))
ax = []
ax.append(plt.subplot2grid((2, 2), (0, 0)))
ax.append(plt.subplot2grid((2, 2), (0, 1)))
ax.append(plt.subplot2grid((2, 2), (1, 0)))
ax.append(plt.subplot2grid((2, 2), (1, 1)))
for i in range(2):
# Normal prediction
ax[i].plot(x_pred0, normal_pred[i], '--', c='r', label='Normal Kernel')
# Epanechnikov prediction
ax[i].plot(x_pred0, epan_pred[i], '--', c='b', label='Epanechnikov Kernel')
# Triangular prediction
ax[i].plot(x_pred0, tri_pred[i], '--', c='g', label='Triangular Kernel')
# Target density
ax[i].plot(x_test, y_test, c='k', label='Target')
ax[i].set_xlabel('x')
ax[i].set_ylabel('Density')
# Kernel Mass
ax[i + 2].plot(x_pred0, normal_mass[i], c='r')
ax[i + 2].plot(x_pred0, epan_mass[i], c='b')
ax[i + 2].plot(x_pred0, tri_mass[i], c='g')
ax[i + 2].set_xlim([-1,1])
ax[i + 2].set_title('Kernel Mass')
ax[i + 2].set_xlabel('x')
ax[i + 2].set_ylabel('Density')
ax[0].set_title('KDE same bandwidth')
ax[1].set_title('KDE same variance')
ax[0].legend(loc=2)
# Save and show figure
plt.savefig('figs/02_kernel_shape.pdf')
plt.savefig('figs/02_kernel_shape.png')
plt.show()
# -
# (Left to right) The KDE using kernels with the same bandwidth but different variance, and the KDE using kernels with the same variance. (Top to bottom) The KDE and the kernel mass. We can see that when the kernels have roughly the same shape (or standard deviation) the KDE is approximately the same. Thus bandwidth **and** kernel shape both have a strong influence over the density estimates.
| 02_kernel_shape.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Sensitivity: Algebraic explanation
# ## Changes in the coefficients of the objective function
# Let us start with the production Mix example from class:
# The final Tableau is:
#
# | Basic | z | $x_{1}$ | $x_{2}$ | $s_{1}$ | $s_{2}$ | $s_{3}$ | RHS | Ratio |
# |---------|---|---------|---------|---------|---------|---------|------|-------|
# | - | 1 | 0 | 0 | 0 | 250/3 | 650/3 | 6350 | - |
# | $s_{1}$ | 0 | 0 | 0 | 1 | -1/3 | -5/3 | 5 | - |
# | $x_{2}$ | 0 | 0 | 1 | 0 | 1/3 | -1/3 | 11 | - |
# | $x_{1}$ | 0 | 1 | 0 | 0 | 0 | 1 | 12 | - |
#
# Let us assume we want to make a change in coefficient $c_{1}$ of $\Delta c_{1}$. The objective function becomes:
#
# $max Z = (300 +\Delta c_{1})·x_{1} + 250x_{2}$
#
# It can be proved that the final Tableau now becomes:
#
# | Basic | z | $x_{1}$ | $x_{2}$ | $s_{1}$ | $s_{2}$ | $s_{3}$ | RHS | Ratio |
# |---------|---|-----------------|---------|---------|---------|---------|------|-------|
# | - | 1 | $-\Delta c_{1}$ | 0 | 0 | 250/3 | 650/3 | 6350 | - |
# | $s_{1}$ | 0 | 0 | 0 | 1 | -1/3 | -5/3 | 5 | - |
# | $x_{2}$ | 0 | 0 | 1 | 0 | 1/3 | -1/3 | 11 | - |
# | $x_{1}$ | 0 | 1 | 0 | 0 | 0 | 1 | 12 | - |
#
# The Simplex method requires that the Tableau is in canonical form. Thus we need to add the last row times $\Delta c_{1}$ to the first row to fulfill this:
#
# | Basic | z | $x_{1}$ | $x_{2}$ | $s_{1}$ | $s_{2}$ | $s_{3}$ | RHS | Ratio |
# |---------|---|---------|---------|---------|---------|------------------------|------|-------|
# | - | 1 | 0 | 0 | 0 | 250/3 | 650/3 + $\Delta c_{1}$ | 6350 + 12·$\Delta c_{1}$ | - |
# | $s_{1}$ | 0 | 0 | 0 | 1 | -1/3 | -5/3 | 5 | - |
# | $x_{2}$ | 0 | 0 | 1 | 0 | 1/3 | -1/3 | 11 | - |
# | $x_{1}$ | 0 | 1 | 0 | 0 | 0 | 1 | 12 | - |
# And taking into account that the coefficient of $s_{3}$ needs to remain non-negative for the current basis to remain optimal, $\Delta c_{1}$ cannot be lower than -650/3 and can increase up to infinity.
# Let us now assume we want to make a change in coefficient $c_{2}$ of $\Delta c_{2}$. The objective function becomes:
#
# $max Z = 300·x_{1} + (250 +\Delta c_{2})·x_{2}$
#
# The final Tableau now becomes:
#
# | Basic | z | $x_{1}$ | $x_{2}$ | $s_{1}$ | $s_{2}$ | $s_{3}$ | RHS | Ratio |
# |---------|---|-----------------|---------|---------|---------|---------|------|-------|
# | - | 1 | 0 | $-\Delta c_{2}$ | 0 | 250/3 | 650/3 | 6350 | - |
# | $s_{1}$ | 0 | 0 | 0 | 1 | -1/3 | -5/3 | 5 | - |
# | $x_{2}$ | 0 | 0 | 1 | 0 | 1/3 | -1/3 | 11 | - |
# | $x_{1}$ | 0 | 1 | 0 | 0 | 0 | 1 | 12 | - |
#
# Again, the Simplex method requires that the Tableau is in canonical form. Thus we need to add the second last row times $\Delta c_{2}$ to the first row to fulfill this:
#
# | Basic | z | $x_{1}$ | $x_{2}$ | $s_{1}$ | $s_{2}$ | $s_{3}$ | RHS | Ratio |
# |---------|---|---------|---------|---------|---------|------------------------|------|-------|
# | - | 1 | 0 | 0 | 0 | 250/3 + $\Delta c_{2}/3$ | 650/3 - $\Delta c_{2}/3$ | 6350 + 11·$\Delta c_{2}$ | - |
# | $s_{1}$ | 0 | 0 | 0 | 1 | -1/3 | -5/3 | 5 | - |
# | $x_{2}$ | 0 | 0 | 1 | 0 | 1/3 | -1/3 | 11 | - |
# | $x_{1}$ | 0 | 1 | 0 | 0 | 0 | 1 | 12 | - |
# And taking into account that the coefficients of $s_{2}$ and $s_{3}$ need to remain non-negative for the current basis to remain optimal, $\Delta c_{2}$ cannot be lower than -250 nor higher than 650.
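# As a quick numerical cross-check (a sketch that assumes `scipy` is available; the data is the production-mix LP used throughout this notebook), the optimal corner stays at $(x_{1}, x_{2}) = (12, 11)$ while $c_{2}$ stays inside $(0, 900)$ and jumps to a different corner once it leaves that range:
# +
from scipy.optimize import linprog
A_ub = [[2, 1], [1, 3], [1, 0]]
b_ub = [40, 45, 12]
for c2 in (250, 250 + 649, 250 + 651):  # base value, just inside and just outside the +650 limit
    res = linprog(c=[-300, -c2], A_ub=A_ub, b_ub=b_ub, bounds=[(0, None), (0, None)])
    print('c2 = {:3d} -> x = {}, Z = {:.1f}'.format(c2, res.x.round(2), -res.fun))
# -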
# ## Changes in the constraint independent terms
# Now let us see the effect of changing b, for instance decreasing $b_{1}$ by an amount equal to $\Delta b_{1}$. In the original problem formulation, this means:
#
# $2x_{1} + x_{2} + s_{1} = 40 - \Delta b_{1} $
# $x_{1} + 3x_{2} + s_{2} = 45$
# $x_{1} + s_{3} = 12$
# Changing the Right Hand Side by $\Delta b_{1}$ is equivalent to changing $s_{1}$ by an amount of $\Delta b_{1}$. Note that $s_{1}$ is in the basis, thus for it to remain in the basis, it must satisfy:
#
# $s_{1}=5 - \Delta b_{1} \geq 0$
#
# $\Delta b_{1} \leq 5$
#
# Now, take the second constraint and let us apply the same change:
#
# $2x_{1} + x_{2} + s_{1} = 40 $
# $x_{1} + 3x_{2} + s_{2} = 45 - \Delta b_{2} $
# $x_{1} + s_{3} = 12$
#
# Again, this is equivalent to:
#
# $2x_{1} + x_{2} + s_{1} = 40 $
# $x_{1} + 3x_{2} + s_{2} + \Delta b_{2} = 45 $
# $x_{1} + s_{3} = 12$
#
# Which can be regarded as a change of $\Delta b_{2}$ in $s_{2}$. Now, if we change $s_{2}$ from $s_{2}=0$ to $s_{2} =\Delta b_{2}$ in the final Tableau:
#
# | Basic | z | $x_{1}$ | $x_{2}$ | $s_{1}$ | $s_{2}$ | $s_{3}$ | RHS | Ratio |
# |---------|---|---------|---------|---------|------------------------|---------|------|-------|
# | - | 1 | 0 | 0 | 0 | 250/3·$\Delta b_{2}$ | 650/3 | 6350 | - |
# | $s_{1}$ | 0 | 0 | 0 | 1 | -1/3 ·$\Delta b_{2}$ | -5/3 | 5 | - |
# | $x_{2}$ | 0 | 0 | 1 | 0 | 1/3 ·$\Delta b_{2}$ | -1/3 | 11 | - |
# | $x_{1}$ | 0 | 1 | 0 | 0 | 0 ·$\Delta b_{2}$ | 1 | 12 | - |
#
# The basis remains unchanged as long as all the constraints are still met:
#
# $s_{1} - 1/3 · \Delta b_{2} = 5 $
# $x_{2} + 1/3· \Delta b_{2} = 11 $
# $x_{1} + 0 ·\Delta b_{2} = 12$
#
# $s_{1} = 5 + 1/3 · \Delta b_{2} \geq 0 (\Delta b_{2} \geq -15)$
# $x_{2} = 11 - 1/3· \Delta b_{2} \geq 0 (\Delta b_{2} \leq 33)$
# $x_{1} = 12 - 0·\Delta b_{2} \geq 0 (\Delta b_{2} \leq Inf)$
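# A similar check works for the second right-hand side (again a sketch using `scipy`). Since $\Delta b_{2}$ is defined here as a *decrease*, the basis holds for $b_{2}$ between $45 - 33 = 12$ and $45 + 15 = 60$, and inside that range the optimal value changes linearly at the dual-price rate of $250/3$ per unit of $b_{2}$:
# +
from scipy.optimize import linprog
for b2 in (12, 45, 60, 70):  # the last value lies outside the allowable range
    res = linprog(c=[-300, -250], A_ub=[[2, 1], [1, 3], [1, 0]],
                  b_ub=[40, b2, 12], bounds=[(0, None), (0, None)])
    print('b2 = {:2d} -> Z = {:7.1f} (linear prediction {:7.1f})'.format(
        b2, -res.fun, 6350 + 250/3*(b2 - 45)))
# -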
| docs/source/CLP/tutorials/Sensitivity - Algebraic explanation.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .jl
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Julia 0.6.1
# language: julia
# name: julia-0.6
# ---
# +
# GPU: 32*40 in 8.00s = 160/s
# CPU: 32*8 in 115.0s = 2/s
# +
# After installing and starting Julia run the following to install the required packages:
# Pkg.init(); Pkg.update()
# for p in ("CUDAapi","CUDAdrv","MAT","Images","IJulia","Knet"); Pkg.add(p); end
# Pkg.checkout("Knet","ilkarman") # make sure we have the right Knet version
# Pkg.build("Knet")
# -
using Knet
include(Knet.dir("examples","resnet", "resnetlib.jl"))
using ResNetLib: resnet50init, resnet50
println("OS: ", Sys.KERNEL)
println("Julia: ", VERSION)
println("Knet: ", Pkg.installed("Knet"))
;cat /proc/cpuinfo '|' grep processor '|' wc -l
;nvidia-smi --query-gpu=gpu_name --format=csv
const BATCH_SIZE = 32
const RESNET_FEATURES = 2048
const BATCHES_GPU = 40
const BATCHES_CPU = 8
# Create batches of fake data
function fakedata(batches; atype=KnetArray)
x = rand(Float32, 224, 224, 3, BATCH_SIZE * batches)
minibatch(x, BATCH_SIZE, xtype=atype)
end
# Return features from classifier
function predictfn(weights, moments, data)
out = []
for x in data
pred = resnet50(weights, moments, x; stage=5)
push!(out, mat(pred))
end
return Array(hcat(out...))
end
# ## 1. GPU
# Initialize resnet weights and fake data
gpuweights = gpumoments = nothing; knetgc() # clear memory from previous run
gpuweights, gpumoments = resnet50init(;stage=5, trained=true, atype=KnetArray);
info("Cold start")
gpudata1 = fakedata(BATCHES_GPU, atype=KnetArray)
@time predictfn(gpuweights, gpumoments, gpudata1);
info("Benchmarking")
gpudata = fakedata(BATCHES_GPU, atype=KnetArray)
@time predictfn(gpuweights, gpumoments, gpudata);
# ## 2. CPU
# Initialize resnet weights
cpuweights, cpumoments = resnet50init(;stage=5, trained=true, atype=Array);
info("Cold start")
cpudata1 = fakedata(1, atype=Array);
@time predictfn(cpuweights, cpumoments, cpudata1);
info("Benchmarking")
cpudata = fakedata(BATCHES_CPU, atype=Array);
@time predictfn(cpuweights, cpumoments, cpudata);
| deep-learning/multi-frameworks/notebooks/Knet_Inference.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Form Parsing using Google Cloud Document AI
#
# This notebook shows how to use Google Cloud Document AI to parse a campaign disclosure form.
#
# It accompanies this Medium article:
# https://medium.com/@lakshmanok/how-to-parse-forms-using-google-cloud-document-ai-68ad47e1c0ed
# ### Document
#
# As an example, let's take this US election campaign disclosure form.
# !ls *.pdf
from IPython.display import IFrame
IFrame("./scott_walker.pdf", width=600, height=300)
# ## Upload to Cloud Storage
#
# Document AI works with documents on Cloud Storage, so let's upload the doc.
BUCKET="ai-analytics-solutions-kfpdemo" # CHANGE to a bucket that you own
# !gsutil cp scott_walker.pdf gs://{BUCKET}/formparsing/scott_walker.pdf
# !gsutil ls gs://{BUCKET}/formparsing/scott_walker.pdf
# ## Enable Document AI
#
# 1. First enable Document AI in your project by visiting
# https://console.developers.google.com/apis/api/documentai.googleapis.com/overview
#
# 2. Find out who you are running as:
# !gcloud auth list
# 3. Create a service account authorization by visiting
# https://console.cloud.google.com/iam-admin/serviceaccounts/create
# Give this service account Document AI Core Service Account authorization
#
# 4. Give the above ACTIVE ACCOUNT the ability to use the service account you just created.
# ## Call Document AI
# + language="bash"
# PDF="gs://ai-analytics-solutions-kfpdemo/formparsing/scott_walker.pdf" # CHANGE to your PDF file
# REGION="us" # change to EU if the bucket is in the EU
#
# cat <<EOM > request.json
# {
# "inputConfig":{
# "gcsSource":{
# "uri":"${PDF}"
# },
# "mimeType":"application/pdf"
# },
# "documentType":"general",
# "formExtractionParams":{
# "enabled":true
# }
# }
# EOM
#
# # Send request to Document AI.
# PROJECT=$(gcloud config get-value project)
# echo "Sending the following request to Document AI in ${PROJECT} ($REGION region), saving to response.json"
# cat request.json
#
# curl -X POST \
# -H "Authorization: Bearer "$(gcloud auth application-default print-access-token) \
# -H "Content-Type: application/json; charset=utf-8" \
# -d @request.json \
# https://${REGION}-documentai.googleapis.com/v1beta2/projects/${PROJECT}/locations/us/documents:process \
# > response.json
# -
# !tail response.json
# ## Parse the response
#
# Let's use Python to parse the response and pull out specific fields.
#
import json
ifp = open('response.json')
response = json.load(ifp)
allText = response['text']
print(allText[:100])
# ### Option 1: Parsing blocks of text
#
# As an example, let's try to get the "Cash on Hand". This is in Page 2 and the answer is $75,931.36
# All of the document's text is in the allText field. We just need to find the right starting and ending index
# for what we want to extract.
print(allText.index("CASH ON HAND"))
# We know that "Cash on Hand" is on Page 2.
response['pages'][1]['blocks'][5]
response['pages'][1]['blocks'][5]['layout']['textAnchor']['textSegments'][0]
startIndex = int(response['pages'][1]['blocks'][5]['layout']['textAnchor']['textSegments'][0]['startIndex'])
endIndex = int(response['pages'][1]['blocks'][5]['layout']['textAnchor']['textSegments'][0]['endIndex'])
allText[startIndex:endIndex]
# Cool, we are at the right part of the document! Let's get the next block, which should be the actual amount.
# +
def extractText(allText, elem):
startIndex = int(elem['textAnchor']['textSegments'][0]['startIndex'])
endIndex = int(elem['textAnchor']['textSegments'][0]['endIndex'])
return allText[startIndex:endIndex].strip()
amount = float(extractText(allText, response['pages'][1]['blocks'][6]['layout']))
print(amount)
# -
# ### Option 2: Parsing form fields
#
# What we did with blocks of text was quite low-level. Document AI understands that forms tend to have key-value pairs, and part of the JSON response includes these extracted key-value pairs as well.
#
# Besides FormField Document AI also supports getting Paragraph and Table from the document.
response['pages'][1].keys()
response['pages'][1]['formFields'][2]
fieldName = extractText(allText, response['pages'][1]['formFields'][2]['fieldName'])
fieldValue = extractText(allText, response['pages'][1]['formFields'][2]['fieldValue'])
print('key={}\nvalue={}'.format(fieldName, fieldValue))
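# To pull out every key/value pair at once, a short loop like the following works (a sketch that assumes each field carries a populated `textAnchor`, as in the field above, and reuses the `extractText` helper):
# +
form_fields = {}
for page in response['pages']:
    for field in page.get('formFields', []):
        key = extractText(allText, field['fieldName'])
        value = extractText(allText, field['fieldValue'])
        form_fields[key] = value
print('{} fields extracted'.format(len(form_fields)))
# -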
# Enjoy!
#
# Copyright 2020 Google Inc. Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License
| blogs/form_parser/formparsing.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# %load_ext autoreload
# %matplotlib inline
import os
from pathlib import Path
from tempfile import TemporaryDirectory
import numpy as np
import nibabel as nb
import nitransforms as nt
from nipype.interfaces.afni import Warp
cwd = TemporaryDirectory()
os.chdir(cwd.name)
print(os.getcwd())
# -
# !wget https://nipy.org/nibabel/_downloads/62985f4c43f499609fa65cb2eb955b79/someones_anatomy.nii.gz
# Load the example
nii = nb.load('someones_anatomy.nii.gz')
hdr = nii.header.copy()
aff = nii.affine.copy()
data = np.asanyarray(nii.dataobj)
nii.orthoview()
# +
# Create LAS version
las_aff = aff.copy()
las_aff[0, 0] *= -1.0
las_aff[0, 3] = aff.dot(np.hstack((np.array(nii.shape[:3]) - 1, 1.0)))[0]
las = nb.Nifti1Image(np.flip(data, 0), las_aff, nii.header)
las.to_filename('someones_anatomy_LAS.nii.gz')
# Create LPS version
lps_aff = aff.copy()
lps_aff[0, 0] *= -1.0
lps_aff[1, 1] *= -1.0
lps_aff[:2, 3] = aff.dot(np.hstack((np.array(nii.shape[:3]) - 1, 1.0)))[:2]
lps = nb.Nifti1Image(np.flip(np.flip(data, 0), 1), lps_aff, nii.header)
lps.to_filename('someones_anatomy_LPS.nii.gz')
# Create version not aligned to canonical
R = nb.affines.from_matvec(nb.eulerangles.euler2mat(x=0.09, y=0.001, z=0.001))
M = R.dot(nii.affine)
new = nb.Nifti1Image(data, M, hdr)
new.header.set_qform(M, 1)
new.header.set_sform(M, 1)
new.to_filename('someones_anatomy_rot.nii.gz')
# Create version not aligned to canonical (LPS)
lpsM = R.dot(lps.affine)
lpsnew = nb.Nifti1Image(np.flip(np.flip(data, 0), 1), lpsM, hdr)
lpsnew.header.set_qform(lpsM, 1)
lpsnew.header.set_sform(lpsM, 1)
lpsnew.to_filename('someones_anatomy_lpsrot.nii.gz')
# -
print(new.affine)
print(R.dot(nii.affine))
R[:3, :3].dot(nii.affine[:3, 3])
print(np.linalg.inv(np.diag([-1, -1, 1, 1]).dot(R)).T)
#!3dWarp -deoblique -prefix deoblps.nii.gz someones_anatomy_lpsrot.nii.gz
Warp(in_file='someones_anatomy_rot.nii.gz', deoblique=True, verbose=True, out_file='deob.nii.gz').run()
from nibabel.affines import from_matvec
B = np.ones((2, 2))
AFNI_SIGNS = np.block([[B, -1.0 * B], [-1.0 * B, B]])
def _afni_warpdrive_for(oblique, plumb, offset=True, inv=False):
"""
Calculate AFNI's ``WARPDRIVE_MATVEC_FOR_000000`` (de)obliquing affine.
Parameters
----------
oblique : 4x4 numpy.array
affine that is not aligned to the cardinal axes.
plumb : 4x4 numpy.array
corresponding affine that is aligned to the cardinal axes.
Returns
-------
plumb_to_oblique : 4x4 numpy.array
the matrix that pre-pended to the plumb affine rotates it
to be oblique.
"""
R = np.linalg.inv(plumb[:3, :3]).dot(oblique[:3, :3])
origin = oblique[:3, 3] - R.dot(oblique[:3, 3])
if offset is False:
origin = np.zeros(3)
matvec_inv = from_matvec(R, origin) * AFNI_SIGNS
if not inv:
return np.linalg.inv(matvec_inv)
return matvec_inv
R = nb.affines.from_matvec(nb.eulerangles.euler2mat(x=0.09, y=0.001, z=0.001))
# D = _afni_to_oblique(new.affine, nii.affine)
# print(R)
# print(D)
print(_afni_warpdrive_for(new.affine, nii.affine, offset=False))
deob = nb.load('deob.nii.gz')
print(deob.header.extensions[0].get_content().decode())
print(R.T)
print(R.dot(nii.affine[:, 3]) - nii.affine[:, 3])
print(nii.affine[:, 3] - new.affine[:, 3])
print(new.affine[:, 3] - np.linalg.inv(R).T.dot(new.affine[:, 3]))
print('\n'.join([str(row) for row in _afni_warpdrive_for(new.affine, nii.affine)]))
# +
print(new.affine[:, 3] - nii.affine[:, 3])
print(nb.as_closest_canonical(new).affine)
print(new.affine)
print(new.header.get_best_affine())
# -
afni_dicom = np.diag([-1, -1, 1, 1]).dot(new.affine)
print(afni_dicom)
# +
deob = nb.load('deob.nii.gz')
print(nii.affine)
print(new.affine)
print(deob.affine)
# -
print(new.affine[:, -1])
print(R.dot(deob.affine[:, -1]))
# +
center = lps.affine[:3, :3].dot(np.array(lps.shape) - 0.5) * -0.5
origin = lps.affine[:3, -1]
print(center)
print(center - origin)
# -
center = lpsnew.affine[:3, :3].dot(np.array(lpsnew.shape) - 0.5) * -0.5
origin = lpsnew.affine[:3, -1]
print(center - origin)
R_afni = deob.affine.dot(np.linalg.inv(lpsnew.affine))
print(R_afni)
print(np.linalg.inv(R))
print(deob.header.extensions[0].get_content().decode())
R_afni = nb.load('deob.nii.gz').affine.dot(np.linalg.inv(new.affine))
print(R_afni)
print(np.linalg.inv(R))
R_afni = new.affine.dot(np.linalg.inv(nb.load('deob.nii.gz').affine))
print(R_afni)
print(R)
#!3dWarp -deoblique -prefix deob.nii.gz -NN someones_anatomy_rot.nii.gz
#!3dAllineate -base someones_anatomy.nii.gz -input someones_anatomy.nii.gz -master deob.nii.gz -1Dmatrix_apply for2.afni -prefix deob-for2.nii.gz -final NN
nb.load('deob.nii.gz').orthoview()
# nb.load('moved-for-T-inv.nii.gz').orthoview()
# nb.load('moved-inv-T-for.nii.gz').orthoview()
# nb.load('deob-for.nii.gz').orthoview()
nb.load('deob-for2.nii.gz').orthoview()
# nb.load('moved-affine-oblique-inv-T-for.nii.gz').orthoview()
# nb.load('moved-affine-oblique-inv-T-for-master.nii.gz').orthoview()
deob = nb.load('deob.nii.gz')
nb.Nifti1Image(np.abs(deob.get_fdata() - nb.load('deob-for2.nii.gz').get_fdata()), deob.affine, deob.header).orthoview()
| docs/notebooks/02 - AFNI Deoblique.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:cse6363] *
# language: python
# name: conda-env-cse6363-py
# ---
# +
import torch
import torch.nn as nn
import torch.optim as optim
import torchvision.models as models
from torchvision import transforms
from torchvision.datasets import Food101
from pytorch_lightning.callbacks import ModelCheckpoint
import pytorch_lightning as pl
class ImagenetTransferLearning(pl.LightningModule):
def __init__(self, data_path, batch_size, lr):
super().__init__()
self.data_path = data_path
self.batch_size = batch_size
self.lr = lr
# Data preparation
dataset = Food101(data_path, transform=transforms.Compose([
transforms.RandAugment(),
transforms.RandomCrop(224),
transforms.ToTensor(),
]), download=True)
dataset_size = len(dataset)
train_size = int(dataset_size * .95)
val_size = dataset_size - train_size
self.train_dataset, self.val_dataset = torch.utils.data.random_split(dataset, [train_size, val_size])
# Loss function
self.loss_fn = nn.CrossEntropyLoss()
# init a pretrained resnet
backbone = models.resnet50(pretrained=True)
num_filters = backbone.fc.in_features
layers = list(backbone.children())[:-1]
self.feature_extractor = nn.Sequential(*layers)
# use the pretrained model to classify food101
num_target_classes = 101
self.classifier = nn.Linear(num_filters, num_target_classes)
def forward(self, x):
self.feature_extractor.eval()
with torch.no_grad():
representations = self.feature_extractor(x).flatten(1)
x = self.classifier(representations)
return x
def training_step(self, batch, batch_idx):
input, target = batch
output = self(input)
loss = self.loss_fn(output, target)
self.log("train_loss", loss)
return loss
def validation_step(self, batch, batch_idx):
input, target = batch
output = self(input)
loss = self.loss_fn(output, target)
self.log("val_loss", loss)
def configure_optimizers(self):
return optim.Adam(self.parameters(), lr=self.lr)
def train_dataloader(self):
return torch.utils.data.DataLoader(self.train_dataset,
batch_size=self.batch_size,
num_workers=8,
shuffle=True)
def val_dataloader(self):
return torch.utils.data.DataLoader(self.val_dataset,
batch_size=self.batch_size,
num_workers=8,
shuffle=False)
# +
checkpoint_callback = ModelCheckpoint(
monitor="val_loss",
mode="min"
)
model = ImagenetTransferLearning("/home/alex/Data/food/", 32, 1e-3)
trainer = pl.Trainer(accelerator="gpu", callbacks=[checkpoint_callback], max_epochs=5)
trainer.fit(model)
| deep_learning/transfer_learning.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/tiffypabo/OOP-58001/blob/main/Operations_and_Expressions_in_Python.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="r39xdfXlkPi8"
# Boolean Operators
# + colab={"base_uri": "https://localhost:8080/"} id="I_51QR20kfym" outputId="a0e77642-7467-4282-b3ce-fd9736c36b3a"
print(10>9)
print(10==9)
print(10!=9)
print(10<9)
# + [markdown] id="8csQdC8glxnI"
# Bool() function
# + colab={"base_uri": "https://localhost:8080/"} id="qOWIQNPEl40g" outputId="c30e2331-d97a-4a06-8184-7bb083072d2e"
print(bool("Tiffany"))
print(bool(1005))
print(bool(0))
print(bool(1))
print(bool(None))
# + [markdown] id="QStZvXN-mlT6"
# Function can return Boolean
# + colab={"base_uri": "https://localhost:8080/"} id="YnXkARZAmqdn" outputId="ce0aedd0-976f-4598-f2dc-f2d0e53d7302"
def myFunction():
return True
print(myFunction())
# + colab={"base_uri": "https://localhost:8080/"} id="0OoQZkLhni7j" outputId="8fddb48d-8a96-4507-df96-6430168759ae"
def myFunction():
return True
if myFunction():
print("Yes.")
else:
print("No.")
# + [markdown] id="So0EkOUhobY0"
# Application 1
# + colab={"base_uri": "https://localhost:8080/"} id="3h5nAWdEoTFG" outputId="0e61b2c6-9748-452a-e17f-b2309beeee29"
a=6
b=7
print(a>b)
if a<b:
print("a is less than b")
else:
print("a is greater than b")
print(a==b)
print(a!=a)
# + [markdown] id="sm9GfMIHrI8G"
# Python Operators
# + colab={"base_uri": "https://localhost:8080/"} id="8oFHXDWerMwK" outputId="3ee4c1c0-fbb9-4a77-df39-517ea09eccfa"
print(10+3)
print(10-3)
print(10*3)
print(10//3)
print(10%3)
print(10/3)
# + [markdown] id="aK6Vb8l2rgn7"
# Bitwise Operators
# + colab={"base_uri": "https://localhost:8080/"} id="AEHGs85Trin-" outputId="40edda0b-ed09-4b16-ecd8-cc2d26e42077"
a=60
b=13
print(a>b)
if a<b:
print("a is less than b")
else:
print("a is greater than b")
print(a==b)
print(a!=a)
# + colab={"base_uri": "https://localhost:8080/"} id="SPc-xwWrr3Zt" outputId="3c179e9c-6db7-4315-e4b1-4856c55ed67e"
# a = 60 , binary 0011 1100
# b = 13 , binary 0000 1101
print(a&b)
print(a|b)
print(a^b)
print(a<<1)
print(a<<2)
# + [markdown] id="WPaIVM_SvZFy"
# Application 2
#
# + colab={"base_uri": "https://localhost:8080/"} id="HnmmKKoNw-XN" outputId="b43a25b1-ca90-4cae-b98e-28b5859f70fb"
#Assignment Operators
x=10
print(x+3)
print(x-3)
print(x*3)
print(x/3)
print(x%3)
# + [markdown] id="d8QJHufXxzHV"
# Logical Operators
# + colab={"base_uri": "https://localhost:8080/"} id="0cbe-vE9x1Oz" outputId="83373be0-f510-42f6-c5ca-6ceeddca8165"
k = True
l = False
print(k and l)
print(k or l)
print(not(k or l))
# + [markdown] id="B1Nc-rg1zhl2"
# Identity Operators
# + colab={"base_uri": "https://localhost:8080/"} id="PU3N5Q_TzlFh" outputId="42cddab0-5959-4b5a-a46c-3cd90dd07000"
print(k is l)
print(k is not l)
# + [markdown] id="kmZ0uZtqzoHQ"
# # Control Structure
# + [markdown] id="jNyuDbdgzr_w"
# If statement
# + colab={"base_uri": "https://localhost:8080/"} id="-RKGe8mWzrEp" outputId="fb412265-fe2f-4dab-f2c5-d41684a9dd05"
v = 2
z = 1
if 1<2:
print("1 is less than 2")
# + [markdown] id="Tjn40mJ2z9Vi"
# Elif Statement
# + colab={"base_uri": "https://localhost:8080/"} id="7bemzNGKz_Jf" outputId="b59cc0b3-f45f-46f8-d3e4-7fcd52b5bc9d"
if v<z:
print("v is less than z")
elif v>z:
print("v is greater than z")
# + [markdown] id="8LDTOkqE0gTL"
# Else Statement
# + colab={"base_uri": "https://localhost:8080/"} id="D1Wu11b70JWw" outputId="308aa299-bc55-43fe-8be9-78c76352a463"
if v<z:
print("v is less than z")
elif v>z:
print("v is greater than z")
else:
print("v is equal to z")
# + [markdown] id="br2cxQDY21BO"
# Application 3 - Develop a Python program that will accept if a person is entitled to vote or not
# + colab={"base_uri": "https://localhost:8080/"} id="BBtBYonc29ha" outputId="73e1f0bd-39cb-4569-aa80-97214b4dd33a"
age = int(input())
if age>=18:
print("You are qualified to vote")
else:
print("You are not qualified to vote")
# + [markdown] id="S2IobMXb3VF1"
# Nested If... Else
# + colab={"base_uri": "https://localhost:8080/"} id="siJDv5EW3YCp" outputId="2cfe8ba5-0cd6-4380-a7a3-defefe2aaf01"
u = int(input())
if u>10:
print("u is above 10")
if u>20:
print("u is above 20")
if u>30:
print("u is above 30")
if u>40:
print("u is above 40")
else:
print("u is below 40")
if u>50:
print("u is above 50")
else:
print("u is below 50")
# + [markdown] id="VHWjNnw564Jk"
# #Loop Structure
# + colab={"base_uri": "https://localhost:8080/"} id="N_nPfyUH68Mu" outputId="3f139cc3-d9e9-4268-e35f-4a0cb068a454"
week = ['Sunday','Monday','Tuesday','Wednesday','Thursday','Friday','Saturday']
season = ["rainy","sunny"]
for x in week:
for y in season:
print(y,x)
# + [markdown] id="7RQkoI-X8kOL"
# The break statement
# + colab={"base_uri": "https://localhost:8080/"} id="5vzbisjX8lyw" outputId="6b2dfac5-937c-433e-9ed3-87fdbd6c5f0a"
for x in week:
if x == "Thursday":
break
print(x)
# + colab={"base_uri": "https://localhost:8080/"} id="6E6SjH81-LpO" outputId="ec158447-15c9-40d8-e198-6782f8270065"
for x in week:
if x == "Thursday":
break
print(x)
# + colab={"base_uri": "https://localhost:8080/"} id="Kczy7T3m-6Hj" outputId="c99f877a-3974-4f8d-a4b3-413e173c14a8"
#To display Sunday to Thursday
for x in week:
print(x)
if x=="Thursday":
break
# + [markdown] id="Z2bcMjie_69b"
# While Loop
# + colab={"base_uri": "https://localhost:8080/"} id="dRj0p7Mi_8u0" outputId="927224d6-92d0-4606-cc81-609a4b9a467b"
i=1
while i<6:
print(i)
i+=1
# + [markdown] id="Q4BzOFxRAQ4u"
# Application 4 - Create a Python program that displays numbers from 1 to 4 using While loop statement
# + colab={"base_uri": "https://localhost:8080/"} id="N-tOgX5fAaHp" outputId="e3f74c11-107a-407f-ce28-658328c0f5cc"
j = 1
while j<=4:
print(j)
j+=1
# + [markdown] id="qa28yy8kA-5N"
# Application 5 - Create a Python program that displays 4 using While loop statement and break statement
# + colab={"base_uri": "https://localhost:8080/"} id="PE02sXrZBN77" outputId="dbc1757e-80e4-47e1-cd23-c4bc6d1db7cb"
j = 1
while j<=4:
if j==4:
print(j)
j+=1
| Operations_and_Expressions_in_Python.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/Rishit-dagli/Deep-Learning-With-TensorFlow-Blog-series/blob/master/Part%202-Computer%20Vision%20with%20TensorFlow/Callbacks_example_Notebook.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + id="N9-BCmi15L93" colab_type="code" colab={}
import tensorflow as tf
class myCallback(tf.keras.callbacks.Callback):
def on_epoch_end(self, epoch, logs={}):
if(logs.get('acc') > 0.6):
print("\nReached 60% accuracy so cancelling training!")
self.model.stop_training = True
mnist = tf.keras.datasets.fashion_mnist
(x_train, y_train),(x_test, y_test) = mnist.load_data()
x_train, x_test = x_train / 255.0, x_test / 255.0
callbacks = myCallback()
model = tf.keras.models.Sequential([
tf.keras.layers.Flatten(input_shape=(28, 28)),
tf.keras.layers.Dense(512, activation=tf.nn.relu),
tf.keras.layers.Dense(10, activation=tf.nn.softmax)
])
model.compile(optimizer='adam',
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
model.fit(x_train, y_train, epochs=10, callbacks=[callbacks])
| Part 2-Computer Vision with TensorFlow/Callbacks_example_Notebook.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
# CONSTANTS - DO NOT CHANGE
G = 6.67E-11 # Gravitational Constant
g_0 = 9.8 # Acceleration due to gravity on earth
# +
# Parameters
M_M = 5.972E24 # Mass of orbited body (kg)
R_M = 6371 # Radius of orbited body (km)
w_M = 7.11E-5 # Equatorial angular velocity (rad/s)
mpay = 10000 # payload mass (kg)
Isp_1 = 310 # First Stage Isp (s)
Isp_2 = 340 # Second Stage Isp (s)
fInert1 = .3 # First Stage inert mass fraction
fInert2 = .25 # Second Stage inert Mass fraction
site_lattitude = 40 # Launch Site lattitude (degrees)
target_inclination = 60 # Target orbital inclination (degrees)
target_altitude = 100 # Target orbital altittude (km)
twr_1 = 1.4 # Thrust to weight ratio for stage one, recommended > 1.4
twr_2 = 1.3 # Thrust to weight ratio for stage two, recommended > 1.4
# +
g_M = G*M_M/(R_M*1000)**2 # Acceleration due to gravity for orbited body (m/s^2)
lat_rad = np.radians(site_lattitude)
inc_rad = np.radians(target_inclination)
azimuth = np.degrees(np.arcsin(np.cos(inc_rad)/np.cos(lat_rad))) # degrees
dV_earth_help = w_M*R_M*np.cos(lat_rad)*np.sin(np.radians(azimuth))*1000 # m/s
escape_vel_ideal = np.sqrt(2*G*M_M/((R_M + target_altitude)*1000)) - dV_earth_help # m/s
dV_safety_factor = 1.1
dV = escape_vel_ideal*dV_safety_factor
print('Target Inclination: %.2f deg \nTarget Altitude: %.2f km \nCalculated Azimuth: %.2f deg\nTotal dV: %.2f m/s'%(target_inclination, target_altitude, azimuth, dV))
# +
f1 = np.linspace(0, 1, 300)
f2 = 1 - f1
c_1 = g_0*Isp_1
c_2 = g_0*Isp_2
p2 = np.exp(f2*dV/c_2)*(1-fInert2)/(1-fInert2*np.exp(f2*dV/c_2))
p1 = np.exp(f1*dV/c_1)*(1-fInert1)/(1-fInert1*np.exp(f1*dV/c_1))
f_pay = p1*p2
pay_cons = f_pay[f_pay>0]
f1_cons = f1[f_pay>0]
lowest_ind = np.where(pay_cons == min(pay_cons))[0][0]
cor_f1 = f1_cons[lowest_ind]
cor_f2 = 1-cor_f1
cor_tot_mass = pay_cons[lowest_ind]
dV1 = cor_f1*dV
dV2 = cor_f2*dV
m_prop_2 = mpay*(1-np.exp(dV2/c_2))*(1-fInert2)/(1-fInert2*np.exp(dV2/c_2))
m_inert_2 = fInert2*m_prop_2/(1-fInert2)
m_final_2 = m_inert_2+mpay
m_initial_2 = m_final_2+m_prop_2
m_pay_1 = (mpay+m_initial_2)
m_prop_1 = m_pay_1*(1-np.exp(dV1/c_1))*(1-fInert1)/(1-fInert1*np.exp(dV1/c_1))
m_inert_1 = fInert1*m_prop_1/(1-fInert1)
m_final_1 = m_inert_1+m_pay_1
m_initial_1 = m_final_1+m_prop_1
tot_mass = m_initial_2 + m_initial_1
thrust_1 = twr_1*tot_mass*g_M
mdot_1 = thrust_1/c_1
t_b_1 = m_prop_1/mdot_1
thrust_2 = twr_2*m_final_1*g_M
mdot_2 = thrust_2/c_2
t_b_2 = m_prop_2/mdot_2
print('dV Splitting - 2 Stages')
print('dV Stage 1: %.2f m/s\ndV Stage 2: %.2f m/s'%(dV1, dV2))
if np.any(np.array([m_prop_2, m_inert_2, m_final_2, m_inert_1, m_final_1, m_initial_1]) < 0):
print('Error: Mass Calculation Failed')
else:
print('\nStage 1')
print('Masses:')
print(' Dry Mass: %.2f kg'%(m_final_1))
print(' Propellant Mass: %.2f kg'%(m_prop_1))
print(' Mass of Stage 2: %.2f kg'%(m_pay_1))
print('Engine Information:')
print(' Thrust: %.2f N'%(thrust_1))
print(' Mass Flow Rate: %.2f kg/s'%(mdot_1))
print(' Burn Duration: %.2f s'%(t_b_1))
print('\nStage 2')
print('Masses:')
print(' Dry Mass: %.2f kg'%(m_final_2))
print(' Propellant Mass: %.2f kg'%(m_prop_2))
print(' Payload Mass: %.2f kg'%(mpay))
print('Engine Information:')
print(' Thrust: %.2f N'%(thrust_2))
print(' Mass Flow Rate: %.2f kg/s'%(mdot_2))
print(' Burn Duration: %.2f s'%(t_b_2))
# -
| launch_vehicle_sizing.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # GCP Dataflow Component Sample
# A Kubeflow Pipeline component that prepares data by submitting an Apache Beam job (authored in Python) to Cloud Dataflow for execution. The Python Beam code is run with Cloud Dataflow Runner.
#
# ## Intended use
#
# Use this component to run Python Beam code and submit a Cloud Dataflow job as a step of a Kubeflow pipeline.
#
# ## Runtime arguments
# Name | Description | Optional | Data type| Accepted values | Default |
# :--- | :----------| :----------| :----------| :----------| :---------- |
# python_file_path | The path to the Cloud Storage bucket or local directory containing the Python file to be run. | | GCSPath | | |
# project_id | The ID of the Google Cloud Platform (GCP) project containing the Cloud Dataflow job.| | String | | |
# region | The Google Cloud Platform (GCP) region to run the Cloud Dataflow job.| | String | | |
# staging_dir | The path to the Cloud Storage directory where the staging files are stored. A random subdirectory will be created under the staging directory to keep the job information. This is done so that you can resume the job in case of failure. `staging_dir` is passed as the command line arguments (`staging_location` and `temp_location`) of the Beam code. | Yes | GCSPath | | None |
# requirements_file_path | The path to the Cloud Storage bucket or local directory containing the pip requirements file. | Yes | GCSPath | | None |
# args | The list of arguments to pass to the Python file. | No | List | A list of string arguments | None |
# wait_interval | The number of seconds to wait between calls to get the status of the job. | Yes | Integer | | 30 |
#
# ## Input data schema
#
# Before you use the component, the following files must be ready in a Cloud Storage bucket:
# - A Beam Python code file.
# - A `requirements.txt` file which includes a list of dependent packages.
#
# The Beam Python code should follow the [Beam programming guide](https://beam.apache.org/documentation/programming-guide/) as well as the following additional requirements to be compatible with this component:
# - It accepts the command line arguments `--project`, `--region`, `--temp_location`, `--staging_location`, which are [standard Dataflow Runner options](https://cloud.google.com/dataflow/docs/guides/specifying-exec-params#setting-other-cloud-pipeline-options).
# - It enables `info logging` before the start of a Cloud Dataflow job in the Python code. This is important to allow the component to track the status and ID of the job that is created. For example, calling `logging.getLogger().setLevel(logging.INFO)` before any other code.
#
#
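# As an illustration only, here is a minimal sketch of a Beam script that meets both
# requirements above: it enables INFO logging before anything else and lets
# `PipelineOptions` pick up `--project`, `--region`, `--temp_location` and
# `--staging_location` from the command line. The `--output` argument and the
# word-count logic are assumptions for this sketch, not the shipped sample.
#
# ```python
# import argparse
# import logging
#
# import apache_beam as beam
# from apache_beam.options.pipeline_options import PipelineOptions
#
#
# def run(argv=None):
#     # Required so the component can track the job status and ID from the logs.
#     logging.getLogger().setLevel(logging.INFO)
#     parser = argparse.ArgumentParser()
#     parser.add_argument('--output', required=True)  # assumed custom argument
#     known_args, pipeline_args = parser.parse_known_args(argv)
#     # --project, --region, --temp_location, --staging_location stay in pipeline_args.
#     with beam.Pipeline(options=PipelineOptions(pipeline_args)) as p:
#         (p
#          | beam.Create(['to be or not to be'])
#          | beam.FlatMap(str.split)
#          | beam.combiners.Count.PerElement()
#          | beam.Map(lambda kv: '{}: {}'.format(kv[0], kv[1]))
#          | beam.io.WriteToText(known_args.output))
#
#
# if __name__ == '__main__':
#     run()
# ```
#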
# ## Output
# Name | Description
# :--- | :----------
# job_id | The id of the Cloud Dataflow job that is created.
#
# ## Cautions & requirements
# To use the components, the following requirements must be met:
# - Cloud Dataflow API is enabled.
# - The component runs under a Kubeflow user service account (provided as a secret) in a Kubeflow Pipeline cluster. For example:
# ```
# component_op(...)
# ```
# The Kubeflow user service account is a member of:
# - `roles/dataflow.developer` role of the project.
# - `roles/storage.objectViewer` role of the Cloud Storage Objects `python_file_path` and `requirements_file_path`.
# - `roles/storage.objectCreator` role of the Cloud Storage Object `staging_dir`.
#
# ## Detailed description
# The component does several things during the execution:
# - Downloads `python_file_path` and `requirements_file_path` to local files.
# - Starts a subprocess to launch the Python program.
# - Monitors the logs produced from the subprocess to extract the Cloud Dataflow job information.
# - Stores the Cloud Dataflow job information in `staging_dir` so the job can be resumed in case of failure.
# - Waits for the job to finish.
#
# # Setup
# + pycharm={"name": "#%%\n"} tags=["parameters"]
project = 'Input your PROJECT ID'
region = 'Input GCP region' # For example, 'us-central1'
output = 'Input your GCS bucket name' # No ending slash
# + [markdown] pycharm={"name": "#%% md\n"}
# ## Install Pipeline SDK
# -
# !python3 -m pip install 'kfp>=0.1.31' --quiet
# + [markdown] pycharm={"name": "#%%\n"}
#
# ## Load the component using KFP SDK
#
# + pycharm={"name": "#%%\n"}
import kfp.components as comp
dataflow_python_op = comp.load_component_from_url(
'https://raw.githubusercontent.com/kubeflow/pipelines/1.4.0/components/gcp/dataflow/launch_python/component.yaml')
help(dataflow_python_op)
# -
# ## Use the wordcount python sample
# In this sample, we run a wordcount sample code in a Kubeflow Pipeline. The output will be stored in a Cloud Storage bucket. Here is the sample code:
# !gsutil cat gs://ml-pipeline-playground/samples/dataflow/wc/wc.py
# ## Example pipeline that uses the component
import kfp
import kfp.dsl as dsl
import json
output_file = '{}/wc/wordcount.out'.format(output)
@dsl.pipeline(
name='Dataflow launch python pipeline',
description='Dataflow launch python pipeline'
)
def pipeline(
python_file_path = 'gs://ml-pipeline/sample-pipeline/word-count/wc.py',
project_id = project,
region = region,
staging_dir = output,
requirements_file_path = 'gs://ml-pipeline/sample-pipeline/word-count/requirements.txt',
args = json.dumps([
'--output', output_file
]),
wait_interval = 30
):
dataflow_python_op(
python_file_path = python_file_path,
project_id = project_id,
region = region,
staging_dir = staging_dir,
requirements_file_path = requirements_file_path,
args = args,
wait_interval = wait_interval)
# ## Submit the pipeline for execution
kfp.Client().create_run_from_pipeline_func(pipeline, arguments={})
# #### Inspect the output
# !gsutil cat $output_file
# ## References
# * [Component python code](https://github.com/kubeflow/pipelines/blob/master/components/gcp/container/component_sdk/python/kfp_component/google/dataflow/_launch_python.py)
# * [Component docker file](https://github.com/kubeflow/pipelines/blob/master/components/gcp/container/Dockerfile)
# * [Sample notebook](https://github.com/kubeflow/pipelines/blob/master/components/gcp/dataflow/launch_python/sample.ipynb)
# * [Dataflow Python Quickstart](https://cloud.google.com/dataflow/docs/quickstarts/quickstart-python)
| courses/machine_learning/deepdive2/production_ml/labs/samples/core/dataflow/dataflow.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# reload packages
# %load_ext autoreload
# %autoreload 2
# ### Choose GPU
# %env CUDA_DEVICE_ORDER=PCI_BUS_ID
# %env CUDA_VISIBLE_DEVICES=1
import tensorflow as tf
gpu_devices = tf.config.experimental.list_physical_devices('GPU')
if len(gpu_devices)>0:
tf.config.experimental.set_memory_growth(gpu_devices[0], True)
print(gpu_devices)
tf.keras.backend.clear_session()
# ### Load packages
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
from tqdm.autonotebook import tqdm
from IPython import display
import pandas as pd
import umap
import copy
import os, tempfile
import tensorflow_addons as tfa
# ### parameters
# +
dataset = "cifar10"
labels_per_class = 16 # 'full'
n_latent_dims = 1024
confidence_threshold = 0.8 # minimum confidence to include in UMAP graph for learned metric
learned_metric = True # whether to use a learned metric, or Euclidean distance between datapoints
augmented = True #
min_dist= 0.001 # min_dist parameter for UMAP
negative_sample_rate = 5 # how many negative samples per positive sample
batch_size = 128 # batch size
optimizer = tf.keras.optimizers.Adam(1e-3) # the optimizer to train
optimizer = tfa.optimizers.MovingAverage(optimizer)
label_smoothing = 0.2 # how much label smoothing to apply to categorical crossentropy
max_umap_iterations = 50 # how many times, maximum, to recompute UMAP
max_epochs_per_graph = 50 # how many epochs maximum each graph trains for (without early stopping)
umap_patience = 5 # how long before recomputing UMAP graph
# +
from datetime import datetime
datestring = datetime.now().strftime("%Y_%m_%d_%H_%M_%S_%f")
datestring = (
str(dataset)
+ "_"
+ str(confidence_threshold)
+ "_"
+ str(labels_per_class)
+ "____"
+ datestring
+ '_umap_augmented'
)
print(datestring)
# -
# #### Load dataset
from tfumap.semisupervised_keras import load_dataset
(
X_train,
X_test,
X_labeled,
Y_labeled,
Y_masked,
X_valid,
Y_train,
Y_test,
Y_valid,
Y_valid_one_hot,
Y_labeled_one_hot,
num_classes,
dims
) = load_dataset(dataset, labels_per_class)
# ### load architecture
from tfumap.semisupervised_keras import load_architecture
encoder, classifier, embedder = load_architecture(dataset, n_latent_dims)
# ### load pretrained weights
from tfumap.semisupervised_keras import load_pretrained_weights
encoder, classifier = load_pretrained_weights(dataset, augmented, labels_per_class, encoder, classifier)
# #### compute pretrained accuracy
# test current acc
pretrained_predictions = classifier.predict(encoder.predict(X_test, verbose=True), verbose=True)
pretrained_predictions = np.argmax(pretrained_predictions, axis=1)
pretrained_acc = np.mean(pretrained_predictions == Y_test)
print('pretrained acc: {}'.format(pretrained_acc))
# ### get a, b parameters for embeddings
from tfumap.semisupervised_keras import find_a_b
a_param, b_param = find_a_b(min_dist=min_dist)
# ### build network
from tfumap.semisupervised_keras import build_model
model = build_model(
batch_size=batch_size,
a_param=a_param,
b_param=b_param,
dims=dims,
encoder=encoder,
classifier=classifier,
negative_sample_rate=negative_sample_rate,
optimizer=optimizer,
label_smoothing=label_smoothing,
embedder = embedder,
)
# ### build labeled iterator
from tfumap.semisupervised_keras import build_labeled_iterator
labeled_dataset = build_labeled_iterator(X_labeled, Y_labeled_one_hot, augmented, dims)
# ### training
from livelossplot import PlotLossesKerasTF
from tfumap.semisupervised_keras import get_edge_dataset
from tfumap.semisupervised_keras import zip_datasets
# #### callbacks
# +
# early stopping callback
early_stopping = tf.keras.callbacks.EarlyStopping(
monitor='val_classifier_acc', min_delta=0, patience=15, verbose=0, mode='auto',
baseline=None, restore_best_weights=False
)
# plot losses callback
groups = {'accuracy': ['classifier_accuracy', 'val_classifier_accuracy'], 'loss': ['classifier_loss', 'val_classifier_loss']}
plotlosses = PlotLossesKerasTF(groups=groups)
# -
history_list = []
current_validation_acc = 0
batches_per_epoch = np.floor(len(X_train)/batch_size).astype(int)
epochs_since_last_improvement = 0
for current_umap_iterations in tqdm(np.arange(max_umap_iterations)):
# make dataset
edge_dataset = get_edge_dataset(
model,
classifier,
encoder,
X_train,
Y_masked,
batch_size,
confidence_threshold,
labeled_dataset,
dims,
learned_metric = learned_metric
)
# zip dataset
zipped_ds = zip_datasets(labeled_dataset, edge_dataset, batch_size)
# train dataset
history = model.fit(
zipped_ds,
epochs=max_epochs_per_graph,
validation_data=(
(X_valid, tf.zeros_like(X_valid), tf.zeros_like(X_valid)),
{"classifier": Y_valid_one_hot},
),
callbacks = [early_stopping, plotlosses],
max_queue_size = 100,
steps_per_epoch = batches_per_epoch,
#verbose=0
)
history_list.append(history)
# get validation acc
pred_valid = classifier.predict(encoder.predict(X_valid))
new_validation_acc = np.mean(np.argmax(pred_valid, axis = 1) == Y_valid)
# if validation accuracy has gone up, mark the improvement
if new_validation_acc > current_validation_acc:
epochs_since_last_improvement = 0
current_validation_acc = copy.deepcopy(new_validation_acc)
else:
epochs_since_last_improvement += 1
if epochs_since_last_improvement > umap_patience:
            print('No improvement in {} UMAP iterations'.format(umap_patience))
break
fig, ax = plt.subplots(figsize=(20,3))
ax.plot(np.concatenate([i.history['val_classifier_accuracy'] for i in history_list]))
# ## Save results
from tfumap.paths import MODEL_DIR, ensure_dir
save_folder = MODEL_DIR / 'semisupervised-keras' / dataset / str(labels_per_class) / datestring
ensure_dir(save_folder)
# ### save weights
encoder.save_weights((save_folder / "encoder").as_posix())
classifier.save_weights((save_folder / "classifier").as_posix())
# ### save score
class_pred = classifier.predict(encoder.predict(X_test))
class_acc = np.mean(np.argmax(class_pred, axis=1) == Y_test)
print(class_acc)
np.save(save_folder / 'test_loss.npy', (np.nan, class_acc))
# ### save embedding
z = encoder.predict(X_train)
# +
reducer = umap.UMAP(verbose=True)
embedding = reducer.fit_transform(z.reshape(len(z), np.product(np.shape(z)[1:])))
plt.scatter(embedding[:, 0], embedding[:, 1], c=Y_train.flatten(), s= 1, alpha = 0.1, cmap = plt.cm.tab10)
# -
np.save(save_folder / 'train_embedding.npy', embedding)
# ### save history
import pickle
with open(save_folder / 'history.pickle', 'wb') as file_pi:
pickle.dump([i.history for i in history_list], file_pi)
| notebooks/semisupervised/cifar10/learned-metric/old/old-augmented-Y/cifar10-aug-16ex-learned-0.8conf-Y.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
dic1 = {1:10, 2:20}
dic2 = {3:30, 4:40}
dic3 = {5:50, 6:60}
dic4 = {}
for x in (dic1, dic2, dic3):
dic4.update(x)
print(dic4)
# # [Sum of the First 7 Fibonacci Terms](https://academy.dqlab.id/main/projectcode/158/290/1280)
# Define a function that sums the Fibonacci series
def calculateSum(n):
if n <= 0:
return 0
fibo = [0] * (n + 1)
fibo[1] = 1
    # Initialize the result in the variable sm
sm = fibo[0] + fibo[1]
    # Add the remaining terms of the series
for i in range(2, n + 1):
fibo[i] = fibo[i - 1] + fibo[i - 2]
sm += fibo[i]
return sm
# Evaluate the series sum for n = 7
print(calculateSum(7))
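# As a sanity check, the partial sums of the Fibonacci sequence satisfy
# F(0) + F(1) + ... + F(n) = F(n+2) - 1, so for n = 7 the expected output is
# F(9) - 1 = 34 - 1 = 33, which matches the value printed above.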
| Project/Python/Data Science Challenge with Python/Data Science Challenge with Python.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# +
# default_exp swaps
# hide
_FNAME='swaps'
import unittest
from unittest import mock
from nbdev.export import notebook2script
import os
TESTCASE = unittest.TestCase()
_nbpath = os.path.join(_dh[0], _FNAME+'.ipynb')
# +
#export
import pandas as pd
import numpy as np
from sovrynkg.knowledge_graph import Query
from sovrynkg.contracts import whatis
def get_swap_df(skip=None, limit=None):
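    """Query swap calls made against the Sovryn AMM contract in the knowledge graph.

    Returns a pandas DataFrame with the block timestamp, from/to token amounts,
    the tokens and trader resolved to names through `whatis` where known, and
    numeric amounts cast to double. `skip` and `limit` are forwarded to the
    Cypher query for pagination.
    """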
q = Query()
q.add("MATCH (b:Block)-[]->(:Transaction)-[]->()-[calls:CALLS]->(amm:Contract) ")
q.add("WHERE amm.address='0x98ace08d2b759a265ae326f010496bcd63c15afc'")
q.add("RETURN b.signed_at as signed_at,")
q.add("""
calls._toAmount as to_amount,
calls._fromAmount as from_amount,
calls._toToken as to_token,
calls._fromToken as from_token,
calls._smartToken as smart_token,
calls._trader as trader
""")
if skip:
q.add("SKIP {}".format(skip))
if limit:
q.add("LIMIT {}".format(limit))
swaps = q.data()
df = pd.DataFrame(swaps)
df['signed_at'] = pd.to_datetime(df['signed_at'])
df['to_token'] = df.apply(lambda row: lookup_token_name(row, 'to_token'), axis='columns')
df['from_token'] = df.apply(lambda row: lookup_token_name(row, 'from_token'), axis='columns')
df['trader'] = df.apply(lambda row: lookup_token_name(row, 'trader'), axis='columns')
df['to_amount'] = df.to_amount.astype(np.double)
df['from_amount'] = df.from_amount.astype(np.double)
return df
def lookup_token_name(row, col_name):
address = row[col_name]
matching_tokens = whatis(address)
if matching_tokens:
return matching_tokens[0].name
else:
return address
# -
notebook2script(_nbpath)
| nbs/swaps.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # FMA: A Dataset For Music Analysis
#
# <NAME>, <NAME>, <NAME>, <NAME>, EPFL LTS2.
#
# ## Free Music Archive web API
#
# All the data in the `fma.json` DataFrame was collected from the Free Music Archive [public API](https://freemusicarchive.org/api). With this notebook, you can:
# * reconstruct the original data,
# * update some fields, e.g. the `track_listens` (play count),
# * augment the data with other (potentially newer) fields provided by their API but not included in the release,
# * update the dataset with new songs added to the archive.
#
# Notes:
# * You need a key to access the API, which you can [request online](https://freemusicarchive.org/api/agreement) and write into your `.env` file as a new line reading `FMA_KEY=MYPERSONALKEY`.
# * Requests take a few hundred milliseconds to complete.
import utils
import IPython.display as ipd
import requests
import os
fma = utils.FreeMusicArchive(os.environ.get('FMA_KEY'))
# ## 1 Get recently added tracks
#
# Note that `track_id` are assigned in monotonically increasing order. Tracks may be removed, so that number does not indicate the number of available tracks.
for track_id, artist_name, date_created in zip(*fma.get_recent_tracks()):
print(track_id, date_created, artist_name)
# ## 2 Get meta-data about tracks, albums and artists
#
# Given IDs, we can get information about tracks, albums and artists. See the available fields in the [API documentation](https://freemusicarchive.org/api).
fma.get_track(track_id=2, fields=['track_title', 'track_date_created',
'track_duration', 'track_bit_rate',
'track_listens', 'track_interest', 'track_comments', 'track_favorites',
'artist_id', 'album_id'])
fma.get_track_genres(track_id=20)
fma.get_album(album_id=1, fields=['album_title', 'album_tracks',
'album_listens', 'album_comments', 'album_favorites',
'album_date_created', 'album_date_released'])
fma.get_artist(artist_id=1, fields=['artist_name', 'artist_location',
'artist_comments', 'artist_favorites'])
# ## 3 Get data, i.e. raw audio
#
# We can download the original track as well. Tracks are provided by the archive as MP3 with various bitrates.
fma.download_track(track_id=2, path='track.mp3')
# ## 4 Get genres
#
# Instead of compiling the genres of each track, we can get all the genres present on the archive by some API calls.
# + deletable=true editable=true
genres = utils.Genres(fma.get_all_genres())
print('{} genres'.format(genres.df.shape[0]))
genres.df[10:25]
# -
# And look for genres related to Rock.
genres.df[['Rock' in title for title in genres.df['genre_title']]]
# + deletable=true editable=true
genres.df[genres.df['genre_parent_id'] == 12]
# -
# As genres have parent genres, we can plot a tree using the [DOT] language.
#
# [DOT]: https://en.wikipedia.org/wiki/DOT_(graph_description_language)
graph = genres.create_tree([25, 31], 1)
ipd.Image(graph.create_png())
# Data cleaning: some genres returned by the archive have a `parent_id` which does not exist.
# +
# 13 (Easy Listening) has parent 126 which is missing
# --> a root genre on the website, although not in the genre menu
genres.df.loc[13, 'genre_parent_id'] = 0
# 580 (Abstract Hip-Hop) has parent 1172 which is missing
# --> listed as child of Hip-Hop on the website
genres.df.loc[580, 'genre_parent_id'] = 21
# 810 (Nu-Jazz) has parent 51 which is missing
# --> listed as child of Easy Listening on website
genres.df.loc[810, 'genre_parent_id'] = 13
# -
# Save the full genre tree as a PDF.
roots = genres.find_roots()
print('{} roots'.format(len(roots)))
graph = genres.create_tree(roots)
graph.write_pdf('genres.pdf');
| webapi.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Tidal elevation thresholding
#
# The script will download `n36.6w75.9.tif` from AWS bucket and perform simple thresholding with [rasterio](https://rasterio.readthedocs.io/en/latest/).
import numpy as np
import rasterio as rio
import matplotlib.pyplot as plt
# Download the example locally if not present. The AWS profile needs to be authorised to download the data.
# + language="bash"
# if [ ! -f n36.6w75.9.tif ]; then
# aws s3 cp s3://slr-data/slr1/ss2/lidar/tidel/VA/n36.6w75.9.tif .
# fi
# -
# ## Input parameters
threshold = 2
source = 'n36.6w75.9.tif'
destination = 'output.tif'
# This function displays the image
def show_image(path, nodata=None):
with rio.open(path) as f:
d = f.read(1) # read the data in (from the first band)
if nodata: # replace nodata if defined
d[d == nodata] = None
plt.figure(figsize=(15,15))
plt.imshow(d)
show_image(source, nodata=-9999)
# ## Process the data
#
# We load the data with rasterio and apply binary threshold to the elevation data.
with rio.open(source) as src:
data = src.read() < threshold
if data.sum() == 0:
print('Nothing below the threshold')
profile = src.profile
    profile['dtype'] = 'uint8' # We're going for a binary threshold and therefore adjusting the data type
profile.pop('nodata') # Current nodata value won't fit the data type
# ## Save the output
#
# In an effort to reduce the output size, we use only a single bit to encode the output. We use information from the original file to describe the coordinate system, size, etc.
with rio.open(destination, 'w', nbits=1, **profile) as dst:
dst.write(data.astype(np.uint8))
show_image(destination)
| 01_standalone_demo.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="YeiMK0WOG1Ox"
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# + id="5I4aqsryHEj1"
# !pip install qiskit
from qiskit import BasicAer
from qiskit.circuit.library import ZZFeatureMap
from qiskit.aqua import QuantumInstance, aqua_globals
from qiskit.aqua.algorithms import QSVM
from qiskit.aqua.components.multiclass_extensions import AllPairs
from qiskit.aqua.utils.dataset_helper import get_feature_dimension
# + id="EqHLLVm4KopV"
from google.colab import drive
drive.mount('/content/drive')
# + id="pnAwnnQKKu3S"
# %cd /content/drive/My Drive/Heart Dataset
# + id="exo1HWOZK6Ls"
df = pd.read_csv("./heart.csv")
df.columns = ['age', 'sex', 'cp', 'trestbps', 'chol',
'fbs', 'restecg', 'thalach', 'exang',
'oldpeak', 'slope', 'ca', 'thal', 'target']
df.isnull().sum()
df['target'] = df.target.map({0: 0, 1: 1, 2: 1, 3: 1, 4: 1})
df['sex'] = df.sex.map({'female': 0, 'male': 1})
df['thal'] = df.thal.fillna(df.thal.mean())
df['ca'] = df.ca.fillna(df.ca.mean())
# + id="6gqqdX4SHFbb"
from sklearn.model_selection import train_test_split
X = df.iloc[:, :-1].values
y = df.iloc[:, -1].values
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 0)
# + id="CiB5RCl9DTpC"
from sklearn.preprocessing import StandardScaler as ss
sc = ss()
X_train = sc.fit_transform(X_train)
X_test = sc.transform(X_test)
training_input = {'X': X_train,'Y': y_train}
testing_input = {'X': X_test,'Y': y_test}
temp = [testing_input[k] for k in testing_input]
total_array = np.concatenate(temp)
# + id="WX1JWmKoID3F"
aqua_globals.random_seed = 10598
backend = BasicAer.get_backend('qasm_simulator')
feature_map = ZZFeatureMap(feature_dimension=get_feature_dimension(training_input),
reps=2, entanglement='linear')
svm = QSVM(feature_map, training_input, testing_input, total_array,
multiclass_extension=AllPairs())
quantum_instance = QuantumInstance(backend, shots=1024,
seed_simulator=aqua_globals.random_seed,
seed_transpiler=aqua_globals.random_seed)
result = svm.run(quantum_instance)
for k,v in result.items():
print(f'{k} : {v}')
| Quantum Support Vector Machines MultiClass.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from typing import List

class Solution:
def canCompleteCircuit(self, gas: List[int], cost: List[int]) -> int:
n = len(gas)
if n == 0:
return -1
leave = 0
gas += gas
cost += cost
start = 0
        while start < n and gas[start] < cost[start]: # find a feasible starting point
start += 1
end = start
while start < n and end < 2 * n:
            if end - start >= n: # covered a full circle: found the answer
return start
leave += gas[end]
            if leave >= cost[end]: # greedy: keep moving right while the fuel allows
leave -= cost[end]
end += 1
            else: # need to find a new starting gas station
idx = start
                while idx < n and gas[idx] >= cost[idx]: # find the first station with a negative net gain
idx += 1
                while idx < n and gas[idx] < cost[idx]: # skip all stations with a negative net gain
idx += 1
                if idx >= n: # no such starting point exists
break
leave = 0
start = idx
if idx > end:
end = idx
return -1
# +
# Writing out the partial-sum expression proves the greedy property; a short sketch follows below.
# -
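# A sketch of the underlying argument (one standard way to justify the jumps above):
# let net(k) = gas[k] - cost[k]. If, starting from station i, the running sum
# net(i) + ... + net(j) first drops below zero at j, then for every m with i < m <= j
# the prefix net(i) + ... + net(m-1) is non-negative, so net(m) + ... + net(j) is even
# more negative and m also fails by station j. Candidate starts inside (i, j] can
# therefore be skipped, which is why a single pass over the doubled array suffices.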
| 贪心/#leetcode134.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from typing import List
# Definition for a binary tree node.
class TreeNode:
def __init__(self, x):
self.val = x
self.left = None
self.right = None
# +
class Solution:
    def listInOrder(self, node: TreeNode) -> List:
pre_list = list()
if node.left is not None:
pre_list += self.listInOrder(node.left)
pre_list += [node.val]
if node.right is not None:
pre_list += self.listInOrder(node.right)
return pre_list
| .ipynb_checkpoints/InOrderGraph-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# This example shows how to:
# 1. Load a counts matrix (10X Chromium data from human peripheral blood cells)
# 2. Run the default Scrublet pipeline
# 3. Check that doublet predictions make sense
# %matplotlib inline
import scrublet as scr
import scipy.io
import matplotlib.pyplot as plt
import numpy as np
import os
plt.rcParams['font.family'] = 'sans-serif'
plt.rcParams['font.sans-serif'] = 'Arial'
plt.rc('font', size=14)
plt.rcParams['pdf.fonttype'] = 42
# #### Download 8k PBMC data set from 10X Genomics
# Download raw data from this link:
# http://cf.10xgenomics.com/samples/cell-exp/2.1.0/pbmc8k/pbmc8k_filtered_gene_bc_matrices.tar.gz
#
#
# Or use wget:
# !wget http://cf.10xgenomics.com/samples/cell-exp/2.1.0/pbmc8k/pbmc8k_filtered_gene_bc_matrices.tar.gz
# Uncompress:
# !tar xfz pbmc8k_filtered_gene_bc_matrices.tar.gz
# #### Load counts matrix and gene list
# Load the raw counts matrix as a scipy sparse matrix with cells as rows and genes as columns.
# +
input_dir = 'filtered_gene_bc_matrices/GRCh38/'
counts_matrix = scipy.io.mmread(input_dir + '/matrix.mtx').T.tocsc()
genes = np.array(scr.load_genes(input_dir + 'genes.tsv', delimiter='\t', column=1))
print('Counts matrix shape: {} rows, {} columns'.format(counts_matrix.shape[0], counts_matrix.shape[1]))
print('Number of genes in gene list: {}'.format(len(genes)))
# -
# #### Initialize Scrublet object
# The relevant parameters are:
# - *expected_doublet_rate*: the expected fraction of transcriptomes that are doublets, typically 0.05-0.1. Results are not particularly sensitive to this parameter. For this example, the expected doublet rate comes from the Chromium User Guide: https://support.10xgenomics.com/permalink/3vzDu3zQjY0o2AqkkkI4CC
# - *sim_doublet_ratio*: the number of doublets to simulate, relative to the number of observed transcriptomes. This should be high enough that all doublet states are well-represented by simulated doublets. Setting it too high is computationally expensive. The default value is 2, though values as low as 0.5 give very similar results for the datasets that have been tested.
# - *n_neighbors*: Number of neighbors used to construct the KNN classifier of observed transcriptomes and simulated doublets. The default value of `round(0.5*sqrt(n_cells))` generally works well.
#
scrub = scr.Scrublet(counts_matrix, expected_doublet_rate=0.06)
# #### Run the default pipeline, which includes:
# 1. Doublet simulation
# 2. Normalization, gene filtering, rescaling, PCA
# 3. Doublet score calculation
# 4. Doublet score threshold detection and doublet calling
#
doublet_scores, predicted_doublets = scrub.scrub_doublets(min_counts=2,
min_cells=3,
min_gene_variability_pctl=85,
n_prin_comps=30)
# #### Plot doublet score histograms for observed transcriptomes and simulated doublets
# The simulated doublet histogram is typically bimodal. The left mode corresponds to "embedded" doublets generated by two cells with similar gene expression. The right mode corresponds to "neotypic" doublets, which are generated by cells with distinct gene expression (e.g., different cell types) and are expected to introduce more artifacts in downstream analyses. Scrublet can only detect neotypic doublets.
#
# To call doublets vs. singlets, we must set a threshold doublet score, ideally at the minimum between the two modes of the simulated doublet histogram. `scrub_doublets()` attempts to identify this point automatically and has done a good job in this example. However, if automatic threshold detection doesn't work well, you can adjust the threshold with the `call_doublets()` function. For example:
# ```python
# scrub.call_doublets(threshold=0.25)
# ```
scrub.plot_histogram();
# #### Get 2-D embedding to visualize the results
# +
print('Running UMAP...')
scrub.set_embedding('UMAP', scr.get_umap(scrub.manifold_obs_, 10, min_dist=0.3))
# # Uncomment to run tSNE - slow
# print('Running tSNE...')
# scrub.set_embedding('tSNE', scr.get_tsne(scrub.manifold_obs_, angle=0.9))
# # Uncomment to run force layout - slow
# print('Running ForceAtlas2...')
# scrub.set_embedding('FA', scr.get_force_layout(scrub.manifold_obs_, n_neighbors=5, n_iter=1000))
print('Done.')
# -
# #### Plot doublet predictions on 2-D embedding
# Predicted doublets should co-localize in distinct states.
# +
scrub.plot_embedding('UMAP', order_points=True);
# scrub.plot_embedding('tSNE', order_points=True);
# scrub.plot_embedding('FA', order_points=True);
| examples/scrublet_basics.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.5
# language: python
# name: py35
# ---
# # Welcome to Algosoc, the society for algorithmic trading and quantitative finance at Imperial
# ## Who are We?
#
# We were founded in March 2018 with the aim of providing education and networking opportunities for students interested in algorithmic trading and quantitative finance. In 2018 and 2019 we organised the Algothon with BlackRock, which is the flagship event of our society. Algothon 2020 was cancelled due to COVID-19. This year we will be running courses online.
#
# http://www.algosoc.com/
#
# Please join the society by subscribing on the union website. Membership is free. Becoming a member of the society allows you to join our exclusive networking events and receive up-to-date news through our mailing list.
#
# https://www.imperialcollegeunion.org/activities/a-to-z/algorithmic-trading
#
#
# ## Algosoc Portfolio
#
# We will be running trading strategies on different platforms, such as Quantopian. We welcome students to contribute by developing trading strategies and signals. We will guide you through the steps to write your first algo. Details on how to contribute will be announced through our mailing list.
#
# Our asset universe includes US Equities and Futures. Most strategies run at daily resolution.
#
#
# ## Algosoc Toolbox
#
# We are building a collection of tools useful for algorithmic trading, which are available on GitHub. If you would like to contribute, please email us.
#
# https://github.com/algotradingsoc/pedlar_2020
#
#
#
# Plan for today
#
# We will go through the real basics of (quantitative) finance for the rest of the session, followed by a Q&A. Experienced members are welcome to check out the tools we are building on GitHub.
#
#
# The notebook uses data from Quantopian and can only be run on their platform due to data licensing. Please create an account at Quantopian (https://www.quantopian.com/), as most of the teaching is done on Quantopian.
# Lecture 1: Understanding financial data
# ## Price data
#
# The most basic data dealt with in quantitative finance is price data, which represents how much a financial asset is worth at a given time. Traditionally (in the 90s), time series models such as the ARIMA model were widely used in trading to capture trends in the market. Recently, the focus has moved to deep learning models such as LSTMs and CNNs to learn more complicated behaviour.
#
# https://reference.wolfram.com/language/ref/ARIMAProcess.html
# https://www.tensorflow.org/api_docs/python/tf/keras/layers/LSTM
#
# Price data is determined by two characteristics: the frequency of the data and the type of data. Frequency can range from microsecond data (tick level) to end-of-day data (daily resolution). We will focus on data at a minute resolution or above. Major types of data include bar data, trade data and quote data.
#
# In an exchange, quote data (the order book) for a stock is displayed in real time, showing the bid and ask prices (with sizes) submitted by different market participants. A trade is made when someone is willing to buy the security at the ask price (or sell the security at the bid price). Bar data is then aggregated from the trade prices and volumes over an interval, usually a minute, an hour or a day.
#
# https://iextrading.com/apps/tops/
#
#
#
#
#
#
# ### Bar data on Quantopian
#
# Bar data for stocks is provided in the following format (OHLCV), which summarises the transactions conducted within the time period. Four representative prices are provided, namely Open, High, Low and Close. Volume is the total number of shares traded over the time period.
#
# https://images.app.goo.gl/7QKVKYWw9jpC4jVx5
#
#
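# To make the aggregation concrete, here is a small, self-contained illustration (plain
# pandas on synthetic data, not Quantopian data): one trade print per second is resampled
# into one-minute OHLCV bars. The `trades` frame and its columns are made up for this sketch.
# +
import numpy as np
import pandas as pd

# Synthetic trade prints: one price/size pair per second for five minutes.
idx = pd.date_range('2020-06-30 09:30', periods=300, freq='S')
trades = pd.DataFrame({'price': 100 + 0.01 * np.random.randn(300).cumsum(),
                       'size': np.random.randint(1, 500, size=300)}, index=idx)

bars = trades['price'].resample('1T').ohlc()           # Open/High/Low/Close per minute
bars['volume'] = trades['size'].resample('1T').sum()   # total traded size per minute
bars.head()
# -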
# Example of daily bar data of SPY, SPDR S&P 500, the most popular ETF to track the Standard & Poor's 500 Index.
from quantopian.research.experimental import history
from quantopian.research import symbols
SPY = history('SPY', fields=['open_price','high','low','close_price','volume'],frequency='daily',start='2020-01-01',end='2020-06-30')
SPY.tail(10)
SPY['close_price'].plot()
# ## Pipeline on Quantopian
#
# Data on Quantopian is organised into different datasets which can be aggregated using Pipelines in research notebooks and trading algos. The output of a Pipeline can be regarded as a pandas DataFrame indexed by time and asset.
#
# Data Reference: https://www.quantopian.com/docs/data-reference/overview
# Pipeline: https://www.quantopian.com/tutorials/pipeline
#
#
# +
import datetime
from quantopian.pipeline import CustomFactor, Pipeline
from quantopian.pipeline.data.morningstar import Fundamentals
from quantopian.pipeline.domain import US_EQUITIES, GB_EQUITIES, HK_EQUITIES, DE_EQUITIES
from quantopian.research import run_pipeline, symbols
from quantopian.pipeline.data import EquityPricing
from quantopian.pipeline.factors import Returns
from quantopian.pipeline.filters import StaticAssets, QTradableStocksUS
# Get the latest daily close price for all equities.
yesterday_close = EquityPricing.close.latest
# Get the latest daily trading volume for all equities.
yesterday_volume = EquityPricing.volume.latest
Top_350 = Fundamentals.market_cap.latest.rank(ascending=False) <= 350
Stocks = ['HSY','MSFT','GLW','AXP','FB','NVR','RL','WPM','KKR','FCX']
# Add the factor to the pipeline.
pipe = Pipeline({
'close': yesterday_close,
'volume': yesterday_volume,
'return': Returns([EquityPricing.close],window_length=252),
'cap': Fundamentals.market_cap.latest,
'eps': Fundamentals.normalized_diluted_eps_earnings_reports.latest
},
domain = US_EQUITIES,
# screen = Top_350 & (EquityPricing.volume.latest>1000),
screen = StaticAssets(symbols(Stocks)),
)
refday = datetime.datetime.now() + datetime.timedelta(days=-1)
today = refday.replace(year=refday.year-1).date()
yesterday = today.replace(year=today.year-2)
print('Start date {} End date {}'.format(yesterday,today))
df = run_pipeline(pipe, yesterday, refday)
# Run the pipeline over a year and print the result.
df.tail(20)
# -
df.tail(20)
# Demo: Sample algo provided by Quantopian
#
# We will run a sample algorithm provided by Quantopian to show how to use the backtest environment and how we evaluate the performance of a trading algorithm.
# ### Future plans
#
# Lecture 2: How to write a simple algo on Quantopian
# Lecture 3: Non-stationarity of time-series data
# Lecture 4: Using decision trees on fundamentals data
| Notebook/2020_21/Lecture 1 Understanding financial data.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import matplotlib.pyplot as plt
import ltfatpy
import matplotlib as mpl
# %matplotlib inline
L = 2**14
M = 512
a = 128
signal = ltfatpy.signals.gspi.gspi()[0][:L]
signal = ltfatpy.signals.greasy.greasy()[0][:L]
gs2 = {'name': 'gauss', 'tfr': 4}
rep2 = ltfatpy.dgtreal(signal, gs2, a, M)[0]
# +
# -*- coding: utf-8 -*-
# ######### COPYRIGHT #########
# Credits
# #######
#
# Copyright(c) 2015-2018
# ----------------------
#
# * `LabEx Archimède <http://labex-archimede.univ-amu.fr/>`_
# * `Laboratoire d'Informatique Fondamentale <http://www.lif.univ-mrs.fr/>`_
# (now `Laboratoire d'Informatique et Systèmes <http://www.lis-lab.fr/>`_)
# * `Institut de Mathématiques de Marseille <http://www.i2m.univ-amu.fr/>`_
# * `Université d'Aix-Marseille <http://www.univ-amu.fr/>`_
#
# This software is a port from LTFAT 2.1.0 :
# Copyright (C) 2005-2018 <NAME> <<EMAIL>>.
#
# Contributors
# ------------
#
# * <NAME> <contact.dev_AT_lis-lab.fr>
# * <NAME> <contact.dev_AT_lis-lab.fr>
#
# Description
# -----------
#
# ltfatpy is a partial Python port of the
# `Large Time/Frequency Analysis Toolbox <http://ltfat.sourceforge.net/>`_,
# a MATLAB®/Octave toolbox for working with time-frequency analysis and
# synthesis.
#
# Version
# -------
#
# * ltfatpy version = 1.0.16
# * LTFAT version = 2.1.0
#
# Licence
# -------
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# ######### COPYRIGHT #########
"""Module of phase gradient computation
Ported from ltfat_2.1.0/gabor/gabphasegrad.m
.. moduleauthor:: <NAME>
"""
from __future__ import print_function, division
import numpy as np
from ltfatpy.comp.comp_sigreshape_pre import comp_sigreshape_pre
from ltfatpy.gabor.dgtlength import dgtlength
from ltfatpy.gabor.gabwin import gabwin
from ltfatpy.tools.postpad import postpad
from ltfatpy.fourier.fftindex import fftindex
from ltfatpy.comp.comp_sepdgt import comp_sepdgt
from ltfatpy.fourier.pderiv import pderiv
def modgabphasegrad(method, *args, **kwargs):
"""Phase gradient of the discrete Gabor transform
- Usage:
| ``(tgrad, fgrad, c) = gabphasegrad('dgt', f, g, a, M, L=None)``
| ``(tgrad, fgrad) = gabphasegrad('phase', cphase, a)``
| ``(tgrad, fgrad) = gabphasegrad('abs', s, g, a, difforder=4)``
- Input parameters:
:param str method: Method used to compute the phase gradient, see the
possible values below
:param numpy.ndarray f: (defined if ``method='dgt'``) Input signal
:param numpy.ndarray cphase: (defined if ``method='phase'``) Phase of a
:func:`~ltfatpy.gabor.dgt.dgt` of the signal
:param numpy.ndarray s: (defined if ``method='abs'``) Spectrogram of the
signal
:param numpy.ndarray g: (defined if ``method='dgt'`` or ``method='phase'``)
Window function
:param int a: (defined if ``method='dgt'`` or ``method='phase'`` or
``method='abs'``) Length of time shift
:param int M: (defined if ``method='dgt'``) Number of channels
:param int L: (defined if ``method='dgt'``, optional) Length of transform
to do
:param int difforder: (defined if ``method='abs'``, optional) Order of the
centered finite difference scheme used to perform the needed numerical
differentiation
- Output parameters:
:returns: ``(tgrad, fgrad, c)`` if ``method='dgt'``, or ``(tgrad, fgrad)``
if ``method='phase'`` or ``method='abs'``
:rtype: tuple
:var numpy.ndarray tgrad: Instantaneous frequency
:var numpy.ndarray fgrad: Local group delay
:var numpy.ndarray c: Gabor coefficients
``gabphasegrad`` computes the time-frequency gradient of the phase of the
:func:`~ltfatpy.gabor.dgt.dgt` of a signal. The derivative in time
**tgrad** is the instantaneous frequency while the frequency derivative
**fgrad** is the local group delay.
**tgrad** and **fgrad** measure the deviation from the current time and
frequency, so a value of zero means that the instantaneous frequency is
equal to the center frequency of the considered channel.
**tgrad** is scaled such that distances are measured in samples. Similarly,
**fgrad** is scaled such that the Nyquist frequency (the highest possible
frequency) corresponds to a value of ``L/2``.
The computation of **tgrad** and **fgrad** is inaccurate when the absolute
    value of the Gabor coefficients is low. This is due to the fact that the
phase of complex numbers close to the machine precision is almost
random. Therefore, **tgrad** and **fgrad** may attain very large random
values when ``abs(c)`` is close to zero.
The computation can be done using three different methods:
=========== ===========================================================
``'dgt'`` Directly from the signal.
``'phase'`` From the phase of a :func:`~ltfatpy.gabor.dgt.dgt` of the
signal. This is the classic method used in the phase
vocoder.
``'abs'`` From the absolute value of the
:func:`~ltfatpy.gabor.dgt.dgt`. Currently this method works
only for Gaussian windows.
=========== ===========================================================
``(tgrad, fgrad, c) = gabphasegrad('dgt', f, g, a, M)`` computes the
time-frequency gradient using a :func:`~ltfatpy.gabor.dgt.dgt` of the
signal **f**. The :func:`~ltfatpy.gabor.dgt.dgt` is computed using the
window **g** on the lattice specified by the time shift **a** and the
number of channels **M**. The algorithm used to perform this calculation
computes several DGTs, and therefore this routine takes the exact same
input parameters as :func:`~ltfatpy.gabor.dgt.dgt`.
The window **g** may be specified as in :func:`~ltfatpy.gabor.dgt.dgt`. If
the window used is ``'gauss'``, the computation will be done by a faster
algorithm.
``(tgrad, fgrad, c) = gabphasegrad('dgt', f, g, a, M)`` additionally
returns the Gabor coefficients ``c``, as they are always computed as a
byproduct of the algorithm.
``(tgrad, fgrad) = gabphasegrad('phase', cphase, a)`` computes the phase
gradient from the phase **cphase** of a :func:`~ltfatpy.gabor.dgt.dgt` of
the signal. The original :func:`~ltfatpy.gabor.dgt.dgt` from which the
phase is obtained must have been computed using a time-shift of **a**.
``(tgrad, fgrad) = gabphasegrad('abs', s, g, a)`` computes the phase
gradient from the spectrogram **s**. The spectrogram must have been
computed using the window **g** and time-shift **a**.
``(tgrad, fgrad) = gabphasegrad('abs', s, g, a, difforder=ord)`` uses a
centered finite difference scheme of order ``ord`` to perform the needed
numerical differentiation. Default is to use a 4th order scheme.
Currently the 'abs' method only works if the window **g** is a Gaussian
window specified as a string or cell array.
.. seealso:: :func:`resgram`, :func:`gabreassign`,
:func:`~ltfatpy.gabor.dgt.dgt`
- References:
:cite:`aufl95,cmdaaufl97,fl65`
"""
# NOTE: This function doesn't support the parameter lt (lattice type)
# supported by the corresponding octave function and the lattice used is
# seperable (square lattice lt = (0, 1)).
# NOTE: As in the octave version of this function, if needed, the
# undocumented optional keyword minlvl is available when using method=dgt.
# So it can be passed using a call of the following form:
# (tgrad, fgrad, c) = gabphasegrad('dgt', f, g, a, M, minlvl=val)
if not isinstance(method, str):
raise TypeError('First argument must be a str containing the method '
'name, "dgt", "phase" or "abs".')
method = method.lower()
if method == 'dgt':
# --------------------------- DGT method ------------------------
(f, g, a, M) = args
if 'L' in kwargs:
L = kwargs['L']
else:
L = None
if 'minlvl' in kwargs:
minlvl = kwargs['minlvl']
else:
minlvl = np.finfo(np.float64).tiny
# # ----- step 1 : Verify f and determine its length -------
# Change f to correct shape.
f, Ls, W, wasrow, remembershape = comp_sigreshape_pre(f, 0)
# # ------ step 2: Verify a, M and L
if not L:
# ----- step 2b : Verify a, M and get L from the signal length f---
L = dgtlength(Ls, a, M)
else:
# ----- step 2a : Verify a, M and get L
Luser = dgtlength(L, a, M)
if Luser != L:
raise ValueError('Incorrect transform length L = {0:d} '
'specified. Next valid length is L = {1:d}. '
'See the help of dgtlength for the '
'requirements.'.format(L, Luser))
# # ----- step 3 : Determine the window
g, info = gabwin(g, a, M, L)
if L < info['gl']:
raise ValueError('Window is too long.')
# # ----- step 4: final cleanup ---------------
f = postpad(f, L)
# # ------ algorithm starts --------------------
# Compute the time weighted version of the window.
hg = fftindex(L) * g
# The computation done this way is insensitive to whether the dgt is
# phaselocked or not.
c = comp_sepdgt(f, g, a, M, 0)
c_h = comp_sepdgt(f, hg, a, M, 0)
c_s = np.abs(c)**2
# Remove small values because we need to divide by c_s
c_s = np.maximum(c_s, minlvl*np.max(c_s))
# Compute the group delay
fgrad = np.real(c_h * c.conjugate() / c_s)
if info['gauss']:
# The method used below only works for the Gaussian window, because
# the time derivative and the time multiplicative of the Gaussian
# are identical.
tgrad = np.imag(c_h * c.conjugate() / c_s) / info['tfr']
else:
# The code below works for any window, and not just the Gaussian
dg = pderiv(g, difforder=float('inf')) / (2*np.pi)
c_d = comp_sepdgt(f, dg, a, M, 0)
# NOTE: There is a bug here in the original octave file as it
# contains a reshape that uses an undefined variable N.
# You can get the error with LTFAT 2.1.0 in octave by running for
# example:
# gabphasegrad('dgt', rand(16,1), rand(16,1), 4, 16)
#
# So we just comment out the corresponding line here, as it appears
# to be unneeded:
# c_d.shape = (M, N, W)
# Compute the instantaneous frequency
tgrad = -np.imag(c_d * c.conjugate() / c_s)
return (tgrad, fgrad, c)
elif method == 'phase':
# --------------------------- phase method ------------------------
(cphase, a, M) = args
if not np.isrealobj(cphase):
raise TypeError("Input phase must be real valued. Use the 'angle'"
" function to compute the argument of complex "
"numbers.")
# --- linear method ---
if cphase.ndim == 3:
M2, N, W = cphase.shape # M2 is the number of channels from 0 to Nyquist
else:
M2, N = cphase.shape # M2 is the number of channels from 0 to Nyquist
L = N*a
b = L/M
# NOTE: The following code found in the original octave version of the function
# hasn't been translated here to Python as it is not used:
# if 0
#
# # This is the classic phase vocoder algorithm by Flanagan.
#
# tgrad = cphase-circshift(cphase,[0,-1]);
# tgrad = tgrad- 2*pi*round(tgrad/(2*pi));
# tgrad = -tgrad/(2*pi)*L;
#
# # Phase-lock the angles.
# TimeInd = (0:(N-1))*a;
# FreqInd = (0:(M-1))/M;
#
# phl = FreqInd'*TimeInd;
# cphase = cphase+2*pi.*phl;
#
# fgrad = cphase-circshift(cphase,[1,0]);
# fgrad = fgrad- 2*pi*round(fgrad/(2*pi));
# fgrad = -fgrad/(2*pi)*L;
#
# end;
# This is the classic phase vocoder algorithm by Flanagan modified to
# yield a second order centered difference approximation.
# Forward approximation
tgrad_1 = cphase - np.roll(cphase, -1, axis=1)
# numpy round function doesn't use the same convention than octave for
# half-integers but the standard Python round function uses the same
# convention than octave, so we use the Python standard round in the
# computation below
octave_round = np.vectorize(round)
tgrad_1 = tgrad_1 - 2*np.pi*octave_round(tgrad_1/(2*np.pi))
# Backward approximation
tgrad_2 = np.roll(cphase, 1, axis=1) - cphase
tgrad_2 = tgrad_2 - 2*np.pi*octave_round(tgrad_2/(2*np.pi))
# Average
tgrad = (tgrad_1+tgrad_2) / 2
tgrad = -tgrad / (2*np.pi*a) * L
# Phase-lock the angles.
TimeInd = np.arange(N) * a
FreqInd = np.arange(M2) / M
phl = np.dot(FreqInd.reshape((FreqInd.shape[0], 1)),
TimeInd.reshape((1, TimeInd.shape[0])))
# NOTE: in the following lines, the shape of phl is changed so that
# broadcasting works in the following addition with cphase when cphase
# has more than two dimensions
new_shape = np.ones((len(cphase.shape), ), dtype=int)
new_shape[0] = phl.shape[0]
new_shape[1] = phl.shape[1]
phl = phl.reshape(tuple(new_shape))
cphase = cphase + 2*np.pi*phl
cphase_to_aprox = np.concatenate([-cphase[1:2], cphase, -cphase[-2:-1]])
# Forward approximation
fgrad_1 = cphase_to_aprox - np.roll(cphase_to_aprox, -1, axis=0)
fgrad_1 = fgrad_1 - 2*np.pi*octave_round(fgrad_1/(2*np.pi))
fgrad_1 = fgrad_1[1:-1]
# Backward approximation
fgrad_2 = np.roll(cphase_to_aprox, 1, axis=0) - cphase_to_aprox
fgrad_2 = fgrad_2 - 2*np.pi*octave_round(fgrad_2/(2*np.pi))
fgrad_2 = fgrad_2[1:-1]
# Average
fgrad = (fgrad_1+fgrad_2)/2
fgrad = fgrad/(2*np.pi*b)*L
return (tgrad, fgrad)
elif method == 'abs':
# --------------------------- abs method ------------------------
(s, g, a) = args
if 'difforder' in kwargs:
difforder = kwargs['difforder']
else:
difforder = 4
if not np.all(s >= 0.):
raise ValueError('First input argument must be positive or zero.')
if s.ndim == 3:
M, N, W = s.shape
else:
M, N = s.shape
L = N*a
g, info = gabwin(g, a, M, L)
if not info['gauss']:
raise ValueError('The window must be a Gaussian window (specified '
'as a string or as a dictionary).')
b = L/M
# We must avoid taking the log of zero.
# Therefore we add the smallest possible
# number
logs = np.log(s + np.finfo(s.dtype).tiny)
# XXX REMOVE Add a small constant to limit the dynamic range. This
# should lessen the problem of errors in the differentiation for points
# close to (but not exactly) zeros points.
maxmax = np.max(logs)
tt = -11.
logs[logs < (maxmax+tt)] = tt
fgrad = pderiv(logs, 1, difforder)/(2*np.pi)*info['tfr']
tgrad = pderiv(logs, 0, difforder)/(2*np.pi*info['tfr'])
return (tgrad, fgrad)
else:
raise ValueError("First argument must be the method name, 'dgt', "
"'phase' or 'abs'.")
# +
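# Helper routines: unwrap_s removes 2*pi jumps from a 1-D phase sequence (adding or
# subtracting 2*pi whenever consecutive samples differ by more than pi), and unwrap
# applies the same correction to each row of a matrix, similar in spirit to np.unwrap
# along axis=1.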
def unwrap_s(series):
res = [series[0]]
jump = 0
v = -np.diff(series)
# print(np.max(v))
for val,d in zip(series[1:], v):
if d>np.pi:
jump += 2*np.pi
elif d<-np.pi:
jump -= 2*np.pi
res.append(jump+val)
return np.array(res)
def unwrap(mat):
matres = np.zeros(mat.shape)
for i, lin in enumerate(mat):
matres[i] = unwrap_s(lin)
return matres
# +
L = 2000;
M = 2000;
M2 = int(M/2+1)
a = 1
N = L/a
b = L/M
tfr = a/b
f0 = np.arange(L)
f3 = np.zeros([L])
for kk in range(1,10,2):
# f3 = f3 + np.exp(2*np.pi*1j*f0*kk/20)
f3 = f3 + 2*kk*np.cos(2*np.pi*f0*kk/20)
f4 = np.zeros([L])
f4[100:L:400] = np.arange(2,7)*np.sqrt(L);
f = f3+f4;
f = np.real(f)
# f = signal[:L]
c = ltfatpy.dgtreal(f,{'name': 'gauss', 'tfr': 4},a,M)[0]
tgrad, fgrad = modgabphasegrad('phase',np.angle(c),a, M)
# +
import matplotlib
font = {'family' : 'sans-serif',
'weight' : 'bold',
'size' : 16}
matplotlib.rc('font', **font)
mpl.rcParams['image.cmap'] = 'inferno'
plt.figure(figsize=(21,3));
plt.subplot(131);
ltfatpy.plotdgtreal(c,a,M,dynrange=60);
plt.title('Log magnitude')
# plt.subplot(142);
# ltfatpy.plotdgtreal(unwrap(np.angle(c)),a,M, normalization='lin');
# plt.title('Phase unwrapped in time')
plt.subplot(132);
ltfatpy.plotdgtreal(tgrad*(np.abs(c) > np.exp(-5)),a,M,clim=[-50,50], normalization='lin');
plt.title('Phase time-derivative')
plt.subplot(133);
ltfatpy.plotdgtreal(fgrad*(np.abs(c) > np.exp(-5)),a,M,clim=[-100,100], normalization='lin');
plt.title('Phase frequency-derivative')
plt.savefig("intro-TF-phasepaper.pdf", bbox_inches='tight')
# +
plt.figure(figsize=(15, 2))
plt.subplot(121)
plt.plot(f)
plt.xlim([0,L-1])
plt.xlabel('Time (sample)')
plt.title('Signal')
plt.subplot(122)
lim = [50,150-1]
plt.plot(range(*lim),f[lim[0]:lim[1]])
plt.xlim(lim)
plt.xlabel('Time (sample)')
plt.title('Zoom in')
plt.savefig("intro-time-phasepaper.pdf", bbox_inches='tight')
# -
| plots/representations-figure.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
f = pd.read_pickle('../data/processed/model_ready.pkl')
f
# +
X = f.loc[:,'rating':]
y = f['mid_salary'].values
# +
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42)
# +
from sklearn.preprocessing import MinMaxScaler
scaler = MinMaxScaler()
X_train_scaled = scaler.fit_transform(X_train)
X_test_scaled = scaler.transform(X_test)
# -
# Lets start by trying a linear model
from sklearn import linear_model
from sklearn.linear_model import LinearRegression
linreg = LinearRegression().fit(X_train_scaled, y_train)
print('linear model coeff (w): {}'
.format(linreg.coef_))
print('linear model intercept (b): {:.3f}'
.format(linreg.intercept_))
print('R-squared score (training): {:.3f}'
.format(linreg.score(X_train_scaled, y_train)))
print('R-squared score (test): {:.3f}'
.format(linreg.score(X_test_scaled, y_test)))
# That was horrible. How can R^2 even be negative? On the test set, R^2 drops below zero whenever the model predicts worse than simply using the mean of y, probably because of too many multicollinear features here. Next we try ridge, which penalizes large coefficients.
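# As a reminder (standard definitions, not specific to this dataset): ridge minimizes
# ||y - Xw||^2 + alpha*||w||^2, shrinking all coefficients toward zero, while lasso
# (tried further below) uses an alpha*||w||_1 penalty instead, which can drive some
# coefficients exactly to zero.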
# +
from sklearn.linear_model import Ridge
import numpy as np
linridge = Ridge(alpha=20.0).fit(X_train_scaled, y_train)
X_train_scaled = scaler.fit_transform(X_train)
X_test_scaled = scaler.transform(X_test)
print('Jobs dataset')
print('ridge regression linear model intercept: {}'
.format(linridge.intercept_))
print('ridge regression linear model coeff:\n{}'
.format(linridge.coef_))
print('R-squared score (training): {:.3f}'
.format(linridge.score(X_train_scaled, y_train)))
print('R-squared score (test): {:.3f}'
.format(linridge.score(X_test_scaled, y_test)))
print('Number of non-zero features: {}'
.format(np.sum(linridge.coef_ != 0)))
# -
# Definitely better than before. Let's try lasso regression, which can force some coefficients to exactly zero.
X
# +
from sklearn.linear_model import Lasso
linlasso = Lasso(alpha=2.0, max_iter = 10000).fit(X_train_scaled, y_train)
print('Jobs dataset')
print('lasso regression linear model intercept: {}'
.format(linlasso.intercept_))
print('lasso regression linear model coeff:\n{}'
.format(linlasso.coef_))
print('Non-zero features: {}'
.format(np.sum(linlasso.coef_ != 0)))
print('R-squared score (training): {:.3f}'
.format(linlasso.score(X_train_scaled, y_train)))
print('R-squared score (test): {:.3f}\n'
.format(linlasso.score(X_test_scaled, y_test)))
print('Features with non-zero weight (sorted by absolute magnitude):')
for e in sorted (list(zip(list(X), linlasso.coef_)),
key = lambda e: -abs(e[1])):
if e[1] != 0:
print('\t{}, {:.3f}'.format(e[0], e[1]))
# -
# Lasso regression performed slightly worse than ridge regression.
# It thinks that the audiovisual industry pays a lot more. Interesting.
ff = pd.read_pickle('../data/interim/combined.pkl')
pd.set_option('max_rows',1000)
by_industry = ff.groupby('industry')['mid_salary'].agg(['mean','median','count']).sort_values(by='mean',ascending=False).reset_index()
by_industry[by_industry['industry'].str.contains('visual')]
# Audiovisual is ranked 32nd among industries, so I don't see why it should matter much at all in the regression.
# +
print('Lasso regression: effect of alpha regularization\n\
parameter on number of features kept in final model\n')
for alpha in [0.5, 1, 2, 3, 5, 10, 20, 50, 100]:
linlasso = Lasso(alpha, max_iter = 10000).fit(X_train_scaled, y_train)
r2_train = linlasso.score(X_train_scaled, y_train)
r2_test = linlasso.score(X_test_scaled, y_test)
print('Alpha = {:.2f}\nFeatures kept: {}, r-squared training: {:.2f}, \
r-squared test: {:.2f}\n'
.format(alpha, np.sum(linlasso.coef_ != 0), r2_train, r2_test))
# -
# It seems like the fewer features the better up to a point.
# 
# +
from sklearn.metrics import mean_absolute_error as mae
# y_pred was never defined above; as an assumption, score the ridge model's test-set predictions
y_pred = linridge.predict(X_test_scaled)
mae(y_test, y_pred)
| notebooks/8- Fitting Models.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# **[HWR-01]** Import the required modules.
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
from tensorflow.examples.tutorials.mnist import input_data
# **[HWR-02]** Define the first-stage convolutional filters and pooling layer.
# +
num_filters1 = 32
x = tf.placeholder(tf.float32, [None, 784])
x_image = tf.reshape(x, [-1,28,28,1])
W_conv1 = tf.Variable(tf.truncated_normal([5,5,1,num_filters1],
stddev=0.1))
h_conv1 = tf.nn.conv2d(x_image, W_conv1,
strides=[1,1,1,1], padding='SAME')
b_conv1 = tf.Variable(tf.constant(0.1, shape=[num_filters1]))
h_conv1_cutoff = tf.nn.relu(h_conv1 + b_conv1)
h_pool1 =tf.nn.max_pool(h_conv1_cutoff, ksize=[1,2,2,1],
strides=[1,2,2,1], padding='SAME')
# -
# **[HWR-03]** Define the second-stage convolutional filters and pooling layer.
# +
num_filters2 = 64
W_conv2 = tf.Variable(
tf.truncated_normal([5,5,num_filters1,num_filters2],
stddev=0.1))
h_conv2 = tf.nn.conv2d(h_pool1, W_conv2,
strides=[1,1,1,1], padding='SAME')
b_conv2 = tf.Variable(tf.constant(0.1, shape=[num_filters2]))
h_conv2_cutoff = tf.nn.relu(h_conv2 + b_conv2)
h_pool2 =tf.nn.max_pool(h_conv2_cutoff, ksize=[1,2,2,1],
strides=[1,2,2,1], padding='SAME')
# -
# **[HWR-04]** Define the fully connected layer, dropout layer, and softmax function.
# +
h_pool2_flat = tf.reshape(h_pool2, [-1, 7*7*num_filters2])
num_units1 = 7*7*num_filters2
num_units2 = 1024
w2 = tf.Variable(tf.truncated_normal([num_units1, num_units2]))
b2 = tf.Variable(tf.constant(0.1, shape=[num_units2]))
hidden2 = tf.nn.relu(tf.matmul(h_pool2_flat, w2) + b2)
keep_prob = tf.placeholder(tf.float32)
hidden2_drop = tf.nn.dropout(hidden2, keep_prob)
w0 = tf.Variable(tf.zeros([num_units2, 10]))
b0 = tf.Variable(tf.zeros([10]))
p = tf.nn.softmax(tf.matmul(hidden2_drop, w0) + b0)
# -
# **[HWR-05]** Prepare a session, initialize the Variables, and then restore a session in which the optimization has already been carried out.
sess = tf.Session()
sess.run(tf.initialize_all_variables())
saver = tf.train.Saver()
saver.restore(sess, 'cnn_session-20000')
# **[HWR-06]** Prepare the JavaScript code for entering handwritten characters.
# +
input_form = """
<table>
<td style="border-style: none;">
<div style="border: solid 2px #666; width: 143px; height: 144px;">
<canvas width="140" height="140"></canvas>
</div></td>
<td style="border-style: none;">
<button onclick="clear_value()">Clear</button>
</td>
</table>
"""
javascript = """
<script type="text/Javascript">
var pixels = [];
for (var i = 0; i < 28*28; i++) pixels[i] = 0
var click = 0;
var canvas = document.querySelector("canvas");
canvas.addEventListener("mousemove", function(e){
if (e.buttons == 1) {
click = 1;
canvas.getContext("2d").fillStyle = "rgb(0,0,0)";
canvas.getContext("2d").fillRect(e.offsetX, e.offsetY, 8, 8);
x = Math.floor(e.offsetY * 0.2)
y = Math.floor(e.offsetX * 0.2) + 1
for (var dy = 0; dy < 2; dy++){
for (var dx = 0; dx < 2; dx++){
if ((x + dx < 28) && (y + dy < 28)){
pixels[(y+dy)+(x+dx)*28] = 1
}
}
}
} else {
if (click == 1) set_value()
click = 0;
}
});
function set_value(){
var result = ""
for (var i = 0; i < 28*28; i++) result += pixels[i] + ","
var kernel = IPython.notebook.kernel;
kernel.execute("image = [" + result + "]");
}
function clear_value(){
canvas.getContext("2d").fillStyle = "rgb(255,255,255)";
canvas.getContext("2d").fillRect(0, 0, 140, 140);
for (var i = 0; i < 28*28; i++) pixels[i] = 0
}
</script>
"""
# -
# **[HWR-07]** Run the JavaScript and enter a handwritten character. The input is stored in the variable image.
from IPython.display import HTML
HTML(input_form + javascript)
# **[HWR-08]** Compute and display the class probabilities for the entered character using the CNN.
# +
p_val = sess.run(p, feed_dict={x:[image], keep_prob:1.0})
fig = plt.figure(figsize=(4,2))
pred = p_val[0]
subplot = fig.add_subplot(1,1,1)
subplot.set_xticks(range(10))
subplot.set_xlim(-0.5,9.5)
subplot.set_ylim(0,1)
subplot.bar(range(10), pred, align='center')
# -
# **[HWR-09]** Display the images produced by applying the first-stage filters.
#
# Here we show the images both before and after small pixel values are cut off.
# +
conv1_vals, cutoff1_vals = sess.run(
[h_conv1, h_conv1_cutoff], feed_dict={x:[image], keep_prob:1.0})
fig = plt.figure(figsize=(16,4))
for f in range(num_filters1):
subplot = fig.add_subplot(4, 16, f+1)
subplot.set_xticks([])
subplot.set_yticks([])
subplot.imshow(conv1_vals[0,:,:,f],
cmap=plt.cm.gray_r, interpolation='nearest')
for f in range(num_filters1):
subplot = fig.add_subplot(4, 16, num_filters1+f+1)
subplot.set_xticks([])
subplot.set_yticks([])
subplot.imshow(cutoff1_vals[0,:,:,f],
cmap=plt.cm.gray_r, interpolation='nearest')
# -
# **[HWR-10]** Display the images produced by applying the second-stage filters.
#
# Here we show the images both before and after small pixel values are cut off.
# +
conv2_vals, cutoff2_vals = sess.run(
[h_conv2, h_conv2_cutoff], feed_dict={x:[image], keep_prob:1.0})
fig = plt.figure(figsize=(16,8))
for f in range(num_filters2):
subplot = fig.add_subplot(8, 16, f+1)
subplot.set_xticks([])
subplot.set_yticks([])
subplot.imshow(conv2_vals[0,:,:,f],
cmap=plt.cm.gray_r, interpolation='nearest')
for f in range(num_filters2):
subplot = fig.add_subplot(8, 16, num_filters2+f+1)
subplot.set_xticks([])
subplot.set_yticks([])
subplot.imshow(cutoff2_vals[0,:,:,f],
cmap=plt.cm.gray_r, interpolation='nearest')
| Chapter05/Handwriting recognizer.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from GPyOpt.methods import BayesianOptimization
import matplotlib.pyplot as plt
import numpy as np
import progressbar
# Results in this format were taken from the output of the multi-threaded C++ code.
resultsFinal = np.asarray([
0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0,
3e-08, 0, 0, 0, 0, 0,
6e-08, 0, 0, 0, 0, 0,
2.5e-07, 0, 0, 0, 0, 0,
7.1e-07, 0, 0, 0, 0, 0,
1.65e-06, 0, 0, 0, 0, 0,
4.9e-06, 0, 0, 0, 0, 0,
1.178e-05, 0, 0, 0, 0, 0,
2.797e-05, 0, 0, 0, 0, 0,
6.254e-05, 0, 0, 0, 0, 0,
0.00013422, 2e-08, 0, 0, 0, 0,
0.00027849, 7e-08, 0, 0, 0, 0,
0.00054056, 1.5e-07, 0, 0, 0, 0,
0.00101865, 8.4e-07, 0, 0, 0, 0,
0.00185053, 3.71e-06, 0, 0, 0, 0,
0.00323235, 1.331e-05, 0, 0, 0, 0,
0.0054424, 3.902e-05, 0, 0, 0, 0,
0.00889944, 0.00011233, 1e-08, 0, 0, 0,
0.0140104, 0.00029576, 8e-08, 0, 0, 0,
0.0214406, 0.00073342, 1.19e-06, 0, 0, 0,
0.0318141, 0.00168044, 6.31e-06, 2e-08, 0, 0,
0.0459994, 0.00364552, 3.162e-05, 2e-08, 0, 0,
0.0646293, 0.00743669, 0.0001314, 4.6e-07, 0, 0,
0.0885937, 0.0142396, 0.00049523, 4.14e-06, 2e-08, 0,
0.118352, 0.0257663, 0.00162757, 3.233e-05, 3.6e-07, 0,
0.154433, 0.0440788, 0.00472106, 0.00020428, 3.66e-06, 1e-08,
0.196837, 0.0715445, 0.0122646, 0.00105628, 4.37e-05, 9.2e-07,
0.245502, 0.110379, 0.0283845, 0.00443653, 0.00041492, 2.269e-05,
0.299761, 0.162142, 0.0591246, 0.0153717, 0.00283166, 0.00037416,
0.358843, 0.227416, 0.111237, 0.0440746, 0.0141348, 0.00365242,
0.421386, 0.305263, 0.189755, 0.105458, 0.0524408, 0.0232268,
0.485932, 0.392829, 0.295232, 0.212378, 0.146406, 0.0962258,
0.550785, 0.486158, 0.421551, 0.364836, 0.314247, 0.268755,])
resultsFinal = np.reshape(resultsFinal, (40,6))
# +
# honest network delay over next n blocks.
def vectorDelayHonest(ps, es, init_endorsers, delay_priority, delay_endorse):
return (60 * len(ps)
+ delay_priority * sum(ps)
+ sum([delay_endorse * max(init_endorsers - e, 0) for e in es]))
# attacking network delay over next n blocks.
def vectorDelayAttacker(ps, es, init_endorsers, delay_priority, delay_endorse):
return (60 * len(ps)
+ delay_priority * sum(ps)
+ sum([delay_endorse * max(init_endorsers - e, 0) for e in es[1:]]))
# efficient sample generation
def getAH(alpha):
x = np.random.geometric(1-alpha)
if x == 1:
h = 0
a = np.random.geometric(alpha)
else:
a = 0
h = x - 1
return [a, h]
# -
def getProbReorg(alpha, length, init_endorsers, delay_priority, delay_endorse, sample_size = int(1e5)):
bar = progressbar.ProgressBar()
feasible_count = 0
for _ in range(sample_size):
aVals = []
hVals = []
for i in range(length):
a, h = getAH(alpha)
aVals.append(a)
hVals.append(h)
eVals = np.random.binomial(32, alpha, size = length)
honest_delay = vectorDelayHonest(hVals, 32 - eVals, init_endorsers, delay_priority, delay_endorse)
selfish_delay = vectorDelayAttacker(aVals, eVals, init_endorsers, delay_priority, delay_endorse)
if selfish_delay <= honest_delay:
feasible_count += 1
return feasible_count / sample_size
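# A quick illustrative call (not from the original notebook): the parameter values below are placeholders,
# and the small sample size keeps it fast but noisy.
getProbReorg(alpha=0.45, length=20, init_endorsers=24,
             delay_priority=40, delay_endorse=8, sample_size=int(1e3))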
length_20_probs = resultsFinal[:,2]
length_20_probs
length_20_probs_nonzero = length_20_probs[24:]
length_20_probs_nonzero
length_20_probs_nonzero_geq105 = length_20_probs[28:]
length_20_probs_nonzero_geq105
alphas_geq105 = np.arange(0.38, 0.50, 0.01)
alphas_geq105
def objective(inputs):
print(inputs)
val = 0
bar = progressbar.ProgressBar()
for i in bar(range(12)):
prob = getProbReorg(alpha = alphas_geq105[i],
length=20,
init_endorsers = inputs[0][0],
delay_priority = inputs[0][1],
delay_endorse = inputs[0][2])
        val += length_20_probs_nonzero_geq105[i] * prob
return val
domain = [{'name': 'init_endorsers', 'type': 'discrete', 'domain': tuple(range(33))},
{'name': 'delay_priority', 'type': 'discrete', 'domain': tuple(range(100))},
{'name': 'delay_endorse', 'type': 'discrete', 'domain': tuple(range(100))}]
opt = BayesianOptimization(f = objective, domain = domain)
opt.run_optimization(max_iter = 100)
opt.plot_acquisition()
| mikes_bayesian.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
from cartoframes.auth import set_default_context
from cartoframes.viz import Map, Layer, basemaps
set_default_context('https://cartovl.carto.com/')
# -
Map(Layer('populated_places'), basemap=basemaps.positron)
Map(Layer('populated_places'), basemap=None)
Map(Layer('populated_places'), basemap='#fabada')
| examples/debug/API/basemaps.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .r
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: R
# language: R
# name: ir
# ---
library(ez)
library(car)
library(nlme)
require(multcomp)
library(phia)
df = read.csv("./out/speeds.csv")
# We are interested in the grip areas that are caused by the fingers on the back.
# Thus, we exclude the thumb in our ANOVAs.
df = df[df$Finger != "Thumb_Fn",]
summary(df)
df$Finger <- factor(df$Finger)
df$Cond <- factor(df$Cond)
df$Phone <- factor(df$Phone)
df$PId <- factor(df$PId)
# # ANOVA on all Speeds
# +
aov<-ezANOVA(data = df,
dv = Speed_2D,
within = c(Finger, Phone, Cond),
wid = PId,
type = 3,
detailed = F,
return_aov = F)
aov
# -
aov$`Sphericity Corrections`$GGe * c(3, 3, 9, 3, 3, 9)
aov$`Sphericity Corrections`$GGe * c(45, 45, 135, 45, 45, 135)
lme_velocity <- lme(Speed_2D ~ Phone*Cond*Finger, data = df, random=~1|PId)
summary(glht(lme_velocity, linfct=mcp(Cond = "Tukey")), test = adjusted(type = "bonferroni"))
lme_velocity <- lme(Speed_2D ~ Phone*Cond*Finger, data = df, random=~1|PId)
summary(glht(lme_velocity, linfct=mcp(Phone = "Tukey")), test = adjusted(type = "bonferroni"))
lme_velocity <- lme(Speed_2D ~ Phone, data = df, random=~1|PId)
summary(glht(lme_velocity, linfct=mcp(Phone = "Tukey")), test = adjusted(type = "bonferroni"))
testInteractions(lme_velocity, pairwise=c("Phone"), adjustment="bonferroni")
# # ANOVA only on walking
# +
df2 = df[df$Cond == "walking",]
aov2<-ezANOVA(data = df2,
dv = Speed_2D,
within = c(Finger, Phone),
wid = PId,
type = 3,
detailed = F,
return_aov = F)
aov2
# -
lme2 <- lme(Speed_2D ~ Phone, data = df2, random=~1|PId)
summary(glht(lme2, linfct=mcp(Phone = "Tukey")), test = adjusted(type = "bonferroni"))
# # ANOVA only on sitting
df2 = df[df$Cond == "seated",]
aov2<-ezANOVA(data = df2,
dv = Speed_2D,
within = c(Finger, Phone),
wid = PId,
type = 3,
detailed = F,
return_aov = F)
aov2
lme2 <- lme(Speed_2D ~ Phone, data = df2, random=~1|PId)
summary(glht(lme2, linfct=mcp(Phone = "Tukey")), test = adjusted(type = "bonferroni"))
| _R01-Finger-Movement-Activity.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import pandas as pd
import io
import requests
url="https://raw.githubusercontent.com/fbarth/ds-saint-paul/master/data/base_train.csv"
s=requests.get(url).content
X_train = pd.read_csv(io.StringIO(s.decode('utf-8')), sep=",")
X_train = X_train.drop(columns=['Unnamed: 0'])
url="https://raw.githubusercontent.com/fbarth/ds-saint-paul/master/data/base_train_answer.csv"
s=requests.get(url).content
df_diagnosis = pd.read_csv(io.StringIO(s.decode('utf-8')), sep=",")
df_diagnosis = df_diagnosis.drop(columns=['Unnamed: 0'])
y_train = df_diagnosis['diagnosis'].ravel()
# joining info and diagnosis into one df
df_full = pd.concat([df_diagnosis, X_train], axis=1)
df_full.head()
# -
print(X_train.shape)
print(y_train.shape)
# +
import seaborn as sns
import numpy as np
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.metrics import recall_score, make_scorer
from sklearn.model_selection import cross_val_score, cross_val_predict
# loop to find best number of estimators
min_estimators = 100
max_estimators = 1000
step = 50
result = []
best_score = 0
best_estimator = 0
for i in range(min_estimators, max_estimators+step, step):
clf = GradientBoostingClassifier(
n_estimators=i,
random_state=0)
s = make_scorer(recall_score, pos_label='M')
scores = cross_val_score(clf, X_train, y_train, cv=5, scoring=s)
if (scores.mean() > best_score):
best_estimator = i
best_score = scores.mean()
result.append((i, scores.mean()))
# converting result into dataframe
estimators = np.array(result)[:,0]
score = np.array(result)[:,1]
d = {'estimators': estimators, 'score': score}
df_scores = pd.DataFrame(d)
print(f'Best estimator: {best_estimator}')
print(df_scores)
# plotting results
sns.set_theme(style="dark")
sns.set_palette("colorblind")
sns.lineplot(
data=df_scores,
x="estimators",
y="score"
)
# The best and smallest number of estimators found by the loop above was 200
# -
clf = GradientBoostingClassifier(
n_estimators=200,
random_state=0)
s = make_scorer(recall_score, pos_label='M')
scores = cross_val_score(clf, X_train, y_train, cv=5, scoring=s)
y_pred = cross_val_predict(clf, X_train, y_train, cv=5)
print("recall_score: %0.5f (+/- %0.5f)" % (scores.mean(), scores.std()))
# +
from sklearn.metrics import confusion_matrix
from sklearn.metrics import classification_report, plot_confusion_matrix
print(confusion_matrix(y_train, y_pred))
print(classification_report(y_train, y_pred))
# -
| scripts/2021_breast_cancer/pipeline_GradientBoostingClassifier.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] toc=true
# <h1>Table of Contents<span class="tocSkip"></span></h1>
# <div class="toc"><ul class="toc-item"><li><span><a href="#Imports" data-toc-modified-id="Imports-1"><span class="toc-item-num">1 </span>Imports</a></span></li><li><span><a href="#Functions" data-toc-modified-id="Functions-2"><span class="toc-item-num">2 </span>Functions</a></span></li><li><span><a href="#Use-Tests" data-toc-modified-id="Use-Tests-3"><span class="toc-item-num">3 </span>Use Tests</a></span></li></ul></div>
# -
# # Imports
#import pysiaf
#siaf = pysiaf.Siaf('MIRI')#,basepath='/Users/dlaw/jwcode/pysiaf/pysiaf/pre_delivery_data/MIRI')
import re
import os
import miricoord.imager.mirim_tools as mt
import miricoord.lrs.lrs_tools as lrst
import miricoord.mrs.mrs_tools as mrst
dist_ver = mrst.version()
print('Distortion solution: ', dist_ver)
# # Functions
# + code_folding=[0]
def get_test_file(fnum):
"""
Relative pointer to current working directory for test files.
Parameters:
fnum - int variable (1,2,3) indicating which test input file to use.
Returns:
reffile - String path to selected test input file.
"""
rootdir= '.'
if (fnum == 1):
file='untitled.pointing'
elif (fnum == 2):
file='test.pointing'
elif (fnum == 3):
file='sub.pointing'
reffile=os.path.join(rootdir,file)
return reffile
# + code_folding=[0]
def get_data(file):
"""
Opens a file, and reads in data.
Parameters:
file - file path
Returns:
data - array of strings
"""
f = open(file,'r')
data = f.read()
f.close()
return data
# + code_folding=[0]
def ref_mode(mode):
"""
Defines reference pixels for different imaging modes.
Parameters:
mode - string containing imaging mode.
Returns:
xref, yref - Floating point reference pixel coordinates
"""
xref, yref = 692.5, 511.5
xref_slit, yref_slit = 325.13, 299.7
xref_slitless, yref_slitless = 37.5, 300.
BRIGHTSKY_x, BRIGHTSKY_y = 711.5, 305.5
SUB256_x, SUB256_y = 539.5, 177.5
SUB128_x, SUB128_y = 69.5, 951.5
SUB64_x, SUB64_y = 37.5, 809.5
if "SLITLESS" in mode:
xref = xref_slitless
yref = yref_slitless
elif "SLIT" in mode:
xref = xref_slit
yref = yref_slit
elif "BRIGHTSKY" in mode:
xref = BRIGHTSKY_x
yref = BRIGHTSKY_y
elif "256" in mode:
xref = SUB256_x
yref = SUB256_y
elif "128" in mode:
xref = SUB128_x
yref = SUB128_y
elif "64" in mode:
xref = SUB64_x
yref = SUB64_y
else:
xref = xref
yref = yref
return xref, yref
# + code_folding=[0]
def print_head(f):
"""
Prints currently relevant header information to top of output file.
Parameters:
f - file IO object
"""
xref, yref = 692.5, 511.5
xref_slit, yref_slit = 325.13, 299.7
xref_slitless, yref_slitless = 37.5, 300.
BRIGHTSKY_x, BRIGHTSKY_y = 711.5, 305.5
SUB256_x, SUB256_y = 539.5, 177.5
SUB128_x, SUB128_y = 69.5, 951.5
SUB64_x, SUB64_y = 37.5, 809.5
f.write('# Dithers are multiplied by -1 pending resolution of http://www.miricle.org/bugzilla/show_bug.cgi?id=588 \n')
f.write('# The following reference pixels are hard-coded for use: \n')
f.write('# Imaging: {}, {} \n'.format(xref,yref))
f.write('# LRS Slit: {}, {} \n'.format(xref_slit,yref_slit))
f.write('# LRS Slitless: {}, {} \n'.format(xref_slitless,yref_slitless))
f.write('# BRIGHTSKY: {}, {} \n'.format(BRIGHTSKY_x,BRIGHTSKY_y))
f.write('# SUB256: {}, {} \n'.format(SUB256_x, SUB256_y))
f.write('# SUB128: {}, {} \n'.format(SUB128_x, SUB128_y))
f.write('# SUB64: {}, {} \n\n'.format(SUB64_x, SUB64_y))
return
# + code_folding=[0, 26]
def make_dith_file(in_file, outfile, dist_ver=dist_ver):
"""
Converts an APT pointing file to a list of dithers for use in MIRIsim.
    Parameters:
    in_file - APT pointing file path.
    outfile - Path of the output text file of MIRIsim-readable dithers.
    dist_ver - Distortion solution version (defaults to the current solution).
    Returns:
    None - the dithers are written to outfile.
"""
#set distortion solution (default to current)
mrst.set_toolversion(dist_ver)
#Read apt data and split into rows
data = get_data(in_file)
split_data = data.split('\n')
#open output file
f = open(outfile,"w+")
#append header info to output file
print_head(f)
for row in split_data:
#split row into columns on whitespace
r = row.split()
#rows with < 20 columns contain no data
if len(r) < 20:
f.write(str('#' + ' '.join(r)+ '\n'))
else:
# 'MIRIM' indicates Imager or LRS
if "MIRIM" in r[4]:
v2 = float(r[13])
v3 = float(r[14])
#convert v2 and v3 coordinates to x and y
x,y = mt.v2v3toxy(v2,v3,'F770W')
#determine the proper reference pixel for the imaging mode
xref, yref = ref_mode(r[4])
#these keywords all indicate a coronagraphic image, not supported by MIRIsim
if "BLOCK" in r[4] or "UR" in r[4] or "MASK" in r[4]:
f.write("#MIRIsim does not support Coronagraphy \n")
continue
#compute dither
else:
dx = xref - x[0]
dy = yref - y[0]
#write to file
try:
s = "{0:.2f}, {1:.2f}".format(dx,dy)
f.write(s + '\n')
except:
pass
#'MIRIFU' indicates MRS
elif "MIRIFU" in r[4]:
v2 = float(r[13])
v3 = float(r[14])
#determine stype
channel = r[4][-2:]
#convert to alpha,beta
a,b = mrst.v2v3toab(v2,v3, channel) #mult by -1
da = -1.*a
db = -1.*b
try:
s = "{0:.3f}, {1:.3f}".format(da,db)
f.write(s + '\n')
except:
pass
f.close()
return
# + code_folding=[0]
if __name__ == "__main__":
infile = input("APT pointing file name: ")
outfile = input("Output file name: ")
#functionality for changing distortion solution
#ans = input("Would you like to change distortion solution? y/n")
#if ans.lower() == 'y':
# dist_ver = input('Enter distortion version: ')
make_dith_file(infile, outfile, dist_ver)
# -
# # Use Tests
fname = get_test_file(2)
#make_dith_file(fname, 'subout.txt')
from miricoord.apt2dither import apt2dither
apt2dither.make_dith_file(fname,'out.txt')
help(apt2dither)
| miricoord/apt2dither/APT_to_MIRIsim_dithers.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# To enter presentation mode, run the following cell and press `-`
# %reload_ext slide
# <span class="notebook-slide-start"/>
#
# # IPython
#
# This notebook covers the following topics:
#
# - ["Magics" in IPython](#%22Magics%22-in-IPython)
# - [How to define magics](#Registering-new-magics)
# - [Exercise 2](#Exercise-2)
# - [Exercise 3](#Exercise-3)
# - [Exercise 4](#Exercise-4)
# ## "Mágicas" do IPython
#
# Na parte anterior do minicurso, apresentamos **bang expression** como uma extensão da linguagem Python fornecida pelo kernel IPython para executar comandos no sistema.
#
# Além dessa extensão, o IPython também permite escrever "mágicas"/"magics" que modificam a forma de executar operações. Existem duas principais formas de "magics":
#
# - line magic: altera o restante da linha
# - cell magic: altera a célula inteira
# ### Line magic
#
# Below is an example of a line magic that shows the history of executed cells along with the cell numbering.
#
# <span class="notebook-slide-extra" data-count="3"/>
a = 1
b = a
# %history -n
# %history
# This line magic only printed the history, but there are others that can be used inside Python expressions, such as `%who_ls`, which returns all variables of a given type defined in the notebook.
#
# <span class="notebook-slide-extra" data-count="1"/>
# variaveis = %who_ls int
for var, _ in zip(variaveis, range(5)):
print(var, eval(var))
# <span class="notebook-slide-extra" data-count="1"/>
#
# Besides extending Python's syntax with bang expressions and magics, IPython also lets us look up the documentation of modules, classes, functions, and magics by adding `?` after the name.
# %who_ls?
# +
import ast
# ast?
# -
# <span class="notebook-slide-extra" data-count="1"/>
#
# Using two question marks (`??`) shows the source code.
# %who_ls??
# +
from radon.complexity import cc_visit, cc_visit_ast
# cc_visit_ast??
# -
# ### Cell magic
#
# Cell magics allow changing the execution of an entire cell. The cell magic below runs JavaScript code in the browser. <span class="notebook-slide-extra" data-count="1"/>
#
#
# + language="javascript"
#
# var index = IPython.notebook.get_selected_index()
# IPython.notebook.insert_cell_below('code')
# IPython.notebook.get_cells()[index + 1].set_text("Nova celula")
# -
# <span class="notebook-slide-extra" data-count="1"/>
#
# The next cell magic measures the execution time of a Python cell.
# %%time
from time import sleep
sleep(2)
# ### How execution happens
#
# The `%history` line magic shown earlier can be used to understand what IPython is doing when we use these magics. To do so, we need to see the history translated into Python, using the `-t` flag. <span class="notebook-slide-extra" data-count="1"/>
# %history -t -l 12
# <span class="notebook-slide-extra" data-count="1"/>
#
# Note the following commands:
#
# ```python
# get_ipython().run_cell_magic('time', '', 'from time import sleep\nsleep(2)\n')
# get_ipython().run_line_magic('who_ls', 'int')
# ```
#
# They indicate what the IPython shell (the result of `get_ipython()`) should execute. The function determines whether a cell magic or a line magic is run. The first parameter gives the name of the magic. Finally, the remaining parameters are the arguments passed to the magic's function.
#
# These commands can be executed directly in the notebook:
get_ipython().run_line_magic('who_ls', 'int')
# ### List of magics
#
# We can use the `%lsmagic` magic to list all of IPython's magics and the `%magic` magic to understand how the magics machinery works. <span class="notebook-slide-extra" data-count="1"/>
# %lsmagic
# <span class="notebook-slide-scroll" data-position="-1"/>
#
# Note that automagic is active, which means we can use line magics without an explicit `%`: <span class="notebook-slide-extra" data-count="1"/>
who_ls int
# For other magics, see the file InteratividadeExtra.ipynb
# ### Registering new magics
#
# Now that we know how IPython executes magics, we can think about creating and registering new magics. <span class="notebook-slide-extra" data-count="1"/>
# +
from IPython.core.magic import Magics, magics_class, cell_magic
@magics_class
class LenMagic(Magics):
@cell_magic
def size(self, line, cell):
return len(cell)
# -
# Next we register the magic: <span class="notebook-slide-extra" data-count="1"/>
shell = get_ipython()
shell.register_magics(LenMagic)
# With that, we can use it to get the size of a cell's code: <span class="notebook-slide-extra" data-count="1"/>
# %%size
print("a")
# <span class="notebook-slide-no-scroll"/>
#
# Note that the cell's content was not executed. Instead, it was passed to the `size` function, which processed it and returned `11`.
# Now let's move on to a more complicated example, with arguments, dynamic class creation, and AST analysis. <span class="notebook-slide-extra" data-count="1"/>
# +
import ast
from IPython.core.magic_arguments import magic_arguments, argument, parse_argstring
@magics_class
class ASTMagic(Magics):
@magic_arguments()
@argument(
"methods",
default=["visit_Assign", "visit_AugAssign"],
nargs="*",
help="method names to be defined on AST Visitor"
)
@cell_magic
def count_ast(self, line, cell):
args = parse_argstring(self.count_ast, line)
class CustomVisitor(ast.NodeVisitor):
def __init__(self):
self.count = 0
def _increment_counter(self, node):
self.count += 1
for method in args.methods:
setattr(CustomVisitor, method, CustomVisitor._increment_counter)
tree = ast.parse(cell)
visitor = CustomVisitor()
visitor.visit(tree)
return visitor.count
shell = get_ipython()
shell.register_magics(ASTMagic)
# -
# In this example, we define arguments using decorators and use the `parse_argstring` function to turn them into a structure. The definition follows Python's `argparse`: https://docs.python.org/3/library/argparse.html
#
# ```python
# @magic_arguments()
# @argument(
# "methods",
# default=["visit_Assign", "visit_AugAssign"],
# nargs="*",
# help="method names to be defined on AST Visitor"
# )
# ```
# Besides the argument handling, we create classes dynamically inside the function and define their methods as references to the `_increment_counter` method.
#
# ```python
# for method in args.methods:
# setattr(CustomVisitor, method, CustomVisitor._increment_counter)
# ```
# Finally, we run the visitor and return the count.
#
# ```python
# tree = ast.parse(cell)
# visitor = CustomVisitor()
# visitor.visit(tree)
# return visitor.count
# ```
# %%count_ast
def f():
pass
a = 1
b = 2
c = 3
# %%count_ast visit_FunctionDef
def f():
pass
a = 10
b = 2
c = 3
# ## Exercise 2
#
# Modify the `count_ast` magic to return a dictionary or counter with a count of every node in the AST. The resulting magic should be named `ast_counter`.
#
# Hints:
# - Use the `generic_visit(self, node)` method to visit AST nodes without specifying their names
# - Get the name of an AST element with `type(node).__name__`
# - Visit nodes recursively
#
# <span class="notebook-slide-extra" data-count="2"/>
# +
from collections import Counter
class ASTCounter(ast.NodeVisitor):
def __init__(self):
self.counter = Counter()
def generic_visit(self, node):
self.counter[type(node).__name__] += 1
super().generic_visit(node)
# -
tree = ast.parse("a = 1")
visitor = ASTCounter()
visitor.visit(tree)
visitor.counter
# +
@magics_class
class ASTMagic(Magics):
@cell_magic
def counter_ast(self, line, cell):
tree = ast.parse(cell)
visitor = ASTCounter()
visitor.visit(tree)
return visitor.counter
shell = get_ipython()
shell.register_magics(ASTMagic)
# -
# %%counter_ast
def f():
pass
a = 1
b = 2
c = 3
# ## Exercise 3
#
# Create a magic, `%%radon`, that uses radon to extract cyclomatic complexity and lines-of-code information from a cell. <span class="notebook-slide-extra" data-count="2"/>
# +
from radon.raw import analyze
from radon.complexity import cc_visit
template = """
def __radon_analysis():
{}
"""
@magics_class
class RadonMagic(Magics):
@cell_magic
def radon(self, line, cell):
code_with_def = template.format(
"\n ".join(cell.split("\n"))
)
return (analyze(cell), cc_visit(code_with_def))
shell = get_ipython()
shell.register_magics(RadonMagic)
# -
# %%radon
def f():
pass
# a = 1
if a:
b = 2
if b:
c = 3
# ## Exercise 4
#
# Write a line magic to clone GitHub repositories, receiving the repository in the `Organization/Repository` format and taking arguments to specify the directory and the commit.
#
# Usage example:
#
# ```
# # %clone gems-uff/sapos -d repos/sapos -c a9b0f7b3
# ```
#
# Hints:
#
# - You can use **bang expressions** to call the `git clone` and `git checkout` commands.
# - Bang expressions can interpolate Python variables using `{variable}`, in curly braces
# - The URL of an `owner/name` repository on GitHub is `https://github.com/owner/name.git` <span class="notebook-slide-extra" data-count="2"/>
#
# +
from IPython.core.magic import line_magic
@magics_class
class CloneMagic(Magics):
@magic_arguments()
@argument(
"repo", type=str,
help="owner/repo"
)
@argument(
"-d", "--dir", type=str,
help="target dir"
)
@argument(
"-c", "--commit", type=str,
help="commit"
)
@line_magic
def clone(self, line):
args = parse_argstring(self.clone, line)
repo = "https://github.com/{}.git".format(args.repo)
if args.dir:
rdir = args.dir
else:
rdir = args.repo.split("/")[1]
# !git clone $repo $rdir
        if args.commit:
# %cd $rdir
# !git checkout {args.commit}
shell = get_ipython()
shell.register_magics(CloneMagic)
# -
# %clone gems-uff/sapos -d repos/sapos -c a9b0f7b3
# Continues in: [4.Proxy.ipynb](4.Proxy.ipynb)
| minicurso/3.IPython.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Monthly NO2 Concentrations in Atmosphere cit.018 https://avdc.gsfc.nasa.gov/pub/data/satellite/Aura/OMI/V03/L3/OMNO2D_HR/OMNO2D_HRM/
# +
import numpy as np
import pandas as pd
import rasterio as rio
from netCDF4 import Dataset
pd.options.display.max_rows = 10000
pd.options.display.max_colwidth = 10000
import boto3
import requests
from urllib.request import urlopen
import shutil
from contextlib import closing
from matplotlib import pyplot as plt
# %matplotlib inline
import os
import sys
import threading
from glob import glob
from datetime import datetime
# -
# Remote server files
# +
online_folder = "https://avdc.gsfc.nasa.gov/pub/data/satellite/Aura/OMI/V03/L3/OMNO2D_HR/OMNO2D_HRM/"
res = requests.get(online_folder)
res = res.text
res = res.split("\n")
listing = []
for line in res:
listing.append(line.rstrip())
listing
df1 = pd.DataFrame(listing)
keep = df1.apply(lambda row: "OMI_trno2_0.10x0.10" in row[0], axis=1)
# In the lambda, row is one row of the DataFrame and row[0] is the text in its single column.
df2 = df1.loc[keep]
nc_files = df2.apply(lambda row: ".nc" in row[0], axis=1)
dat_files = df2.apply(lambda row: ".dat" in row[0], axis=1)
df_nc_long = df2.loc[nc_files]
df_dat_long = df2.loc[dat_files]
df_dat_long
df_nc_long
def pull_file_name(string, file_type):
ix = string.find("a href")
file_name = string[ix+8:ix+38+len(file_type)]
date = file_name.split("_")[3]
year = date[0:4]
month = date[4:]
return(file_name, year, month)
# +
online_folder = "https://avdc.gsfc.nasa.gov/pub/data/satellite/Aura/OMI/V03/L3/OMNO2D_HR/OMNO2D_HRM/"
res = requests.get(online_folder)
res = res.text
res = res.split("\n")
listing = []
for line in res:
listing.append(line.rstrip())
df1 = pd.DataFrame(listing)
keep = df1.apply(lambda row: "OMI_trno2_0.10x0.10" in row[0], axis=1)
df2 = df1.loc[keep]
nc_files = df2.apply(lambda row: ".nc" in row[0], axis=1)
dat_files = df2.apply(lambda row: ".dat" in row[0], axis=1)
df_nc_long = df2.loc[nc_files]
df_dat_long = df2.loc[dat_files]
#string and file_type are variable name
def pull_file_name(string, file_type):
ix = string.find("a href")
file_name = string[ix+8:ix+38+len(file_type)]
date = file_name.split("_")[3]
year = date[0:4]
month = date[4:]
return(file_name, year, month)
df_nc = pd.DataFrame(columns=["file_name", "year", "month"])
df_dat = pd.DataFrame(columns=["file_name", "year", "month"])
df_nc["file_name"], df_nc["year"], df_nc["month"] = zip(*df_nc_long.apply(lambda row: pull_file_name(row[0], "nc"), axis=1))
df_dat["file_name"], df_dat["year"], df_dat["month"] = zip(*df_dat_long.apply(lambda row: pull_file_name(row[0], "dat"), axis=1))
# +
var = "hello world"
var.find("l")
a =[1,2,3]
b = [4,5,6]
c = [7,8,9]
d= list (zip(a,b,c))
zip ()
print (a)
print (b)
print(d)
e =list(zip(*d))
print(e)
# -
df_dat
# Download Local Files
# +
local_folder = "/Users/nathansuberi/Desktop/RW_Data/Rasters/no2concentrations/"
try:
os.mkdir(local_folder)
except FileExistsError:
print("Folder already exists")
most_recent = df_dat.iloc[-1]["file_name"]
print(most_recent)
local_orig = local_folder + most_recent
with(closing(urlopen(online_folder + most_recent))) as r:
with(open(local_orig, 'wb')) as f:
shutil.copyfileobj(r, f)
# +
local_edit = local_orig[:-4] + "_edit.tif"

with rio.open(local_orig, 'r') as src:
    data = src.read()[0]

rows = data.shape[0]
columns = data.shape[1]
# Return lat info
south_lat = -90
north_lat = 90
# Return lon info
west_lon = -180
east_lon = 180
print(rows)
print(columns)

transform = rio.transform.from_bounds(west_lon, south_lat, east_lon, north_lat, columns, rows)

# Profile
profile = {
    'driver': 'GTiff',
    'height': rows,
    'width': columns,
    'count': 1,
    'dtype': np.float32,
    'crs': 'EPSG:4326',
    'transform': transform,
    'compress': 'lzw',
    'nodata': -1
}

with rio.open(local_edit, "w", **profile) as dst:
    dst.write(data.astype(profile["dtype"]), 1)
# -
# Define s3 location based on most recent observation
# +
file_name = df_dat.iloc[-1].file_name
year = file_name[20:24]
month = file_name[24:26]
s3_upload = boto3.client("s3")
s3_download = boto3.resource("s3")
s3_bucket = "wri-public-data"
s3_folder = "resourcewatch/raster/*"
s3_file = "*.tif"
s3_key_orig = s3_folder + s3_file
s3_key_edit = s3_key_orig[0:-4] + "_edit.tif"
class ProgressPercentage(object):
def __init__(self, filename):
self._filename = filename
self._size = float(os.path.getsize(filename))
self._seen_so_far = 0
self._lock = threading.Lock()
def __call__(self, bytes_amount):
# To simplify we'll assume this is hooked up
# to a single filename.
with self._lock:
self._seen_so_far += bytes_amount
percentage = (self._seen_so_far / self._size) * 100
sys.stdout.write(
"\r%s %s / %s (%.2f%%)" % (
self._filename, self._seen_so_far, self._size,
percentage))
sys.stdout.flush()
# +
# Original
s3_upload.upload_file(local_orig, s3_bucket, s3_key_orig,
Callback=ProgressPercentage(local_orig))
# Edit
s3_upload.upload_file(local_edit, s3_bucket, s3_key_edit,
Callback=ProgressPercentage(local_edit))
| ResourceWatchCode/Raster Dataset Processing/Raster Prep Notebooks/cit.018_calculate_running_average-later_done.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# ---
# +
import pandas as pd
import numpy as np
# Read NYC trees data
nyc_trees = pd.read_csv("./nyc_tree_census.csv")
tree_health_statuses = nyc_trees.health.unique()
print(tree_health_statuses)
health_categories = ['Poor', 'Fair', 'Good']
nyc_trees.health = pd.Categorical(nyc_trees.health, health_categories, ordered=True)
median_index = np.median(nyc_trees.health.cat.codes)
median_health_status = health_categories[int(median_index)]
print(median_health_status)
| Ordinal Categorical Variables - Central Tendency I.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # Homework 7
# In this homework, we will implement a simplified version of the object detection process. Note that the tests in the notebook are not comprehensive; the autograder will contain more tests.
# +
from __future__ import print_function
import random
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as patches
from skimage import io
from skimage.feature import hog
from skimage import data, color, exposure
from skimage.transform import rescale, resize, downscale_local_mean
import glob, os
import fnmatch
import time
import warnings
warnings.filterwarnings('ignore')
from detection import *
from visualization import *
from utils import *
# This code is to make matplotlib figures appear inline in the
# notebook rather than in a new window.
# %matplotlib inline
plt.rcParams['figure.figsize'] = (10.0, 8.0) # set default size of plots
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'
# Some more magic so that the notebook will reload external python modules;
# see http://stackoverflow.com/questions/1907993/autoreload-of-modules-in-ipython
# %load_ext autoreload
# %autoreload 2
# %reload_ext autoreload
# -
# # Part 1: Hog Representation (5 points)
#
# In this section, we will compute the average hog representation of human faces.<br>
# There are 31 aligned face images provided in the `\face` folder. They are all aligned and have the same size. We will get an average face from these images and compute a hog feature representation for the averaged face. <br>
# Use the hog function provided by skimage library, and implement a hog representation of objects.
# Implement **`hog_feature`** function in `detection.py`
# +
image_paths = fnmatch.filter(os.listdir('./face'), '*.jpg')
list.sort(image_paths)
n = len(image_paths)
face_shape, avg_face = load_faces(image_paths, n)
(face_feature, hog_image) = hog_feature(avg_face)
print("Sum of face feature = ", np.sum(face_feature))
assert np.abs(np.sum(face_feature) - 499.970465079) < 1e-2
plot_part1(avg_face, hog_image)
# -
# # Part 2: Sliding Window (20 points)
# Implement **`sliding_window`** function to have windows slide across an image with a specific window size. The window slides through the image and checks if an object is detected with a high similarity score with the template at every location. We compute these scores as the dot product of the HoG features of the template and the HoG features of each window as the windows slide through the image. These scores will generate a response map and you will be able to find the location of the window with the highest hog score.
#
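# For intuition, the "score" of a window is just this dot product of HOG vectors. Below is a tiny sanity check using only names defined above (a sketch; the real `sliding_window` loops this over every window position and builds the response map).
# +
# Scoring the averaged face template against itself gives a large score,
# since the window HOG and the template HOG are identical here.
self_score = hog_feature(avg_face)[0].dot(face_feature)
print("Template scored against itself:", self_score)
# -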
# +
image_path = 'image_0001.jpg'
image = io.imread(image_path, as_gray=True)
image = rescale(image, 0.8)
(winH, winW) = face_shape
(score, r, c, response_map_resized, response_map) = \
sliding_window(image, face_feature, step_size=30, window_size=face_shape, return_unresized_response=True)
print("Maximum HOG face feature score over sliding window = ", score)
print("Maximum score location = row {}, col {}".format(r, c))
crop = image[r:r+winH, c:c+winW]
plot_part2(image, r, c, response_map_resized, response_map, winW, winH)
# -
# Sliding window successfully found the human face in the above example. However, in the cell below, we are only changing the scale of the image, and you can see that sliding window does not work once the scale of the image is changed.
# +
image_path = 'image_0001.jpg'
image = io.imread(image_path, as_gray=True)
image = rescale(image, 1.2)
(winH, winW) = face_shape
(score, r, c, response_map_resized, response_map) = \
sliding_window(image, face_feature, step_size=30, window_size=face_shape, return_unresized_response=True)
print("Maximum HoG face feature score over sliding window = ", score)
print("Maximum score location = row {}, col {}".format(r, c))
crop = image[r:r+winH, c:c+winW]
plot_part2(image, r, c, response_map_resized, response_map, winW, winH)
# -
# # Part 3: Image Pyramids (20 points)
# In order to make sliding window work for different scales of images, you need to implement image pyramids where you resize the image to different scales and run the sliding window method on each resized image. This way you scale the objects and can detect both small and large objects.
#
# ### 3.1 Image Pyramid (5 points)
#
# Implement **`pyramid`** function in `detection.py`, this will create pyramid of images at different scales. Run the following code, and you will see the shape of the original image gets smaller until it reaches a minimum size.
#
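# As a rough sketch of the idea (using skimage's `rescale`; the assignment's `pyramid` in `detection.py` may differ in details such as the minimum size and what it returns):
# +
def pyramid_sketch(img, scale=0.9, min_size=(32, 32)):
    """Yield (current_scale, image) pairs, shrinking by `scale` until min_size is reached."""
    current_scale = 1.0
    yield current_scale, img
    while True:
        current_scale *= scale
        resized = rescale(img, current_scale)
        if resized.shape[0] < min_size[0] or resized.shape[1] < min_size[1]:
            break
        yield current_scale, resized

print([im.shape for _, im in pyramid_sketch(avg_face)])
# -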
# +
image_path = 'image_0001.jpg'
image = io.imread(image_path, as_gray=True)
image = rescale(image, 1.2)
images = pyramid(image, scale = 0.9)
plot_part3_1(images)
# -
# ### 3.2 Pyramid Score (15 points)
#
# After getting the image pyramid, we will run sliding window on all the images to find a place that gets the highest score. Implement **`pyramid_score`** function in `detection.py`. It will return the highest score and its related information in the image pyramids.
# +
image_path = 'image_0001.jpg'
image = io.imread(image_path, as_gray=True)
image = rescale(image, 1.2)
(winH, winW) = face_shape
max_score, maxr, maxc, max_scale, max_response_map = pyramid_score \
(image, face_feature, face_shape, step_size = 30, scale=0.8)
print("Maximum HoG face feature score over pyramid and sliding window = ", max_score)
print("Maximum score location = row {}, col {}".format(maxr, maxc))
plot_part3_2(image, max_scale, winW, winH, maxc, maxr, max_response_map)
# -
# From the above example, we can see that image pyramid has fixed the problem of scaling. Then in the example below, we will try another image and implement a deformable parts model.
# +
image_path = 'image_0338.jpg'
image = io.imread(image_path, as_gray=True)
image = rescale(image, 1.0)
(winH, winW) = face_shape
max_score, maxr, maxc, max_scale, max_response_map = pyramid_score \
(image, face_feature, face_shape, step_size = 30, scale=0.8)
print("Maximum HoG face feature score over pyramid and sliding window = ", max_score)
print("Maximum score location = row {}, col {}".format(maxr, maxc))
plot_part3_2(image, max_scale, winW, winH, maxc, maxr, max_response_map)
# -
# # Part 4: Deformable Parts Detection (15 Points)
# In order to solve the problem above, you will implement deformable parts model in this section, and apply it on human faces. <br>
# The first step is to get a detector for each part of the face, including left eye, right eye, nose and mouth. <br>
# For example for the left eye, we have provided the groundtruth location of left eyes for each image in the `\face` directory. This is stored in the `lefteyes` array with shape `(n,2)`, each row is the `(r,c)` location of the center of left eye. You will then find the average hog representation of the left eyes in the images.
# Run through the following code to get a detector for left eyes.
# +
image_paths = fnmatch.filter(os.listdir('./face'), '*.jpg')
parts = read_facial_labels(image_paths)
lefteyes, righteyes, noses, mouths = parts
# Typical shape for left eye
lefteye_h = 10
lefteye_w = 20
lefteye_shape = (lefteye_h, lefteye_w)
avg_lefteye = get_detector(lefteye_h, lefteye_w, lefteyes, image_paths)
(lefteye_feature, lefteye_hog) = hog_feature(avg_lefteye, pixel_per_cell=2)
plot_part4(avg_lefteye, lefteye_hog, 'left eye')
# -
# Run through the following code to get a detector for right eye.
# +
righteye_h = 10
righteye_w = 20
righteye_shape = (righteye_h, righteye_w)
avg_righteye = get_detector(righteye_h, righteye_w, righteyes, image_paths)
(righteye_feature, righteye_hog) = hog_feature(avg_righteye, pixel_per_cell=2)
plot_part4(avg_righteye, righteye_hog, 'right eye')
# -
# Run through the following code to get a detector for nose.
# +
nose_h = 30
nose_w = 26
nose_shape = (nose_h, nose_w)
avg_nose = get_detector(nose_h, nose_w, noses, image_paths)
(nose_feature, nose_hog) = hog_feature(avg_nose, pixel_per_cell=2)
plot_part4(avg_nose, nose_hog, 'nose')
# -
# Run through the following code to get a detector for mouth
# +
mouth_h = 20
mouth_w = 36
mouth_shape = (mouth_h, mouth_w)
avg_mouth = get_detector(mouth_h, mouth_w, mouths, image_paths)
(mouth_feature, mouth_hog) = hog_feature(avg_mouth, pixel_per_cell=2)
detectors_list = [lefteye_feature, righteye_feature, nose_feature, mouth_feature]
plot_part4(avg_mouth, mouth_hog, 'mouth')
# -
# ### 4.1 Compute displacement (10 points)
#
# Implement **`compute_displacement`** to get an average shift vector mu and standard deviation sigma for each part of the face. The vector mu is the distance from the main center, i.e. the center of the face, to the center of the part. Note that you can and should leave mu as a decimal instead of rounding to integers, because our next step of applying the shift in **`shift_heatmap`** will interpolate the shift, which is valid for decimal shifts.
#
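# In other words (a sketch consistent with the test values below; the graded `compute_displacement` lives in `detection.py`): mu is the mean offset from the face center to the part centers, and sigma is the per-axis standard deviation of those offsets.
# +
def compute_displacement_sketch(part_centers, shape):
    """Mean and std of the offset from the face center to each part center."""
    face_center = np.array([shape[0] / 2, shape[1] / 2])
    offsets = face_center - part_centers   # shape (n, 2), one (dr, dc) offset per image
    return offsets.mean(axis=0), offsets.std(axis=0)

print(compute_displacement_sketch(np.array([[0, 1], [1, 2], [2, 3], [3, 4]]), (6, 6)))
# -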
# test for compute_displacement
test_array = np.array([[0,1],[1,2],[2,3],[3,4]])
test_shape = (6,6)
mu, std = compute_displacement(test_array, test_shape)
assert(np.all(mu == [1.5,0.5]))
assert(np.sum(std-[ 1.11803399, 1.11803399])<1e-5)
print("Your implementation is correct!")
# +
lefteye_mu, lefteye_std = compute_displacement(lefteyes, face_shape)
righteye_mu, righteye_std = compute_displacement(righteyes, face_shape)
nose_mu, nose_std = compute_displacement(noses, face_shape)
mouth_mu, mouth_std = compute_displacement(mouths, face_shape)
print("Left eye shift = ", lefteye_mu)
print("Right eye shift = ", righteye_mu)
print("Nose shift = ", nose_mu)
print("Mouth shift = ", mouth_mu)
print("\nLeft eye std = ", lefteye_std)
print("Right eye std = ", righteye_std)
print("Nose std = ", nose_std)
print("Mouth std = ", mouth_std)
# -
# After getting the shift vectors, we can run our detector on a test image. We will first run the following code to detect each part of left eye, right eye, nose and mouth in the image. You will see a response map for each of them.
# +
image_path = 'image_0338.jpg'
image = io.imread(image_path, as_gray=True)
image = rescale(image, 1.0)
(face_H, face_W) = face_shape
max_score, face_r, face_c, face_scale, face_response_map = pyramid_score\
(image, face_feature, face_shape,step_size = 30, scale=0.8)
print("Maximum HoG face feature score over pyramid and sliding window = ", max_score)
print("Maximum score location = row {}, col {}".format(face_r, face_c))
plot_part5_1(face_response_map)
# +
max_score, lefteye_r, lefteye_c, lefteye_scale, lefteye_response_map = \
pyramid_score(image, lefteye_feature,lefteye_shape, step_size = 20,scale=0.9, pixel_per_cell = 2)
lefteye_response_map = resize(lefteye_response_map, face_response_map.shape)
print("Maximum HoG face feature score over pyramid and sliding window = ", max_score)
print("Maximum score location = row {}, col {}".format(lefteye_r, lefteye_c))
plot_part5_1(lefteye_response_map)
# +
max_score, righteye_r, righteye_c, righteye_scale, righteye_response_map = \
pyramid_score (image, righteye_feature, righteye_shape, step_size = 20,scale=0.9, pixel_per_cell=2)
righteye_response_map = resize(righteye_response_map, face_response_map.shape)
print("Maximum HoG face feature score over pyramid and sliding window = ", max_score)
print("Maximum score location = row {}, col {}".format(righteye_r, righteye_c))
plot_part5_1(righteye_response_map)
# +
max_score, nose_r, nose_c, nose_scale, nose_response_map = \
pyramid_score (image, nose_feature, nose_shape, step_size = 20,scale=0.9, pixel_per_cell = 2)
nose_response_map = resize(nose_response_map, face_response_map.shape)
print("Maximum HoG face feature score over pyramid and sliding window = ", max_score)
print("Maximum score location = row {}, col {}".format(nose_r, nose_c))
plot_part5_1(nose_response_map)
# +
max_score, mouth_r, mouth_c, mouth_scale, mouth_response_map =\
pyramid_score (image, mouth_feature, mouth_shape, step_size = 20,scale=0.9, pixel_per_cell = 2)
mouth_response_map = resize(mouth_response_map, face_response_map.shape)
print("Maximum HoG face feature score over pyramid and sliding window = ", max_score)
print("Maximum score location = row {}, col {}".format(mouth_r, mouth_c))
plot_part5_1(mouth_response_map)
# -
# ### 4.2 Shift heatmap (5 points)
#
# After getting the response maps for each part of the face, we will shift these maps so that they all have the same center as the face. We have calculated the shift vector mu in `compute_displacement`, so we are shifting based on vector mu. Implement `shift_heatmap` function in `detection.py`.
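# One way to realize such a shift with interpolation (a sketch; the graded `shift_heatmap` may also normalize the heatmap or handle borders differently) is `scipy.ndimage.shift`, which accepts fractional offsets:
# +
from scipy.ndimage import shift as nd_shift

shifted_example = nd_shift(face_response_map, shift=lefteye_mu)
print(shifted_example.shape, face_response_map.shape)
# -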
face_heatmap_shifted = shift_heatmap(face_response_map, [0,0])
print("Heatmap face max and min = ", face_heatmap_shifted.max(), face_heatmap_shifted.min())
print("Heatmap face max location = ", np.unravel_index(face_heatmap_shifted.argmax(), face_heatmap_shifted.shape))
plot_part5_2_face(face_heatmap_shifted)
# +
lefteye_heatmap_shifted = shift_heatmap(lefteye_response_map, lefteye_mu)
righteye_heatmap_shifted = shift_heatmap(righteye_response_map, righteye_mu)
nose_heatmap_shifted = shift_heatmap(nose_response_map, nose_mu)
mouth_heatmap_shifted = shift_heatmap(mouth_response_map, mouth_mu)
print("Heatmap left eye max and min = ",
lefteye_heatmap_shifted.max(), lefteye_heatmap_shifted.min())
print("Heatmap left eye max location = ",
np.unravel_index(lefteye_heatmap_shifted.argmax(), lefteye_heatmap_shifted.shape))
print("Heatmap right eye max and min = ",
righteye_heatmap_shifted.max(), righteye_heatmap_shifted.min())
print("Heatmap right eye max location = ",
np.unravel_index(righteye_heatmap_shifted.argmax(), righteye_heatmap_shifted.shape))
print("Heatmap nose max and min = ",
nose_heatmap_shifted.max(), nose_heatmap_shifted.min())
print("Heatmap nose max location = ",
np.unravel_index(nose_heatmap_shifted.argmax(), nose_heatmap_shifted.shape))
print("Heatmap mouth max and min = ",
mouth_heatmap_shifted.max(), mouth_heatmap_shifted.min())
print("Heatmap mouth max location = ",
np.unravel_index(mouth_heatmap_shifted.argmax(), mouth_heatmap_shifted.shape))
plot_part5_2_parts(lefteye_heatmap_shifted, righteye_heatmap_shifted,
nose_heatmap_shifted, mouth_heatmap_shifted)
# -
# # Part 5: Gaussian Filter (15 points)
#
# ## Part 5.1 Gaussian Filter (10 points)
# In this part, apply gaussian filter convolution to each heatmap. Blur by kernel of standard deviation sigma, and then add the heatmaps of the parts with the heatmap of the face. On the combined heatmap, find the maximum value and its location. You can use function provided by skimage to implement **`gaussian_heatmap`**.
#
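# A sketch of the combination step (using `skimage.filters.gaussian`; the exact normalization and sigma handling of the graded `gaussian_heatmap` may differ):
# +
from skimage.filters import gaussian

def gaussian_heatmap_sketch(heatmap_face, heatmaps, sigmas):
    """Blur each part heatmap by its per-axis sigma, add them to the face heatmap, return the argmax."""
    combined = np.copy(heatmap_face)
    for hm, sigma in zip(heatmaps, sigmas):
        combined = combined + gaussian(hm, sigma=sigma)
    r, c = np.unravel_index(combined.argmax(), combined.shape)
    return combined, r, c
# -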
# +
heatmap_face = face_heatmap_shifted
heatmaps = [lefteye_heatmap_shifted,
righteye_heatmap_shifted,
nose_heatmap_shifted,
mouth_heatmap_shifted]
sigmas = [lefteye_std, righteye_std, nose_std, mouth_std]
heatmap, i , j = gaussian_heatmap(heatmap_face, heatmaps, sigmas)
print("Heatmap shape = ", heatmap.shape)
print("Image shape = ", image.shape)
print("Gaussian heatmap max and min = ", heatmap.max(), heatmap.min())
print("Gaussian heatmap max location = ", np.unravel_index(heatmap.argmax(), heatmap.shape))
print("Resizing heatmap to image shape ...")
plot_part6_1(winH, winW, heatmap, image, i, j)
# -
# ## 5.2 Result Analysis (5 points)
#
# Does your DPM work on detecting human faces? Can you think of a case where DPM may work better than the detector we had in part 3 (sliding window + image pyramid)? You can also have examples that are not faces.
# **Your Answer:** Write your answer in this markdown cell.
# ## Extra Credit
# You have tried detecting one face from the image, and the next step is to extend it to detecting multiple occurences of the object. For example in the following image, how do you detect more than one face from your response map? Implement the function **`detect_multiple`**, and write code to visualize your detected faces in the cell below.
image_path = 'image_0002.jpg'
image = io.imread(image_path, as_gray=True)
plt.imshow(image)
plt.show()
# +
image_path = 'image_0002.jpg'
image = io.imread(image_path, as_gray=True)
heatmap = get_heatmap(image, face_feature, face_shape, detectors_list, parts)
plt.imshow(heatmap, cmap='viridis', interpolation='nearest')
plt.show()
# +
detected_faces = detect_multiple(image, heatmap)
# Visualize your detected faces
### YOUR CODE HERE
pass
### END YOUR CODE
# -
# ---
# # Part 6: K-Nearest Neighbors Classification (25 points)
#
# ## Face Dataset
#
# We will use a dataset of faces of celebrities. Download the dataset using the following command:
#
# sh get_dataset.sh
#
# This is the face dataset for the CS131 assignment.
# The directory containing the dataset has the following structure:
#
# faces/
# train/
# angelina jolie/
# anne hathaway/
# ...
# test/
# angelina jolie/
# anne hathaway/
# ...
#
# Each class has 50 training images and 10 testing images.
# +
from utils_knn import load_dataset
X_train, y_train, classes_train = load_dataset('faces', train=True, as_gray=True)
X_test, y_test, classes_test = load_dataset('faces', train=False, as_gray=True)
assert classes_train == classes_test
classes = classes_train
print('Class names:', classes)
print('Training data shape:', X_train.shape)
print('Training labels shape: ', y_train.shape)
print('Test data shape:', X_test.shape)
print('Test labels shape: ', y_test.shape)
# -
# Visualize some examples from the dataset.
# We show a few examples of training images from each class.
num_classes = len(classes)
samples_per_class = 10
for y, cls in enumerate(classes):
idxs = np.flatnonzero(y_train == y)
idxs = np.random.choice(idxs, samples_per_class, replace=False)
for i, idx in enumerate(idxs):
plt_idx = i * num_classes + y + 1
plt.subplot(samples_per_class, num_classes, plt_idx)
plt.imshow(X_train[idx])
plt.axis('off')
if i == 0:
plt.title(y)
plt.show()
# Flatten the image data into rows
# we now have one 4096 dimensional featue vector for each example
X_train_flat = np.reshape(X_train, (X_train.shape[0], -1))
X_test_flat = np.reshape(X_test, (X_test.shape[0], -1))
print("New training data shape:", X_train_flat.shape)
print("New test data shape:", X_test_flat.shape)
# ## Part 6.1: Cross Validation on Raw Pixel Features (15 Points)
#
# We're now going to try to classify the test images using the k-nearest neighbors algorithm on the **raw features of the images** (i.e. the pixel values themselves). We will see later how we can use kNN on better features.
#
# The gist of the k-nearest neighbors algorithm is to predict a test image's class based on which classes the k nearest train images belong to. For example, using k = 3, if we found that for test image X, the three nearest train images were 2 pictures of <NAME>, and one picture of <NAME>, we would predict that the test image X is a picture of <NAME>.
#
# Here are the steps that we will follow:
#
# 1. We compute the L2 distances between every element of X_test and every element of X_train in `compute_distances`.
# 2. We split the dataset into 5 folds for cross-validation in `split_folds`.
# 3. For each fold, and for different values of `k`, we predict the labels and measure accuracy.
# 4. Using the best `k` found through cross-validation, we measure accuracy on the test set.
#
# Resources for understanding cross-validation:
# https://towardsdatascience.com/why-and-how-to-cross-validate-a-model-d6424b45261f
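# For step 1, a fully vectorized L2 distance matrix typically uses the expansion ||x - y||^2 = ||x||^2 - 2 x.y + ||y||^2. The sketch below illustrates the idea (the graded `compute_distances` is in `k_nearest_neighbor.py`):
# +
def l2_distances_sketch(A, B):
    """Pairwise Euclidean distances between rows of A (m, d) and rows of B (n, d)."""
    sq = (A ** 2).sum(axis=1)[:, None] - 2 * A @ B.T + (B ** 2).sum(axis=1)[None, :]
    return np.sqrt(np.maximum(sq, 0))   # clip tiny negatives caused by round-off

print(l2_distances_sketch(X_test_flat[:3], X_train_flat[:5]).shape)  # (3, 5)
# -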
# +
from k_nearest_neighbor import compute_distances
# Step 1: compute the distances between all features from X_train and from X_test
dists = compute_distances(X_test_flat, X_train_flat)
assert dists.shape == (160, 800)
print("dists shape:", dists.shape)
# +
from k_nearest_neighbor import predict_labels
# We use k = 1 (which corresponds to only taking the nearest neighbor to decide)
y_test_pred = predict_labels(dists, y_train, k=1)
# Compute and print the fraction of correctly predicted examples
num_test = y_test.shape[0]
num_correct = np.sum(y_test_pred == y_test)
accuracy = float(num_correct) / num_test
print('Got %d / %d correct => accuracy: %f' % (num_correct, num_test, accuracy))
# -
# ### Cross-Validation
#
# We don't know the best value for our parameter `k`.
# There is no theory on how to choose an optimal `k`, and the way to choose it is through cross-validation.
#
# We **cannot** compute any metric on the test set to choose the best `k`, because we want our final test accuracy to reflect a real use case. This real use case would be a setting where we have new examples come and we classify them on the go. There is no way to check the accuracy beforehand on that set of test examples to determine `k`.
#
# Cross-validation splits the training data into several folds (5 here).
# For each fold, with a total of 5 folds, we will have:
# - 80% of the data as training data
# - 20% of the data as validation data
#
# We will compute the accuracy on the validation set for each fold, and use the mean of these 5 accuracies to determine the best parameter `k`.
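# As an illustration only, one way `split_folds` could be implemented (assuming the number of examples divides evenly into the folds, as it does here with 800 training examples and 5 folds):
# +
def split_folds_sketch(X, y, num_folds):
    """Return stacked train/validation splits of shape (num_folds, ...)."""
    X_chunks = np.array_split(X, num_folds)
    y_chunks = np.array_split(y, num_folds)
    X_trains, y_trains, X_vals, y_vals = [], [], [], []
    for i in range(num_folds):
        X_vals.append(X_chunks[i])
        y_vals.append(y_chunks[i])
        # concatenate every chunk except the i-th one to form the training split
        X_trains.append(np.concatenate(X_chunks[:i] + X_chunks[i + 1:]))
        y_trains.append(np.concatenate(y_chunks[:i] + y_chunks[i + 1:]))
    return np.stack(X_trains), np.stack(y_trains), np.stack(X_vals), np.stack(y_vals)
# -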
# +
from k_nearest_neighbor import split_folds
# Step 2: split the data into 5 folds to perform cross-validation.
num_folds = 5
X_trains, y_trains, X_vals, y_vals = split_folds(X_train_flat, y_train, num_folds)
assert X_trains.shape == (5, 640, 4096)
assert y_trains.shape == (5, 640)
assert X_vals.shape == (5, 160, 4096)
assert y_vals.shape == (5, 160)
# +
# Step 3: Measure the mean accuracy for each value of `k`
# List of k to choose from
k_choices = list(range(5, 101, 5))
# Dictionary mapping k values to accuracies
# For each k value, we will have `num_folds` accuracies to compute
# k_to_accuracies[1] will be for instance [0.22, 0.23, 0.19, 0.25, 0.20] for 5 folds
k_to_accuracies = {}
for k in k_choices:
print("Running for k=%d" % k)
accuracies = []
for i in range(num_folds):
# Make predictions
fold_dists = compute_distances(X_vals[i], X_trains[i])
y_pred = predict_labels(fold_dists, y_trains[i], k)
# Compute and print the fraction of correctly predicted examples
num_correct = np.sum(y_pred == y_vals[i])
accuracy = float(num_correct) / len(y_vals[i])
accuracies.append(accuracy)
k_to_accuracies[k] = accuracies
# +
# plot the raw observations
plt.figure(figsize=(12,8))
for k in k_choices:
accuracies = k_to_accuracies[k]
plt.scatter([k] * len(accuracies), accuracies)
# plot the trend line with error bars that correspond to standard deviation
accuracies_mean = np.array([np.mean(v) for k,v in sorted(k_to_accuracies.items())])
accuracies_std = np.array([np.std(v) for k,v in sorted(k_to_accuracies.items())])
plt.errorbar(k_choices, accuracies_mean, yerr=accuracies_std)
plt.title('Cross-validation on k')
plt.xlabel('k')
plt.ylabel('Cross-validation accuracy')
plt.show()
# +
# Based on the cross-validation results above, choose the best value for k,
# retrain the classifier using all the training data, and test it on the test
# data. You should be able to get above 26% accuracy on the test data.
best_k = None
### YOUR CODE HERE
# Choose the best k based on the cross validation above
pass
### END YOUR CODE
y_test_pred = predict_labels(dists, y_train, k=best_k)
# Compute and display the accuracy
num_correct = np.sum(y_test_pred == y_test)
accuracy = float(num_correct) / num_test
print('For k = %d, got %d / %d correct => accuracy: %f' % (best_k, num_correct, num_test, accuracy))
# -
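# One possible (not necessarily the intended) way to pick `best_k` from the cross-validation results gathered above is to take the `k` with the highest mean validation accuracy:
# +
best_k_candidate = max(k_to_accuracies, key=lambda k: np.mean(k_to_accuracies[k]))
print("k with highest mean cross-validation accuracy:", best_k_candidate)
# -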
# ## Part 6.2: Cross Validation on HOG Features (10 Points)
#
# We're now going to try to classify the test images using the k-nearest neighbors algorithm on HOG features!
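# The `hog_feature` helper used below ships with the assignment code and is not shown here; as a rough illustration only, a similar descriptor could be computed with scikit-image (the exact parameters of the provided helper may differ).
# +
from skimage.feature import hog

def hog_feature_sketch(image, pixels_per_cell=8):
    """Return a HOG descriptor and its visualization for a grayscale image."""
    return hog(image, pixels_per_cell=(pixels_per_cell, pixels_per_cell),
               cells_per_block=(2, 2), visualize=True)
# -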
# +
# Create HOG datasets
X_train_hog = [hog_feature(x)[0] for x in X_train]
X_test_hog = [hog_feature(x)[0] for x in X_test]
print("Loaded {} HoG features.".format(len(X_train_hog)))
print("Loaded {} HoG features.".format(len(X_test_hog)))
X_train_hog = np.stack(X_train_hog)
X_test_hog = np.stack(X_test_hog)
print("HOG Training data shape:", X_train_hog.shape)
print("HOG Test data shape:", X_test_hog.shape)
# +
# Create Cross Validation datasets
num_folds = 5
X_hog_trains, y_trains, X_hog_vals, y_vals = split_folds(X_train_hog, y_train, num_folds)
# List of k to choose from
k_choices = list(range(5, 101, 5))
k_to_accuracies = {}
for k in k_choices:
print("Running for k=%d" % k)
accuracies = []
for i in range(num_folds):
# Make predictions
fold_dists = compute_distances(X_hog_vals[i], X_hog_trains[i])
y_pred = predict_labels(fold_dists, y_trains[i], k)
# Compute and print the fraction of correctly predicted examples
num_correct = np.sum(y_pred == y_vals[i])
accuracy = float(num_correct) / len(y_vals[i])
accuracies.append(accuracy)
k_to_accuracies[k] = accuracies
# plot the raw observations
plt.figure(figsize=(12,8))
for k in k_choices:
accuracies = k_to_accuracies[k]
plt.scatter([k] * len(accuracies), accuracies)
# plot the trend line with error bars that correspond to standard deviation
accuracies_mean = np.array([np.mean(v) for k,v in sorted(k_to_accuracies.items())])
accuracies_std = np.array([np.std(v) for k,v in sorted(k_to_accuracies.items())])
plt.errorbar(k_choices, accuracies_mean, yerr=accuracies_std)
plt.title('Cross-validation on k')
plt.xlabel('k')
plt.ylabel('Cross-validation accuracy')
plt.show()
# +
# Based on the cross-validation results above, choose the best value for k,
# retrain the classifier using all the training data, and test it on the test
# data. You should be able to get above 50% accuracy on the test data.
best_k = None
### YOUR CODE HERE
# Choose the best k based on the cross validation above
pass
### END YOUR CODE
dists = compute_distances(X_test_hog, X_train_hog)
y_test_pred = predict_labels(dists, y_train, k=best_k)
# Compute and display the accuracy
num_test = X_test_hog.shape[0]
num_correct = np.sum(y_test_pred == y_test)
accuracy = float(num_correct) / num_test
print('For k = %d, got %d / %d correct => accuracy: %f' % (best_k, num_correct, num_test, accuracy))
# -
# ### Written Questions
# **Guidance on expectations and grading:**
# These are fairly open-ended questions that don't have a black and white, right or wrong answer. Instead, there are many ways of reasoning about these questions, and we're looking for engagement with and understanding of the purpose and mechanics of HOG, K-Nearest Neighbors, Cross Validation, and splitting data into training, validation, and testing sets. As long as you meaningfully engage with these concepts, as they're relevant to each question, and show understanding of them, you'll earn full credit!
#
# **Here's some clarification on Question 3:**
# We mention a variety of performance metrics for each value of k on the cross validation results: mean, standard deviation, maximum, and minimum. Mean is represented by the middle of each error bar that has the horizontal blue line connecting all the means for each value of k, while standard deviation is the size of the error bar. Mean is the average accuracy of that value of k across all of the cross validation splits, and standard deviation is also measured across the cross validation splits. You only need to consider one of these factors to inform your choice of the 'best' k, but you're free to consider multiple or all of them in your reasoning about choosing the 'best' k. You can get full credit for accurately defending the use of any combination of these metrics.
#
# If you're stuck on Question 3, think about these questions to get you started:
# *Hint 1:* for a given value of k, what does the mean of validation set accuracy tell you about expected test set accuracy with that value of k? What does the standard deviation of validation set accuracy tell you about the uncertainty of test set accuracy with that value of k?
# *Hint 2:* you can also similarly think about minimum and maximum accuracy across the splits for a given value of k, which relates to standard deviation.
#
# **Question 1**: Why did HOG features do so much better than raw pixels? You'll notice that even the luckiest high outlier of cross validation on raw pixels is outperformed by the unluckiest low outlier in HOG. Remember that the goal of this classification task is to learn to classify the identity of a profile picture using the selected feature type. How do you think we could improve to do even better?
#
# **Your Answer:** Write your answer in this markdown cell.
# Either or both of these answers is correct, or something new, as long as it demonstrates some understanding of HoG features and kNN.
#
#
# **Question 2**: Why did we tell you to choose the best k from cross validation, and then evaluate accuracy for that k on the test set, instead of directly evaluating a range of k values on the test set and picking the one with the best accuracy?
#
# **Your Answer:** Write your answer in this markdown cell.
#
#
# **Question 3**: How did you decide which value of k was 'best'? In a real-world scenario, if you were deploying this K-Nearest Neighbors HOG feature classifier, how would you consider the roles of the mean, standard deviation, maximum, and/or minimum of each value of k that you observed in cross validation when choosing the 'best' k?
#
# **Your Answer:** Write your answer in this markdown cell.
#
#
| fall_2021/hw7_release/hw7.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# In this tutorial we will go through some of the most common ways of using pytac. The aim is to give you an understanding of the interface and how to find out what is available.
#
# # Loading the lattice
#
# The central object in pytac is the `lattice`. It holds the information about all of the elements in the accelerator.
#
# All the data about the lattice and its elements is stored in CSV files inside the pytac repository. We use the `load_csv` module to load the data and initialise a `lattice` object; this is the normal starting point for using pytac.
#
# The "ring mode" describes one configuration of the elements in the lattice. There is one set of CSV files for each ring mode. So when we load the lattice, we specify the ring mode we want to load.
#
# At the time of writing the normal ring mode in use at Diamond is "DIAD", so let's load that.
#
# First some required imports.
import sys, os
# Make the pytac package available from this subdirectory
sys.path.append(os.path.join(os.getcwd(), '..'))
import pytac
# Initialize the DIAD mode. The import of the Cothread channel access library will allow us to get some live values from the Diamond accelerators.
import cothread
lattice = pytac.load_csv.load('DIAD')
# The lattice object itself has some fields with its own properties:
lattice.get_fields()
# The name "live" is referring to the data source - Pytac can also be set up with additional data sources for simulation, but that isn't described here.
#
# We can ask for the values of these fields. These commands will try to get the real values from the live machine (so won't work if you're not on a suitable Diamond network).
lattice.get_value("energy")
lattice.get_value("beam_current")
# ## Families, elements and fields
#
# The elements in the lattice are grouped by families, and this is the most common way to choose some to access. We can list the available families:
lattice.get_all_families()
# Let's get all the beam position monitors (BPMs). We do this by using get_elements which takes an argument for family name - in this case we use the family name "BPM".
bpms = lattice.get_elements('BPM')
print("Got {} BPMs".format(len(bpms)))
# Let's look at what we can find out about a single BPM.
#
# Each one has some fields:
one_bpm = bpms[0]
one_bpm.get_fields()
# The fields represent a property of the BPM that can change. For example, x and y are the measured positions.
one_bpm.get_value("x")
#
# ## Devices
#
# Each field has a `device` object associated with it, which knows how to set and get the value.
one_bpm.get_device("x")
# The `device` object knows the PV names for reading and writing the value of the field. Each field might have a "setpoint" or "readback" handle, which could be associated with different PV names.
#
# You can use either strings or pytac constants to specify which handle to use.
readback_pv = one_bpm.get_pv_name("x_sofb_disabled", "readback")
same_readback_pv = one_bpm.get_pv_name("x_sofb_disabled", pytac.RB)
print(readback_pv, same_readback_pv)
# Some fields are read-only, in which case there is no setpoint PV to get.
try:
one_bpm.get_pv_name("x_sofb_disabled", pytac.SP)
except Exception as e:
print(e)
# It's not normally necessary to interact with the `device` directly; you can do most things through methods of the `element` or `lattice`. E.g. element.get_value() above and `lattice.get_element_pv_names`:
lattice.get_element_pv_names('BPM', 'y', 'readback')[:10]
# ## Unit conversions
#
# Many fields can be represented in either engineering units or physics units. For example, for a magnet field, the physics unit would be the field strength and the engineering unit would be the current applied by the magnet power supply controller.
# Get a corrector magnet
corrector = lattice.get_elements("HSTR")[5]
# Request the value in engineering units
corrector.get_value("x_kick", units=pytac.ENG)
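# For comparison, the same field can also be requested in physics units (assuming unit conversion data is available for this field); `pytac.PHYS` is the counterpart of `pytac.ENG`.
corrector.get_value("x_kick", units=pytac.PHYS)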
# In order to get the unit itself, we have to ask for the `unitconv` object associated with the field.
corrector.get_unitconv("x_kick").eng_units
# ## Magnet fields
#
# This seems like a good time to talk about the names for the magnetic fields of magnets.
#
# In accelerator physics we refer to the different components of magnetic fields as $a_n$ for vertical fields and $b_n$ for horizontal fields, where n is:
#
# | n | Field |
# |-----|------------|
# | 0 | Dipole |
# | 1 | Quadrupole |
# | 2 | Sextupole |
# | ... | ... |
#
# These names are used for the `field`s associated with magnet `element`s in pytac.
#
# For corrector magnets, although the corrector field acts like a dipole, it is given the name `x_kick` or `y_kick` so that it can be easily distinguished. An example of this is when several magnets are combined into the same `element`. The following example shows an element which combines a corrector, a skew quadrupole and a sextupole.
an_element = lattice.get_elements("HSTR")[12]
print("Fields:", an_element.get_fields())
print("Families:", an_element.families)
# ## Other methods of the `lattice`
#
# To finish off for now, let's look at some more of the methods of the `lattice`
#
# `lattice.get_element_values` lets you get all the live values for a field from a whole family of elements, e.g. the currents for the horizontal corrector magnets. There is also an analogous command `lattice.set_element_values()`.
lattice.get_element_values("HSTR", "x_kick", "readback")
# `s` position is the position of an element in metres around the ring.
#
# There is a method to get the `s` positions of all elements in a family:
lattice.get_family_s("BPM")[:10]
| jupyter/tutorial.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# %matplotlib inline
# %config IPython.matplotlib.backend = "retina"
import matplotlib.pyplot as plt
import numpy as np
import sys
import astropy.units as u
from astropy.io import ascii, fits
from astropy.modeling.blackbody import blackbody_lambda
sys.path.insert(0, '../')
from libra import Spot, Star
# +
from astropy.io import fits
from glob import glob
paths = glob('/Users/bmmorris/git/freckles/data/lte*BT-Settl.spec.fits')
wavelengths = fits.getdata(paths[0])['Wavelength']
temperatures = []
model_fluxes = []
for path in paths:
hdu = fits.open(path)
temperatures.append(hdu[1].header['PHXTEFF'])
interp_flux = np.interp(wavelengths, hdu[1].data['Wavelength'], hdu[1].data['Flux'])
model_fluxes.append(interp_flux)
model_fluxes = np.vstack(model_fluxes)
temperatures = np.array(temperatures)
# -
trappist1 = model_fluxes[np.argmin(np.abs(temperatures - 2500))] * wavelengths
spots = model_fluxes[np.argmin(np.abs(temperatures - 3636))] * wavelengths
plt.plot(wavelengths, trappist1)
plt.plot(wavelengths, spots)
# +
spot_fractional_area = np.pi * 0.02071754**2 / (4 * np.pi)
combined_spectrum = (1 - spot_fractional_area) * trappist1 + spot_fractional_area * spots
# +
from scipy.ndimage import gaussian_filter1d
fig, ax = plt.subplots(figsize=(4, 3))
ax.semilogy(wavelengths, gaussian_filter1d((combined_spectrum - trappist1)/trappist1, 100), lw=0.5)
ax.set_xlim([0.1, 6])
ax.set_ylim([0, 1e3])
ax.grid(ls=':')
ax.set_xlabel('Wavelength')
ax.set_ylabel('(Spotted - Unspotted) / Unspotted Flux')
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
fig.savefig('wavelength_dependence.pdf', bbox_inches='tight')
# +
from astropy.modeling.blackbody import blackbody_lambda
vega = blackbody_lambda(wavelengths*u.um, 9700)
# -
plt.loglog(wavelengths, gaussian_filter1d(combined_spectrum, 100))
plt.xlabel('Wavelength [$\mu$m]')
plt.ylabel('$F_\lambda \lambda$ [W m$^{-2}$]')
plt.xlim([0.1, 5])
plt.ylim([1e-20, 1e8])
| notebooks/wavelength_dependence.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# %matplotlib inline
from matplotlib import style
style.use('fivethirtyeight')
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import datetime as dt
# ## Reflect Tables into SQLAlchemy ORM
# Python SQL toolkit and Object Relational Mapper
import sqlalchemy
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import Session
from sqlalchemy import create_engine, func, inspect
# create engine to hawaii.sqlite
engine = create_engine("sqlite:///Resources/hawaii.sqlite")
# reflect an existing database into a new model
Base = automap_base()
# reflect the tables
Base.prepare(engine, reflect=True)
# View all of the classes that automap found
Base.classes.keys()
# Save references to each table
me = Base.classes.measurement
stat = Base.classes.station
# Create our session (link) from Python to the DB
session = Session(engine)
# ## Bonus Challenge Assignment: Temperature Analysis II
# +
# This function called `calc_temps` will accept start date and end date in the format '%Y-%m-%d'
# and return the minimum, maximum, and average temperatures for that range of dates
def calc_temps(start_date, end_date):
"""TMIN, TAVG, and TMAX for a list of dates.
Args:
start_date (string): A date string in the format %Y-%m-%d
end_date (string): A date string in the format %Y-%m-%d
Returns:
        TMIN, TAVG, and TMAX
"""
return session.query(func.min(me.tobs), func.avg(me.tobs), func.max(me.tobs)).\
filter(me.date >= start_date).filter(me.date <= end_date).all()
# For example
print(calc_temps('2012-02-28', '2012-03-05'))
# -
# Use the function `calc_temps` to calculate the tmin, tavg, and tmax
# for a year in the data set
dip = calc_temps('2016-01-01', '2016-12-31')
dip
# +
# Plot the results from your previous query as a bar chart.
pip = pd.DataFrame(dip)
pip
pip = pip.rename(columns={0 : "min", 1 : "avg", 2 : "max"})
pip
# Use "Trip Avg Temp" as your Title
# Use the average temperature for bar height (y value)
# Use the peak-to-peak (tmax-tmin) value as the y error bar (yerr)
# +
x = [1]
y = [pip["avg"]]
labels = ['temp average with y-errorbar']
tick_locations = list(x)
plt.bar(x,height = pip["avg"])
plt.xticks(tick_locations, labels)
plt.errorbar(x, y, yerr=(pip["max"]-pip["min"])/2, fmt='.k', marker="o", markersize=12);
plt.ylabel("Temp")
plt.legend(['avg','yerr'])
plt.show()
# -
# ### Daily Rainfall Average
# +
# Calculate the total amount of rainfall per weather station for your trip dates using the previous year's
# matching dates.
engine.execute("SELECT station.station, station.name, measurement.prcp, sum(measurement.tobs), station.latitude, station.longitude, station.elevation FROM station, measurement WHERE station.station=measurement.station and measurement.date >= '2017-01-01' and measurement.date <= '2017-08-23' GROUP BY station.station, station.name ORDER BY measurement.prcp desc;").all()
# Sort this in descending order by precipitation amount and list the station, name, latitude, longitude, and elevation
# -
# ### Daily Temperature Normals
# +
# Use this function to calculate the daily normals
# (i.e. the averages for tmin, tmax, and tavg for all historic data matching a specific month and day)
def daily_normals(date):
"""Daily Normals.
Args:
date (str): A date string in the format '%m-%d'
Returns:
A list of tuples containing the daily normals, tmin, tavg, and tmax
"""
sel = [func.min(me.tobs), func.avg(me.tobs), func.max(me.tobs)]
return session.query(*sel).filter(func.strftime("%m-%d", me.date) == date).all()
# For example
daily_normals("01-01")
# +
# calculate the daily normals for your trip
# push each tuple of calculations into a list called `normals`
# Set the start and end date of the trip
start_date = '2017-08-01'
end_date = '2017-08-07'
def daily_normals(day_date):
temps = session.query(func.min(me.tobs), func.avg(me.tobs), func.max(me.tobs)).\
filter(func.strftime("%m-%d", me.date) == day_date).all()
return temps
# Use the start and end date to create a range of dates
trip_dates = pd.date_range(start_date, end_date)
# Strip off the year and save a list of strings in the format %m-%d
trip_mmdd = trip_dates.strftime('%m-%d')
# Use the `daily_normals` function to calculate the normals for each date string
normal = []
for c in trip_mmdd:
#unpack daily_normals
normal.append(*daily_normals(c))
normal
# and append the results to a list called `normals`.
# -
# Load the previous query results into a Pandas DataFrame and add the `trip_dates` range as the `date` index
normals_df = pd.DataFrame(normal, trip_dates, columns = ['Tmin', 'Tavg', 'Tmax'])
normals_df
# +
# Plot the daily normals as an area plot with `stacked=False`
plt.fill_between(normals_df.index, normals_df["Tmax"], alpha=0.5)
plt.fill_between(normals_df.index, normals_df["Tavg"], alpha=0.5)
plt.fill_between(normals_df.index, normals_df["Tmin"], alpha=0.5)
plt.xticks(rotation=45)
plt.title("Daily Temperature Normals")
plt.ylabel('Temp (F)')
plt.legend(['Tmax', 'Tavg','Tmin'])
plt.show()
# -
# ## Close Session
session.close()
| temp_analysis_bonus_2_starter.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
def escreva(msg):
tam = len(msg)+4
print('~'*tam)
print(f' {msg}')
print('~' *tam)
escreva('<NAME>')
escreva('Data Science')
escreva('Engenharia Mecânica')
# -
| .ipynb_checkpoints/EX097 - Um print especial-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] pycharm={"name": "#%% md\n"}
# <img src="https://upload.wikimedia.org/wikipedia/commons/c/c7/HEIG-VD_Logo_96x29_RVB_ROUGE.png" alt="HEIG-VD Logo" width="250"/>
#
# # TB - DevOps: Setting up cloud-native storage
# ## Fio benchmark
#
# The goal of this bachelor thesis is to explore the different options offered by Longhorn, to judge its quality as a *cloud-native* storage solution, and to use local storage and a public cloud as points of comparison.
#
# The tests are based on the benchmarking carried out by [Architecting-IT](https://resources.storageos.com/downloadbenchmarkreport). In Architecting-IT's benchmark, StorageOS comes out on top as a block storage solution. The Architecting-IT tests are [publicly available](https://github.com/architectingit/k8sstorage/blob/main/perfraw.sh).
#
# The methodology choices and the analysis of the results are covered in the final report. This notebook contains the commands used to run the benchmark and to produce the graphs.
# + [markdown] pycharm={"name": "#%% md\n"}
# ## Local SSD/HDD
# The results in this section were generated with:
# ```bash
# $ ./fio-jobs-output/local/run-all-jobs.sh
# ```
# + pycharm={"name": "#%%\n"}
import json
# from: https://stackoverflow.com/a/3207973
from os import listdir
from os.path import isfile, join
fio_jobs_output_path_local = 'fio-jobs-output/local/jobs'
job_outputs_filenames = [f for f in listdir(fio_jobs_output_path_local) if isfile(join(fio_jobs_output_path_local, f)) and 'output' in f]
# + pycharm={"name": "#%%\n"}
job_outputs = []
# Load the results
for job_output in job_outputs_filenames:
f = open(f"{fio_jobs_output_path_local}/{job_output}")
data = json.load(f)
job_outputs.append(data)
print(job_outputs[0]['fio version'])
# + pycharm={"name": "#%%\n"}
# Samsung Magician direct comparison
for job_output in job_outputs:
if job_output['global options']['name'] == 'read_iops':
print('read_iops', job_output['jobs'][0]['read']['iops'])
if job_output['global options']['name'] == 'write_iops':
print('write_iops', job_output['jobs'][0]['write']['iops'])
# missing jobs for single threaded sequential read/write bandwidth benchmark
# + [markdown] pycharm={"name": "#%% md\n"}
# ## Longhorn test - IICT cluster
# ### kubectl configuration
# ```bash
# # Use HEIG-VD VPN
# # Visit https://kubernetes.iict.ch/login and log in with :
# # - username: <firstname.lastname>
# # - password: <<PASSWORD> (your usual password for HEIG-VD)>
# # Click on `iict` cluster under `Cluster Name`
# # Look for `Kubeconfig File` button
# # Put the content shown into `~/.kube/config`
#
# $ kubectl config get-contexts # is `iict` listed ?
# $ kubectl config use-context iict
# ```
# + [markdown] pycharm={"name": "#%% md\n"}
# ### Benchmarking with the image
# If the number of replicas is higher than the number of available nodes, enable 'Replica Node Level Soft Anti-Affinity' in the Longhorn UI to allow duplicate replicas to be scheduled (at the node level). This setting is changed for the `deployment-longhorn-04-five-replica.yaml` deployment.
#
# ```bash
# # <XX> is the deployment number
# $ kubectl apply -f deployments/deployment-longhorn-XX-???.yaml
# # monitor deployed pod activity
# $ kubectl logs --follow $(kubectl get pods --namespace=mercado -o=jsonpath='{.items[0].metadata.name}') --namespace=mercado
# ...
# All jobs done... # wait for the benchmark to finish
# ctrl-c # stop monitoring pod activity
# $ ./iict-fio-benchmark-get-output.sh <XX>
# # clean up resources
# $ kubectl delete -f deployments/deployment-longhorn-XX-???.yaml
# ```
# + pycharm={"name": "#%%\n"}
# from: https://stackoverflow.com/a/3207973
from os import listdir
from os.path import isfile, join
fio_jobs_output_path_iict = 'docker/iict/fio-jobs'
job_outputs_filename = [f for f in listdir(fio_jobs_output_path_iict) if isfile(join(fio_jobs_output_path_iict, f))]
# + pycharm={"name": "#%%\n"}
job_outputs = []
# Load the results
for job_output in job_outputs_filename:
f = open(f"fio-jobs-output/iict/deployment-longhorn-01-fs/{job_output}-output")
data = json.load(f)
job_outputs.append(data)
print(job_outputs[0]['fio version'])
# -
# ### Monitored benchmarking
# To open an SSH session on a school node, install `remmina`. The following tools are used to monitor the benchmark:
# * iostat
# * top
# * iftop
#
# ```bash
# $ sudo apt install remmina remmina-plugin-vnc
# $ remmina
# # search bar (RDP): rpo.lan.iict.ch
# # log in with:
# # - username: <firstname.lastname>
# # - password: <<PASSWORD> (your usual password for HEIG-VD)>
# # domain: einet
#
# # Use the search bar and look for "putty", open three sessions with:
# # - ip: 10.193.72.32
# # Open the shell session and use your credentials:
# # - username: mercado
# # - password: <secret you can right-click to paste once copied>
# # Once you got your three shell sessions running, you want to use:
# # mercado@iict-sv7164:~$ iostat 10
# # mercado@iict-sv7164:~$ top
# # mercado@iict-sv7164:~$ sudo iftop
# #
# # Additionnaly, you can get the kernel version with:
# # mercado@iict-sv7164:~$ uname -r
# # 4.15.0-147-generic
# # or other types of information:
# # mercado@iict-sv7164:~$ cat /sys/block/sda/size
# # 27341619200
# # mercado@iict-sv7164:~$ cat /sys/block/sdb/size
# # 468731008
# # mercado@iict-sv7164:~$ lsblk
# # NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINT
# # sda 8:0 0 12.7T 0 disk
# # └─vg_data-lv_data 253:0 0 12.7T 0 lvm /data
# # sdb 8:16 0 223.5G 0 disk
# # ├─sdb1 8:17 0 512M 0 part /boot/efi
# # └─sdb2 8:18 0 223G 0 part /
# # ======
#
# # new shell to iict cluster
# $ kubectl apply -f deployments/deployment-longhorn-00-manual.yaml
# $ kubectl exec --namespace=mercado -it \
# $(kubectl get pods -n mercado \
# -o=jsonpath='{.items[0].metadata.name}') -- /bin/sh
# # \ ./run-all-jobs.sh # benchmark
# All jobs done... # wait for the benchmark to finish
# # \ exit # exit interactive session
# $ ./iict-fio-benchmark-get-output.sh 00
# $ kubectl delete -f deployments/deployment-longhorn-00-manual.yaml
# ```
# ## Public cloud test: AWS cluster
# ```bash
# $ sudo apt install awscli
# $ aws configure # with your AWS credentials
# ...
# Default region name [None]: us-west-1
# # install eksctl by following:
# # https://docs.aws.amazon.com/eks/latest/userguide/eksctl.html
# $ eksctl create cluster \
# --name fiobench-mercado \
# --region us-west-1 \
# --version 1.19 \
# --nodegroup-name fiobench-node \
# --node-type t3.small \
# --nodes 1 \
# --managed
#
# ...
# 2021-07-16 14:35:53 [✔] EKS cluster "fiobench-mercado" in
# "us-west-1" region is ready
#
# # Open another shell
# $ aws eks --region us-west-1 update-kubeconfig --name fiobench-mercado
# Added new context arn:aws:eks:us-west-1:396229223364:cluster/fiobench-mercado to /home/brassens/.kube/config
# $ kubectl auth can-i "*" "*" # can you perform anything?
# yes
# $ kubectl apply -f deployments/deployment-aws.yaml
# $ kubectl get pod # verify pod is doing something
# # when your pod is running, follow its activity:
# $ kubectl logs --follow $(kubectl get pods -o=jsonpath='{.items[0].metadata.name}')
# # (Optionnal) if you want to see what's inside
# $ kubectl exec -it $(kubectl get pods -o=jsonpath='{.items[0].metadata.name}') -- /bin/sh
# / # top
# ...
# CPU: 1% usr 2% sys 0% nic 3% idle 90% io 0% irq 0% sirq
# # we see io number is high, indicating high activity
# ...
# # ===
# # After you see in the logs, "All jobs done..."
# $ ./aws-fio-benchmark-get-output.sh
# $ kubectl delete -f deployments/aws.yaml
# $ eksctl delete cluster --region=us-west-1 --name=fiobench-mercado
# ```
#
# **Note**: For an unknown reason, test 7 can hang. Open an interactive session (`exec -it...`) and interrupt test 7 with `kill -9 <PID>` (find the PID with the `top` command). The following tests (8 and 9) will then run in turn. Re-run the tests that hung from the command line, and finally retrieve the results with the `aws-fio-benchmark-get-output.sh` script.
# + pycharm={"name": "#%%\n"}
fio_jobs_output_path_aws = 'fio-jobs-output/aws'
job_outputs_filename = [f for f in listdir(fio_jobs_output_path_aws) if isfile(join(fio_jobs_output_path_aws, f))]
# + pycharm={"name": "#%%\n"}
job_outputs = []
for job_output in job_outputs_filename:
f = open(f"{fio_jobs_output_path_aws}/{job_output}")
data = json.load(f)
job_outputs.append(data)
print(job_outputs[0]['fio version'])
# + [markdown] pycharm={"name": "#%% md\n"}
# ## Updating the benchmarking image
# To update the image that the cluster uses for benchmarking (with new test scenarios or other changes), tag a commit of this repository with, for example:
#
# ```bash
# # <Some interesting changes to tests...>
# $ git tag # what tag can we use?
# ...
# v0.1.4
# v0.1.5
# v0.1.6
# v0.1.7
#
# $ git tag v0.1.8
# # Trigger github action workflow `push_to_ghcr.yaml`
# $ git push origin tag v0.1.8
# ```
#
# Please wait until the image has been published by the `Github Action`, then continue benchmarking with `ghcr.io/pabloheigvd/tb-fiobench:latest`.
#
# **Note**: The `Github Action` only builds images tagged `v0.*.*` and `v1.*.*`.
# -
# # Processing the output
# Following the 9 jobs defined by Architecting-IT, we extract the results:
# +
deployments_filename = [f.split('.')[0] for f in listdir('deployments/')]
def deployment_output_folder_path(host: str, idx=None):
"""
    :param host: one of 'local', 'iict' or 'aws'
    :param idx: the IICT deployment number (optional)
:return: the deployment folder path
"""
dep = ""
if idx is not None:
for d in deployments_filename:
if str(idx) in d:
dep = '/' + d
if host == 'local':
dep = '/jobs'
return f"fio-jobs-output/{host}{dep}"
print(deployment_output_folder_path(host='iict', idx=1))
print(deployment_output_folder_path(host='local'))
print(deployment_output_folder_path(host='aws'))
# + tags=["outputPrepend"]
def deployment_output(deployment_output_folder: str):
"""
:param deployment_output_folder:
:return: a filtered view of all jobs with relevant metrics
"""
output = {}
output['deployment name'] = deployment_output_folder
for f in listdir(deployment_output_folder):
if '-output' not in f:
continue
o = json.load(open(f"{deployment_output_folder}/{f}"))
if o['global options']['name'] == 'read_iops':
# iops_mean is a stop criteria?
output['read_iops'] = o['jobs'][0]['read']['iops']
if o['global options']['name'] == 'write_iops':
output['write_iops'] = o['jobs'][0]['write']['iops']
if o['global options']['name'] == 'read_bw':
output['read_bw_mean'] = o['jobs'][0]['read']['bw_mean']
if o['global options']['name'] == 'write_bw':
output['write_bw_mean'] = o['jobs'][0]['write']['bw_mean']
if o['global options']['name'] == 'read_latency':
# slat: submission to kernel latency
# clat: submission+completion latency
# lat: TODO verify ~s+clat
output['read_latency'] = float(o['jobs'][0]['read']['lat_ns']['mean'])/1000 # nano to micro (nsec to usec)
if o['global options']['name'] == 'write_latency':
output['write_latency'] = float(o['jobs'][0]['write']['lat_ns']['mean'])/1000 # nano to micro (nsec to usec)
if o['global options']['name'] == 'seq_read':
output['seq_read_bw'] = float(o['jobs'][0]['read']['bw_mean'])/1000 # KiB/s -> MiB/s
if o['global options']['name'] == 'seq_write':
output['seq_write_bw'] = float(o['jobs'][0]['write']['bw_mean'])/1000 # KiB/s -> MiB/s
if o['global options']['name'] == 'rw_mix':
output['rw_mix_read'] = o['jobs'][0]['read']['iops']
output['rw_mix_write'] = o['jobs'][0]['write']['iops']
return output
deployment_local = deployment_output(deployment_output_folder_path(host='local'))
deployment_01_iict = deployment_output(deployment_output_folder_path(host='iict', idx=1))
deployment_aws = deployment_output(deployment_output_folder_path(host='aws'))
print(deployment_local['deployment name'])
print(deployment_01_iict['deployment name'])
print(deployment_aws['deployment name'])
# + [markdown] pycharm={"name": "#%% md\n"}
# ## Graphs
# We compare the different metrics tested using graphs.
# + [markdown] pycharm={"name": "#%% md\n"}
# ### Excluding the local benchmark
# The benchmark run locally is excluded from the graphs.
# + pycharm={"name": "#%%\n"}
deployment_output_folder = deployment_output_folder_path(host='local')
for f in listdir(deployment_output_folder):
if '-output' not in f:
continue
o = json.load(open(f"{deployment_output_folder}/{f}"))
if o['global options']['name'] == 'read_iops':
print('read_iops\t', '{0:.2f}'.format(o['jobs'][0]['read']['iops']))
if o['global options']['name'] == 'write_iops':
print('write_iops\t', '{0:.2f}'.format(o['jobs'][0]['write']['iops']))
# no Mo/s metrics
# if o['global options']['name'] == 'seq_read':
# print()
# if o['global options']['name'] == 'seq_write':
# print()
# + pycharm={"name": "#%%\n"}
def print_difference(dep1, dep2):
"""
:param dep1: deployment 1
:param dep2: deployment 2
"""
metrics_difference = {}
for m in dep1:
if m == 'deployment name':
continue
elif 'latency' in m:
# lower is better
metrics_difference[m] = (float(dep2[m])-float(dep1[m]))/float(dep2[m])*100
else:
# higher is better
metrics_difference[m] = (float(dep1[m])-float(dep2[m]))/float(dep2[m])*100
print(f"How better is {dep1['deployment name']} from {dep2['deployment name']}?")
for m in sorted(metrics_difference):
print(m, '\t', '{0:.2f}'.format(metrics_difference[m]))
print()
print_difference(
dep1=deployment_output(deployment_output_folder_path(host='local')),
dep2=deployment_output(deployment_output_folder_path(host='iict', idx=3)),
)
print_difference(
dep1=deployment_output(deployment_output_folder_path(host='local')),
dep2=deployment_output(deployment_output_folder_path(host='aws')),
)
# + [markdown] pycharm={"name": "#%% md\n"}
# ### IICT/AWS cluster comparison
# + pycharm={"name": "#%%\n"}
deployments = [
# deployment_output(deployment_output_folder_path(host='local')),
# deployment_output(deployment_output_folder_path(host='iict', idx=1)),
deployment_output(deployment_output_folder_path(host='iict', idx=2)),
deployment_output(deployment_output_folder_path(host='iict', idx=3)),
deployment_output(deployment_output_folder_path(host='iict', idx=4)),
deployment_output(deployment_output_folder_path(host='iict', idx=5)),
deployment_output(deployment_output_folder_path(host='iict', idx=6)),
deployment_output(deployment_output_folder_path(host='aws')),
]
deployment_names = [d['deployment name'] for d in deployments]
deployment_names = [d.replace('fio-jobs-output/local/jobs', 'local')
.replace('fio-jobs-output/aws', 'aws')
.replace('fio-jobs-output/iict/deployment-longhorn-', '')
.replace('01-', '')
.replace('02-', '')
.replace('03-', '')
.replace('04-', '')
.replace('05-', '')
.replace('06-', '')
.replace('network-effect', '')
.replace('-', ' ')
for d in deployment_names]
for d_name in deployment_names:
print(d_name)
# + pycharm={"name": "#%%\n"}
# default figure file type and dpi
figure_format = 'svg'
figure_dpi = 1200
longhorn_color = '#7BC8F6'
longhorn_color2 = '#ADD8E6'
aws_color = '#FFA500'
aws_color2 = '#FAC205'
# https://matplotlib.org/stable/tutorials/intermediate/legend_guide.html
import matplotlib.patches as mpatches
handles = [
mpatches.Patch(color=longhorn_color, label='Longhorn'),
mpatches.Patch(color=aws_color, label='AWS'),
]
# + pycharm={"name": "#%%\n"}
from matplotlib import pyplot as plt
plt.ioff()
from IPython.display import SVG, display
metrics = [
('read_iops', 'Read IOPS', 'IOPS'),
]
def build_graph_metric(key, title, unit, higher=True, display_percentages=True):
metric = [d[key] for d in deployments]
# express Longhorn deployments metric as a percentage of AWS deployment metric
percentages = ["{0:.2f}".format(100 * m/metric[len(metric) - 1]) + "%" for m in metric[:len(metric) - 1]] + ["100%"]
fig = plt.figure(figsize=(12,5))
# display legends: https://stackoverflow.com/a/19576608
ax = fig.add_axes([0.1,0.1,0.75,0.75])
ax.set_title(f"{title}\n({'higher' if higher else 'lower'} is better)")
ax.set_xlabel('deployments')
ax.set_ylabel(unit)
bars = plt.bar(deployment_names, metric)
for i in range(len(metric)):
# rounding to 3 significant digits: https://stackoverflow.com/a/3411731
m = '%s' % ('{0:,}'.format(float('{0:.3g}'.format(metric[i]))))
# removed '.0' by not showing last 2 caracters
plt.annotate(m[:len(m) - 2], xy=(deployment_names[i],metric[i]), ha='center', va='bottom')
if display_percentages:
for idx,rect in enumerate(bars):
height = rect.get_height()
ax.text(rect.get_x() + rect.get_width()/2., 0.5*height,
s=percentages[idx],
ha='center', va='bottom')
plt.legend(handles=handles)
for b in bars[0:len(bars)-1]:
b.set_color(longhorn_color)
bars[len(bars)-1].set_color(aws_color)
img_name = f'figures/{key}.' + figure_format
plt.savefig(img_name, format=figure_format, dpi=figure_dpi)
display(SVG(img_name))
plt.close() # cannot save until plt is out of scope
# + pycharm={"name": "#%%\n"}
build_graph_metric('read_iops', 'Random read', 'IOPS')
# + pycharm={"name": "#%%\n"}
build_graph_metric('write_iops', 'Random write', 'IOPS')
# + pycharm={"name": "#%%\n"}
build_graph_metric('read_bw_mean', 'Mean read bandwidth', 'Bandwidth [KiB/s]', display_percentages=False)
# + pycharm={"name": "#%%\n"}
build_graph_metric('write_bw_mean', 'Mean write bandwidth', 'Bandwidth [KiB/s]')
# + pycharm={"name": "#%%\n"}
build_graph_metric('read_latency', 'Mean read latency', 'Latency [usec]', higher=False, display_percentages=False)
# + pycharm={"name": "#%%\n"}
build_graph_metric('write_latency', 'Mean write latency', 'Latency [usec]', higher=False)
# + pycharm={"name": "#%%\n"}
build_graph_metric('seq_read_bw', 'Mean sequential read bandwidth', 'Bandwidth [MiB/s]', display_percentages=False)
# + pycharm={"name": "#%%\n"}
build_graph_metric('seq_write_bw', 'Mean sequential write bandwidth', 'Bandwidth [MiB/s]')
# + pycharm={"name": "#%%\n"}
build_graph_metric('read_bw_mean', 'Read bandwidth mean', 'Bandwidth [KiB/s]', display_percentages=False)
# + pycharm={"name": "#%%\n"}
build_graph_metric('write_bw_mean', 'Write bandwidth mean', 'Bandwidth [KiB/s]')
# + pycharm={"name": "#%%\n"}
import numpy as np
# https://matplotlib.org/stable/gallery/lines_bars_and_markers/barchart.html
x = np.arange(len(deployments))
width = 0.35
rw_mix_read = [d['rw_mix_read'] for d in deployments]
rw_mix_write = [d['rw_mix_write'] for d in deployments]
# express Longhorn deployments metric as a percentage of AWS deployment metric
percentages_read = ["{0:.2f}".format(100 * m/rw_mix_read[len(rw_mix_read) - 1]) + "%" for m in rw_mix_read[:len(rw_mix_read) - 1]] + ["100%"]
percentages_write = ["{0:.2f}".format(100 * m/rw_mix_write[len(rw_mix_write) - 1]) + "%" for m in rw_mix_write[:len(rw_mix_write) - 1]] + ["100%"]
color1 = [longhorn_color] * (len(rw_mix_write) - 1)
color2 = [longhorn_color2] * (len(rw_mix_write) - 1)
fig, ax = plt.subplots()
fig.set_size_inches(10.5, 5.5) # manually adjust for names to fit
rects1 = ax.bar(x - width/2, rw_mix_read, width=width, label = 'random r/w mix - read (75%)', color=color1 + [aws_color])
rects2 = ax.bar(x + width/2, rw_mix_write, width=width, label = 'random r/w mix - write (25%)', color=color2 + [aws_color2])
ax.set_ylabel('IOPS') # TODO unit
ax.set_title('Read/Write mix comparison\n(higher is better)')
ax.set_xticks(x)
ax.set_xticklabels(deployment_names)
ax.legend()
# for idx,rect in enumerate(rects1):
# height = rect.get_height()
# ax.text(rect.get_x() + rect.get_width()/2., 0.5*height,
# s=percentages_read[idx],
# ha='center', va='bottom')
#
# for idx,rect in enumerate(rects2):
# height = rect.get_height()
# ax.text(rect.get_x() + rect.get_width()/2., 0.5*height,
# s=percentages_write[idx],
# ha='center', va='bottom')
read_truncated = ['%s' % ('{0:,}'.format(float('{0:.3g}'.format(r)))) for r in rw_mix_read]
read_truncated = [m[:len(m) - 2] for m in read_truncated]
write_truncated = ['%s' % ('{0:,}'.format(float('{0:.3g}'.format(w)))) for w in rw_mix_write]
write_truncated = [m[:len(m) - 2] for m in write_truncated]
ax.bar_label(rects1, padding=3, labels=read_truncated)
ax.bar_label(rects2, padding=3, labels=write_truncated)
# fig.tight_layout()
img_name = 'figures/rw_mix.' + figure_format
plt.savefig(img_name, format=figure_format, dpi=figure_dpi)
display(SVG(img_name))
plt.close()
| fio-benchmark.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # LAB 01.02 - Metrics
# !wget --no-cache -O init.py -q https://raw.githubusercontent.com/rramosp/ai4eng.v1.20211.udea/main/content/init.py
import init, inspect; init.init(force_download=False); init.get_weblink()
from local.lib.rlxmoocapi import submit, session
student = session.Session(init.endpoint).login( course_id=init.course_id,
lab_id="L01.02" )
# ## General remark
#
# You do not need to use Python to solve the problems in this notebook, you can use any tool of your choice (Excel, etc.), including **pen and paper**. But
#
# ### If you want to try it out in Python
#
# - `numpy` is the Python library used for vectors
# - there are operations that take a vector and produce another vector (i.e. `np.log`)
# - there are operations that take a vector and procude a number (i.e. `np.mean`)
# - there are operations that take two vectors and produce a number (see the **HINTs** below)
# - etc.
#
# For instance
# +
import numpy as np
v1 = np.array([1,2,3,4])
# the log of each element of the vector
print ( "the log =", np.log(v1) )
# the mean of all elements of the vector
print ( "the mean =", np.mean(v1) )
# multiply all elements of a vector with a scalar
print ("times two =", 2*v1)
# -
# you can always check the type of any variable
a = 2.0
type(v1), type(a)
# ## Task 01. Accuracy
#
# Compute the **accuracy**, i.e. the percentage of correct predictions (see [here](https://en.wikipedia.org/wiki/Sensitivity_and_specificity#Definitions)), for the following model output (`predicted`) and ground truth (`actual`).
#
# Execute the following cell to generate the data from which you must compute the metric. You may compute the metric implementing python code, or manually, or copy/pasting the actual and predicted data in Excel, etc.
#
# **CHALLENGE**: use Python with [`sklearn.metrics.accuracy_score`](https://scikit-learn.org/stable/modules/generated/sklearn.metrics.accuracy_score.html#sklearn.metrics.accuracy_score)
#
#
# Observe that every time you execute the following cell, **a different set of values** is generated. You will have to compute the metric **for the values that you see**. If you run the cell again you will have to compute your metric value again.
import numpy as np
t1_actual = np.random.randint(2, size=20)
t1_predicted = np.abs(t1_actual*(np.random.random(size=20)>(np.random.random()*.9+.05)).astype(int))
print ("actual ", ", ".join([str(i) for i in t1_actual]))
print ("predicted", ", ".join([str(i) for i in t1_predicted]))
# Assign the value of your computation to the `accuracy` variable, **with three decimal places**
accuracy =
# #### submit your answer
student.submit_task(globals(), task_id="task_01");
# ## Task 2: Sensitivity
#
# Compute the sensitivity metric (aka the _True Positive Rate_ or _Recall_; see [Sensitivity on Wikipedia](https://en.wikipedia.org/wiki/Sensitivity_and_specificity)) for the following model output (`predicted`) and ground truth (`actual`).
#
# Execute the following cell to generate the data from which you must compute the metric. You may compute the metric implementing python code, or manually, or copy/pasting the actual and predicted data in Excel, etc.
#
# **Challenge**: Use Python [`sklearn.metrics.recall_score`](https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html)
#
#
# Observe that every time you execute the following cell, **a different set of values** is generated. You will have to compute the metric **for the values that you see**. If you run the cell again you will have to compute your metric value again.
import numpy as np
t2_predicted = np.random.randint(2, size=20)
t2_actual = np.random.randint(2, size=20)
t2_predicted[np.argwhere(t2_actual==1)[0][0]]=0
print ("actual ", ", ".join([str(i) for i in t2_actual]))
print ("predicted", ", ".join([str(i) for i in t2_predicted]))
# Assign the value of your computation to the `tpr` variable **with three decimal places**
tpr =
# #### submit your answer
student.submit_task(globals(), task_id="task_02");
# ## Task 3: Evaluation in New York City Taxi Trip Duration Kaggle Competition
#
# Understand the data and the evaluation metric (**Root Mean Squared Logarithmic Error**, RMSLE) of the following Kaggle competition
#
# - [https://www.kaggle.com/c/nyc-taxi-trip-duration/](https://www.kaggle.com/c/nyc-taxi-trip-duration/)
#
# Observe that this competition is a **regression task** as we are measuring the difference in prediction with respect to the actual.
#
# For instance, the following model predictions and ground truth:
#
# actual [66 37 22]
# predicted [79 51 67]
#
# produce an **RMSLE** of approximately 0.66.
#
# Execute the following cell to generate the data from which you must compute the metric. You may compute the metric implementing python code, or manually, or copy/pasting the actual and predicted data in Excel, etc.
#
# **Challenge**: For python use numpy function `np.log` or `np.log1p`
#
# Observe that every time you execute the following cell, **a different set of values** is generated. You will have to compute the metric **for the values that you see**. If you run the cell again you will have to compute your metric value again.
t3_actual = np.random.randint(80,size=15)+20
t3_predicted = np.random.randint(80,size=15)+20
print ("actual ", t3_actual)
print ("predicted", t3_predicted)
# Assign the value of your computation to the `rmsle` variable **with three decimal places**
rmsle =
# #### submit your answer
student.submit_task(globals(), task_id="task_03");
# ## Task 4: Evaluation in Shelter Animal Outcomes Kaggle Competition
#
# Understand the data and the evaluation metric (**Multiclass Logarithmic Loss**, _logloss_) of the following Kaggle competition
#
# - [https://www.kaggle.com/c/shelter-animal-outcomes/](https://www.kaggle.com/c/shelter-animal-outcomes/)
#
# Observe that this competition is a **classification task with 5 classes** and, for each item, the model produces a probability for each class. Classes are numbered from 0 to 4.
#
# For instance, the following represents the model output for **three items**
#
# [[0.17 0.27 0.03 0.31 0.21]
# [0.09 0.44 0.02 0.15 0.3 ]
# [0.26 0.18 0.25 0.2 0.11]]
#
# Where the classes with greatest probability assigned by the model are
#
# - class 3 for the first item (with 0.31 probability)
# - class 1 for the second item (with 0.44 probability)
# - class 0 for the third item (with 0.26 probability)
#
# The class labels are expressed as a similar matrix, but with 0/1
# For instance, the ground truth for the corresponding three items above, could be:
#
# [[0 0 0 1 0]
# [0 0 1 0 0]
# [1 0 0 0 0]]
#
# and will produce a **logloss** of approx 2.14
#
# Execute the following cell to generate the data from which you must compute the metric. You may compute the metric implementing python code, or manually, or copy/pasting the actual and predicted data in Excel, etc.
# +
import numpy as np
t4_predicted = np.random.random(size=(7,5)).T+0.5
t4_predicted = np.round((t4_predicted/np.sum(t4_predicted,axis=0)),2).T
t4_actual = np.eye(5)[np.random.randint(5,size=len(t4_predicted))].astype(int)
print ("actual")
print (t4_actual)
print ("\npredicted")
print (t4_predicted)
# -
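# For reference, a sketch of how the multiclass logloss could be computed for the values generated above (probabilities are clipped away from 0 before taking the log, as in the Kaggle definition):
# +
eps = 1e-15
clipped = np.clip(t4_predicted, eps, 1 - eps)
logloss_sketch = -np.mean(np.sum(t4_actual * np.log(clipped), axis=1))
print("logloss:", round(logloss_sketch, 3))
# -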
# Assign the value of your computation to the `logloss` variable **with three decimal places**
logloss =
# #### submit your answer
student.submit_task(globals(), task_id="task_04");
| content/LAB 01.02 - METRICS.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Introduction:
# - This is a data set used to predict heart disease. Patients were classified as having or not having heart disease based on cardiac catheterization, the gold standard. If they had more than 50% narrowing of a coronary artery they were labeled as having heart disease.
# - In this cohort, there are 270 patients and there are 13 independent predictive variables or column attributes.
# - After this dataset became available, the UCI data repository made another cohort available with 303 patients. They shared this with Kaggle which is a data competition initiative. First, the file format is .data which is uncommonly used. Secondly, the outcome was reversed by accident. This is why we are still using the older cohort of patients
# + active=""
# Importing Libraries
# -
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
import seaborn as sns
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import cross_val_score
from sklearn.ensemble import RandomForestClassifier
# + active=""
# # data collection and processing
# -
#loading heart.csv data
heart_data= pd.read_csv("Heart.csv")
#print of top 5 rows of data
heart_data.head()
#print of last 5 rows of data
heart_data.tail()
#shape of the dataset
heart_data.shape
#print info of dataset
heart_data.info()
#checking for missing values
heart_data.isnull().sum()
#some of statistical measures of dataset
heart_data.describe()
#heatmap
corr_matrix = heart_data.corr()
top_corr_features = corr_matrix.index
plt.figure(figsize=(20,20))
sns.heatmap(heart_data[top_corr_features].corr(),annot=True)
#visualizing the frequency
heart_data.hist()
plt.show()
# count plot of target column
sns.countplot(x="target",data=heart_data)
sns.countplot(x="sex",data=heart_data)
sns.countplot(x="cp",data=heart_data)
sns.countplot(x="trestbps",data=heart_data)
sns.countplot(x="exang",data=heart_data)
sns.countplot(x="thal",data=heart_data)
sns.countplot(x="fbs",data=heart_data)
heart_data.target.value_counts()
# when target is 1 it means the person has a chance of heart attack
# when target is 0 it means the person might not have a heart attack
# splitting of dataset
x= heart_data.drop(columns="target", axis=1)
y= heart_data["target"]
# splitting the data into train and test data
x_train,x_test,y_train,y_test=train_test_split(x,y,test_size=0.2,stratify=y, random_state=2)
#model training
#logistic regression
model=LogisticRegression()
#training the model using train data
model.fit(x_train,y_train)
# + active=""
# #model evaluation
# + active=""
# #accuracy score
# -
#accuracy score on train data
x_train_prediction =model.predict(x_train)
training_data_accuracy = accuracy_score(x_train_prediction, y_train)
print("accuracy score for training data : ",training_data_accuracy)
#accuracy score on test data
x_test_prediction =model.predict(x_test)
test_data_accuracy = accuracy_score(x_test_prediction, y_test)
print("accuracy score for test data : ",test_data_accuracy)
# KNN:
# - The k-nearest neighbors (KNN) algorithm is a simple, supervised machine learning algorithm that can be used to solve both classification and regression problems.
scores = {}
knn_cv = KNeighborsClassifier(n_neighbors=3)
cv_scores = cross_val_score(knn_cv, x, y, cv=5)
scores['Knn'] = np.mean(cv_scores)
print(f"KNN SCORE : {np.mean(cv_scores)}")
# Random forest classifier:
#
# - A random forest is a meta estimator that fits a number of classifying decision trees on various sub-samples of the dataset and uses averaging to improve the predictive accuracy and control over-fitting.
rfc = RandomForestClassifier(random_state = 42)
cv_scores = cross_val_score(rfc, x, y, cv=5)
scores['RandomForest'] = np.mean(cv_scores)
print(f"Random Forest Score : {np.mean(cv_scores)}")
# Since logistic regression achieves the highest accuracy here, it is used as the final model.
| Datascience_With_Python/Machine Learning/Audios/Heart Failure Prediction/heart_disease_prediction.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: hoezithet
# language: python
# name: hoezithet
# ---
import bokeh
from bokeh.plotting import figure, show
from bokeh.io import output_notebook, save
from bokeh.embed import json_item
import json
from pathlib import Path
output_notebook()
def f(x): return -(1/2)*x**2 + 4*x - 6
# +
from hoezithet import graphs
xs = [5]
ys = [f(x) for x in xs]
p = graphs.get_plot(x_color='#ff6300', y_color='#19a974')
p.title.text = 'x en y als coördinaat'
p.title.text_font = 'Quicksand'
p.title.text_font_size = '18pt'
p.title.align = 'center'
p.circle(xs, ys, radius=.4, color='#555555')
item = json.dumps(json_item(p))
Path('./plt/single_x.json').write_text(item)
# +
import numpy as np
xs = np.arange(-1, 6, 1).tolist()
xs = sorted(xs)
ys = [f(x) for x in xs]
p = graphs.get_plot(x_color='#ff6300', y_color='#19a974')
p.title.text = '7 x- en y-waarden van f(x)'
p.title.text_font = 'Quicksand'
p.title.text_font_size = '18pt'
p.title.align = 'center'
p.circle(xs, ys, radius=.4, color='#555555')
item = json.dumps(json_item(p))
Path('plt/multiple_x.json').write_text(item)
# +
p = graphs.get_plot(x_color='#ff6300', y_color='#19a974', hover_format='{0.[00]}')
p.title.text = '100 x- en y-waarden van f(x)'
p.title.text_font = 'Quicksand'
p.title.text_font_size = '18pt'
p.title.align = 'center'
x1s = np.arange(-1, 9, 0.01).tolist()
y1s = [f(x) for x in x1s]
p.circle(x1s, y1s, radius=.2, color='#555555', alpha=0.4)
p.tools[0].mode = 'vline' # Trigger hover on vertical lines
# x2s = np.arange(-2, 10, 0.1).tolist()
# y2s = [f(x) for x in x2s]
# p.line(x2s, y2s, line_width=4, line_color='#e7040f')
item = json.dumps(json_item(p))
Path('plt/loads_of_x.json').write_text(item)
# +
import math
y1s = np.arange(0, 10, 0.1).tolist()
x1s = [(1/5)*y**2 - 5 for y in y1s]
y2s = np.arange(-5, 0, 0.1).tolist()
x2s = [(1/5)*y**2 - 5 for y in y2s]
p = graphs.get_plot()
p.title.text = 'Grafiek van een niet-functie'
p.title.text_font = 'Quicksand'
p.title.text_font_size = '18pt'
p.title.align = 'center'
p.tools[0].mode = 'vline' # Trigger hover on vertical lines
p.line(x1s, y1s, line_width=10, color=graphs.BLUE, line_cap='round')
p.line(x2s, y2s, line_width=10, color=graphs.BLUE, line_cap='round')
item = json.dumps(json_item(p))
Path('plt/no_fx.json').write_text(item)
| content/lessen/wiskunde/functies/grafiek/grafieken.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import torch
import torch.nn as nn
from torch.autograd import Variable
import matplotlib.pyplot as plt
import numpy as np
from torch.utils.data import TensorDataset, DataLoader
# ## Convolutional neural networks
import torch
import torchvision
import torchvision.transforms as transforms
# MNIST dataset
transform = transforms.Compose(
[transforms.ToTensor()])
train_dataset = torchvision.datasets.MNIST(root='~', train=True, transform=transform, download=True)
test_dataset = torchvision.datasets.MNIST(root='~', train=False, transform=transform)
# create training and testing data loaders
trainloader = torch.utils.data.DataLoader(train_dataset, batch_size=4,
shuffle=True, num_workers=0)
testloader = torch.utils.data.DataLoader(test_dataset, batch_size=4,
shuffle=False, num_workers=0)
# Flattens the output of the convolutional layers into a 2-D (batch, features) tensor.
# Older PyTorch releases did not ship a Flatten module (newer ones provide nn.Flatten), so we define our own here.
class Flatten(nn.Module):
def __init__(self):
super(Flatten, self).__init__()
def forward(self, x):
shape = torch.prod(torch.tensor(x.shape[1:])).item()
return x.view(-1, shape)
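# Quick sanity check of the custom module (illustrative): Flatten maps (N, C, H, W) -> (N, C*H*W)
print(Flatten()(torch.zeros(4, 2, 12, 12)).shape)  # torch.Size([4, 288])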
model = nn.Sequential(
nn.Conv2d(1, 3, kernel_size=5, stride=1, padding=2),
nn.ReLU(),
nn.MaxPool2d(2, 2),
nn.Conv2d(3, 2, kernel_size=3, stride=1, padding=0),
nn.ReLU(),
Flatten(),
nn.Linear(288, 20),
nn.ReLU(),
nn.Linear(20, 10),
)
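# Shape bookkeeping for the 288 in the first Linear layer (MNIST images are 1x28x28):
# conv(1->3, k=5, stride=1, pad=2) keeps 28x28 -> maxpool(2) gives 3x14x14,
# conv(3->2, k=3, pad=0) gives 2x12x12 -> Flatten gives 2*12*12 = 288 features.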
# Loss and optimizer
learning_rate = 0.01
num_epochs = 10
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
# Train the model
total_step = len(trainloader)
loss_list = []
acc_list = []
for epoch in range(num_epochs):
for i, (images, labels) in enumerate(trainloader):
# Run the forward pass
outputs = model(images)
loss = criterion(outputs, labels)
loss_list.append(loss.item())
# Backprop and perform Adam optimisation
optimizer.zero_grad()
loss.backward()
optimizer.step()
# Track the accuracy
total = labels.size(0)
_, predicted = torch.max(outputs.data, 1)
correct = (predicted == labels).sum().item()
acc_list.append(correct / total)
if (i + 1) % 1000 == 0:
print('Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}, Accuracy: {:.2f}%'
.format(epoch + 1, num_epochs, i + 1, total_step, loss.item(),
(correct / total) * 100))
# # Recurrent neural networks
# Let's generate the movement of a spring
# +
import numpy as np
import torch
np.random.seed(2)
T = 20
L = 1000
N = 100
x = np.empty((N, L), 'int64')
x[:] = np.array(range(L)) + np.random.randint(-4 * T, 4 * T, N).reshape(N, 1)
data = np.sin(x / 1.0 / T).astype('float32')
# -
data.shape
X0 = data[:, :-10]
Y0 = data[:, 10:]
# randomize the data
dataloader = DataLoader(TensorDataset(
torch.from_numpy(X0).reshape(X0.shape[0], X0.shape[1], -1),
torch.from_numpy(Y0).reshape(Y0.shape[0], Y0.shape[1], -1)), batch_size=10,
shuffle=True)
# Wrap nn.LSTM so it can be used inside nn.Sequential: return only the output sequence and drop the hidden state
class CustomLSTM(nn.Module):
def __init__(self, input_size, hidden_size, num_layers=1):
super(CustomLSTM, self).__init__()
self.lstm = nn.LSTM(input_size, hidden_size, num_layers)
def forward(self, x):
output, _ = self.lstm(x)
return output
model = nn.Sequential(CustomLSTM(1, 10), nn.Linear(10, 1))
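# Quick shape check (illustrative): the model maps a (T, B, 1) tensor to (T, B, 1).
# Note that nn.LSTM defaults to (seq_len, batch, features) ordering, while the DataLoader above
# yields batches shaped (batch, seq_len, 1); the shapes still line up end to end, though
# passing batch_first=True to nn.LSTM would match the loader's layout explicitly.
print(model(torch.zeros(20, 3, 1)).shape)  # torch.Size([20, 3, 1])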
num_epochs = 10
learning_rate = 0.01
criterion = nn.MSELoss()
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
# Train the model
total_step = len(dataloader)
loss_list = []
acc_list = []
for epoch in range(num_epochs):
for i, (inputs, labels) in enumerate(dataloader):
outputs = model(inputs)
loss = criterion(outputs, labels)
loss_list.append(loss.item())
# Backprop and perform Adam optimisation
optimizer.zero_grad()
loss.backward()
optimizer.step()
if (i + 1) % 10 == 0:
print('Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}'
.format(epoch + 1, num_epochs, i + 1, total_step, loss.item())
)
plt.plot(model(inputs)[0, :, 0].detach().numpy(), 'r--')
plt.plot(labels[0, :, 0].detach().numpy(), 'b');
plt.figure()
| notebooks/pytorch_cnn_rnn.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# import cell
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
# +
def clean_prep(dataframe):
"""
function that removes labels from in between cycles
and adds a new column which contains the time in seconds
"""
drop = ['Mode', 'Rest', 'Charge CC', 'Discharge CC', 'TestTime']
times = dataframe['time']
drop_index = []
index = []
for i in range(len(dataframe['time'])):
time_0 = str(times[i])
if time_0 in drop:
drop_index.append(i)
else:
index.append(i)
dataframe = dataframe.drop(drop_index)
# add column with time converted to seconds
t_sec = []
times = dataframe['time']
for i in range(len(index)):
j = index[i]
time_0 = str(times[j])
if len(time_0) < 10:
days = 0
hours, minutes, seconds = time_0.split(':')
else:
days, time = time_0.split('-')
hours, minutes, seconds = time.split(':')
sec = int(days)*86400 + int(hours)*3600 + int(minutes)*60 + int(seconds)
t_sec.append(sec)
dataframe['time_sec'] = t_sec
dataframe = dataframe.set_index('index')
return dataframe, drop_index
def import_data(filename, sheet_count):
"""function to import and concatenate excel data"""
names = ['index', 'time', 'voltage', 'current', 'capacity', 'state']
df = pd.read_excel(filename, sheet_name='Sheet1', header=None, names=names)
dfl = list([df])
if sheet_count == 1:
pass
else:
for i in range(1, sheet_count):
sheet_name='Sheet1(Continued{})'.format(i)
df0 = pd.read_excel(filename, sheet_name, header=None, names=names)
dfl.append(df0)
dff = pd.concat(dfl, ignore_index=True)
return dff
def return_cycle_indeces(cycle, cycle_break):
"""docstring"""
cycle_data = []
start = cycle_break[cycle - 1] + 2
end = cycle_break[cycle] - 1
cycle_index = np.arange(start, end)
return cycle_index
def clean_prep_break(dataframe):
"""
function that removes labels from in between cycles,
adds a new column which contains the time in seconds,
and outputs a list of cycle_breaks
"""
drop = ['Mode', 'Rest', 'Charge CC', 'Discharge CC', 'TestTime']
times = dataframe['time']
drop_index = []
index = []
for i in range(len(dataframe['time'])):
time_0 = str(times[i])
if time_0 in drop:
drop_index.append(i)
else:
index.append(i)
dataframe2 = dataframe.drop(drop_index)
# add column with time converted to seconds
t_sec = []
times = dataframe2['time']
for i in range(len(dataframe2['time'])):
j = index[i]
time_0 = str(times[j])
if len(time_0) < 10:
days = 0
hours, minutes, seconds = time_0.split(':')
else:
days, time = time_0.split('-')
hours, minutes, seconds = time.split(':')
sec = int(days)*86400 + int(hours)*3600 + int(minutes)*60 + int(seconds)
t_sec.append(sec)
dataframe2['time_sec'] = t_sec
# converts drop_index to a list of cycle_breaks
cycle_break = []
for i in range(len(dataframe.index)):
if i - 1 in drop_index and i + 1 in drop_index:
cycle_break.append(i)
else:
pass
return dataframe2, cycle_break
#**********************************************************
# +
# folder holding data of interest
folder = '../Data/Li-Li/Data_2_7_19/'
# imports and concatenates the data for each of the 10 symmetric cells
PP_A_0 = import_data(folder + '_002_4.xls', 3)
PP_B_0 = import_data(folder + '_002_6.xls', 3)
PEK_A_0 = import_data(folder + '_002_8.xls', 3)
PEK_B_0 = import_data(folder + '_005_7.xls', 3)
PEJ_A_0 = import_data(folder + '_007_4.xls', 3)
PEJ_B_0 = import_data(folder + '_007_5.xls', 3)
PI_A_0 = import_data(folder + '_007_6.xls', 3)
PI_B_0 = import_data(folder + '_007_8.xls', 3)
Ny_A_0 = import_data(folder + '_008_1.xls', 3)
Ny_B_0 = import_data(folder + '_007_7.xls', 3)
# -
# cleans and preps dataframes for plotting
PP_A, PP_A_break = clean_prep_break(PP_A_0)
PP_B, PP_B_break = clean_prep_break(PP_B_0)
PEK_A, PEK_A_break = clean_prep_break(PEK_A_0)
PEK_B, PEK_B_break = clean_prep_break(PEK_B_0)
PEJ_A, PEJ_A_break = clean_prep_break(PEJ_A_0)
PEJ_B, PEJ_B_break = clean_prep_break(PEJ_B_0)
PI_A, PI_A_break = clean_prep_break(PI_A_0)
PI_B, PI_B_break = clean_prep_break(PI_B_0)
Ny_A, Ny_A_break = clean_prep_break(Ny_A_0)
Ny_B, Ny_B_break = clean_prep_break(Ny_B_0)
plt.plot(PP_A['time_sec'], PP_A['voltage'])
plt.plot(PP_B['time_sec'], PP_B['voltage'])
plt.plot(PEK_A['time_sec'], PEK_A['voltage'])
plt.plot(PEK_B['time_sec'], PEK_B['voltage'])
plt.plot(PEJ_A['time_sec'], PEJ_A['voltage'])
plt.plot(PEJ_B['time_sec'], PEJ_B['voltage'])
plt.plot(PI_A['time_sec'], PI_A['voltage'])
plt.plot(PI_B['time_sec'], PI_B['voltage'])
plt.plot(Ny_A['time_sec'], Ny_A['voltage'])
plt.plot(Ny_B['time_sec'], Ny_B['voltage'])
plt.plot(PP_A['time_sec'], PP_A['voltage'])
plt.plot(PEK_A['time_sec'], PEK_A['voltage'])
plt.plot(PEJ_A['time_sec'], PEJ_A['voltage'])
plt.plot(PI_B['time_sec'], PI_B['voltage'])
plt.plot(Ny_A['time_sec'], Ny_A['voltage'])
PP_A_break
PP_A.head(490)
# +
cycle_indeces = return_cycle_indeces(193, PP_A_break)
cycle_data = PP_A[PP_A.index.isin(cycle_indeces)]
plt.plot(cycle_data['time_sec'], cycle_data['voltage'])
# -
# To do:
#
# 1. break the cycle_break data in charge and discharge lists
# 2. write a function for plotting a specific cycle (a sketch of this follows below)
# 3. write a function that will allow for plotting a range of cycles
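# A minimal sketch of to-do item 2 above (hypothetical helper name; it simply wraps the
# cycle-plotting cell shown earlier):
def plot_cycle(dataframe, cycle, cycle_break, ax=None):
    """Plot voltage vs. time in seconds for a single cycle, using the cycle_break list."""
    idx = return_cycle_indeces(cycle, cycle_break)
    cycle_data = dataframe[dataframe.index.isin(idx)]
    ax = ax if ax is not None else plt.gca()
    ax.plot(cycle_data['time_sec'], cycle_data['voltage'])
    return ax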
| Dev_1/Li-Li_dev_1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/sangcheon38/11.29/blob/main/Untitled1.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + id="A7u0x_dh_aTL"
import numpy as np
# + id="08-2WlQhpTHB" colab={"base_uri": "https://localhost:8080/"} outputId="333b3c0d-6a08-438e-a0ca-94e9b003ec77"
A = np.array([[0,1,-2.3,0.1], [1.3, 4, -0.1, 0], [4.1, -1.0, 0, 1.7]])
print(A)
print(A.shape)
# + colab={"base_uri": "https://localhost:8080/"} id="h0utw0zL_jeS" outputId="76c38a0f-908d-4b27-d125-1e7c4e4423d8"
m,n = A.shape
print('# of rows:', m)
print('# of cols:', n)
# + colab={"base_uri": "https://localhost:8080/"} id="jQ9CCdBX_-el" outputId="f433e583-2e74-4fcc-d13d-c14904f4ab8e"
tall = lambda X: X.shape[0] > X.shape[1]
tall(A)
# + colab={"base_uri": "https://localhost:8080/"} id="axC2n1ghAOaK" outputId="a441087f-95f3-444f-dff1-3c116ac5cf60"
wide = lambda X: X.shape[0] < X.shape[1]
wide(A)
# + colab={"base_uri": "https://localhost:8080/"} id="iG1I5qLwA0rf" outputId="3f234452-942a-42ab-aab1-0f7e8260fa14"
A[0,2]
# + colab={"base_uri": "https://localhost:8080/"} id="Wku_Y_olA58N" outputId="ef5b1766-2ab8-42fe-9ad1-89e9bc20211b"
A[0,2] = 7.9
A
# + colab={"base_uri": "https://localhost:8080/"} id="eT3_z7d9BnOJ" outputId="56bb93a3-0305-4f9d-ea27-8222d3b0e444"
A = np.array([[0,1,-2.3,0.1], [1.3, 4, -0.1, 0], [4.1, -1.0, 0, 1.7]])
B = A.copy()
print(A==B)
print(np.sum(A==B))
# + colab={"base_uri": "https://localhost:8080/"} id="ENUDSn4MB2lk" outputId="b352366f-76ff-4c72-ad9d-962f7a1ca4c3"
B[0,3] = 100
print(A==B)
print(np.sum(A==B))
# + colab={"base_uri": "https://localhost:8080/"} id="-hMVck-0DbcU" outputId="70bd14ce-6c44-47ee-8d40-b007f6e28251"
A = np.array([[-1, 0, 1, 0],[2, -3, 0, 1],[0, 4, -2, 1]])
A[0:2, 2:4]
# + colab={"base_uri": "https://localhost:8080/"} id="nqgLN_cKDqkQ" outputId="aaf9c9bc-5824-4d86-f399-3b856ee74518"
A[:,2]
# + colab={"base_uri": "https://localhost:8080/"} id="4MoXsd-IEgKD" outputId="9c1dc03b-27f6-431f-9ba1-80d4e20e3fa9"
A[1,:]
# + colab={"base_uri": "https://localhost:8080/"} id="tdDEjDewFRdH" outputId="55833f50-f465-4814-fb8c-891364633373"
A.reshape((6,2))
# + colab={"base_uri": "https://localhost:8080/"} id="0wzGodVbFaiA" outputId="87651580-7569-4017-c48d-284f5ff560b2"
A.reshape((4,3))
# + colab={"base_uri": "https://localhost:8080/"} id="Hwm9JqPKFc7D" outputId="b37b5c80-08a9-494d-c427-9209b99cf465"
A.reshape((1,12))
# + colab={"base_uri": "https://localhost:8080/"} id="tC-wtzN7GEX0" outputId="16c7b2fd-3c58-4a3d-d135-a38dddf69814"
B = np.array([0,2,3])
C = np.array([-1])
D = np.array([[2,2,1],[1,3,5]])
E = np.array([[4],[4]])
A = np.block([[B,C],[D,E]])
A
# + colab={"base_uri": "https://localhost:8080/"} id="HlK16yHDFfcW" outputId="ed9d9027-7981-4a51-cd59-6117ae32f8b6"
a = [1,2]
b = [4,5]
c = [7,8]
A = np.vstack([a,b,c])
B = np.hstack([a,b,c])
print(A)
print(B)
# + colab={"base_uri": "https://localhost:8080/"} id="OKqC1et5HVFm" outputId="2e6bcb77-ce63-4f31-d678-d704c4013476"
a = [[1],[2]]
b = [[4],[5]]
c = [[7],[8]]
A = np.vstack([a,b,c])
B = np.hstack([a,b,c])
print(A)
print(B)
# + colab={"base_uri": "https://localhost:8080/"} id="gX4kcnCQHnc3" outputId="3049fd9b-c8af-42a4-f3e1-4b77abf94a9b"
np.zeros((3,4))
# + colab={"base_uri": "https://localhost:8080/"} id="m8shNr_-JLPx" outputId="83e2b67f-aab2-4145-8c30-e4fdbd0c4ca5"
np.ones((3,4))
# + colab={"base_uri": "https://localhost:8080/"} id="4fOEjikgJQvV" outputId="ac316e35-91ca-424a-c3a7-c67eee541317"
np.identity((4))
# + colab={"base_uri": "https://localhost:8080/"} id="7TQyqj2rJUds" outputId="2c5365a5-2dfb-4460-cfde-fda6ac987fff"
x = np.array([[0, 1, 2],[3, 4, 5],[6, 7, 8]])
v = np.diag(x)
v
# + colab={"base_uri": "https://localhost:8080/"} id="d7N2YSb_Je3N" outputId="bd786b7a-1bd8-4082-9b47-84e2eda538f6"
y = np.diag(v)
y
# + colab={"base_uri": "https://localhost:8080/"} id="DRkTmpmyJq7l" outputId="2c6e9b0d-9e42-439b-d815-349397a9ca2e"
np.random.random((3,4))
# + colab={"base_uri": "https://localhost:8080/"} id="OQVfzt-EMEb-" outputId="0d67b132-9350-44c9-a616-7dc319283abf"
np.random.randn(3,4)
# + colab={"base_uri": "https://localhost:8080/"} id="1gjjVo_WMbcw" outputId="3a639948-e359-4e6d-c1d0-68389c8a4394"
H = np.array([[0,1,-2,1], [2,-1,3,0]])
H.T
# + colab={"base_uri": "https://localhost:8080/"} id="VkPiOuMtMfGd" outputId="b86ac249-b6ed-4bbe-80f8-cc68908c1043"
np.transpose(H)
# + colab={"base_uri": "https://localhost:8080/"} id="nmHLZLOKMIuI" outputId="2974afd7-5781-41d4-a5b1-42c524633baf"
U = np.array([[0,4], [7,0], [3,1]])
V = np.array([[1,2], [2,3], [0,4]])
U + V
# + colab={"base_uri": "https://localhost:8080/"} id="WpiK5qwqMXsN" outputId="54510c1d-8e67-40d5-c9f1-f53e92d06f1b"
2.2*U
# + colab={"base_uri": "https://localhost:8080/"} id="r047sUAMMiXS" outputId="5cdf6bf6-147b-45ba-a570-aff054095c56"
A = np.array([[12,13,-11], [10,-11,34]])
np.linalg.norm(A)
# + colab={"base_uri": "https://localhost:8080/"} id="qOhQ_6EkMrFB" outputId="ee7e1abc-78b5-4feb-acb9-2851673cd6fd"
A = np.array([[0,2,-1],[-2,1,1]])
x = np.array([2,1,-1])
A @ x
# + colab={"base_uri": "https://localhost:8080/"} id="1kEsilkXNPbb" outputId="794e0e5d-7690-4b2a-b7f0-bd05949d09e9"
Rot = lambda theta: [[np.cos(theta), -np.sin(theta)],[np.sin(theta), np.cos(theta)]]
R = Rot(np.pi/3)
R
# + id="LSqMCL6UN2Z8"
points = np.array([[1,0],[1.5,0],[2,0],[1,0.25],[1.5,0.25],[1,0.5]])
# + id="LeUz3vyFN6ik"
rpoints = np.array([R @ p for p in points])
# + colab={"base_uri": "https://localhost:8080/"} id="RypwUIVBOan_" outputId="9b25d281-5ad6-42d3-d222-364385df1540"
print(points)
print(rpoints)
# + colab={"base_uri": "https://localhost:8080/", "height": 265} id="RxQv7VDvN_yd" outputId="16d64282-367d-4b5b-a29d-c9c2b46ae90b"
import matplotlib.pyplot as plt
plt.ion()
plt.scatter([c[0] for c in points], [c[1] for c in points])
plt.scatter([c[0] for c in rpoints],[c[1] for c in rpoints])
plt.show()
| Untitled1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:dlnd]
# language: python
# name: conda-env-dlnd-py
# ---
# # Your first neural network
#
# In this project, you'll build your first neural network and use it to predict daily bike rental ridership. We've provided some of the code, but left the implementation of the neural network up to you (for the most part). After you've submitted this project, feel free to explore the data and the model more.
#
#
# +
# %matplotlib inline
# %config InlineBackend.figure_format = 'retina'
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# -
# ## Load and prepare the data
#
# A critical step in working with neural networks is preparing the data correctly. Variables on different scales make it difficult for the network to efficiently learn the correct weights. Below, we've written the code to load and prepare the data. You'll learn more about this soon!
# +
data_path = 'Bike-Sharing-Dataset/hour.csv'
rides = pd.read_csv(data_path)
# -
rides.head()
# ## Checking out the data
#
# This dataset has the number of riders for each hour of each day from January 1 2011 to December 31 2012. The number of riders is split between casual and registered, summed up in the `cnt` column. You can see the first few rows of the data above.
#
# Below is a plot showing the number of bike riders over the first 10 days in the data set. You can see the hourly rentals here. This data is pretty complicated! The weekends have lower overall ridership and there are spikes when people are biking to and from work during the week. Looking at the data above, we also have information about temperature, humidity, and windspeed, all of which likely affect the number of riders. You'll be trying to capture all this with your model.
rides[:24*10].plot(x='dteday', y='cnt')
# ### Dummy variables
# Here we have some categorical variables like season, weather, month. To include these in our model, we'll need to make binary dummy variables. This is simple to do with Pandas thanks to `get_dummies()`.
# +
dummy_fields = ['season', 'weathersit', 'mnth', 'hr', 'weekday']
for each in dummy_fields:
dummies = pd.get_dummies(rides[each], prefix=each, drop_first=False)
rides = pd.concat([rides, dummies], axis=1)
fields_to_drop = ['instant', 'dteday', 'season', 'weathersit',
'weekday', 'atemp', 'mnth', 'workingday', 'hr']
data = rides.drop(fields_to_drop, axis=1)
data.head()
# -
# ### Scaling target variables
# To make training the network easier, we'll standardize each of the continuous variables. That is, we'll shift and scale the variables such that they have zero mean and a standard deviation of 1.
#
# The scaling factors are saved so we can go backwards when we use the network for predictions.
quant_features = ['casual', 'registered', 'cnt', 'temp', 'hum', 'windspeed']
# Store scalings in a dictionary so we can convert back later
scaled_features = {}
for each in quant_features:
mean, std = data[each].mean(), data[each].std()
scaled_features[each] = [mean, std]
data.loc[:, each] = (data[each] - mean)/std
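# (Going back to the original units later is simply scaled*std + mean with the saved pairs;
# the prediction plot near the end of this notebook does exactly this.)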
# ### Splitting the data into training, testing, and validation sets
#
# We'll save the last 21 days of the data to use as a test set after we've trained the network. We'll use this set to make predictions and compare them with the actual number of riders.
# +
# Save the last 21 days
test_data = data[-21*24:]
data = data[:-21*24]
# Separate the data into features and targets
target_fields = ['cnt', 'casual', 'registered']
features, targets = data.drop(target_fields, axis=1), data[target_fields]
test_features, test_targets = test_data.drop(target_fields, axis=1), test_data[target_fields]
# -
# We'll split the data into two sets, one for training and one for validating as the network is being trained. Since this is time series data, we'll train on historical data, then try to predict on future data (the validation set).
# Hold out the last 60 days of the remaining data as a validation set
train_features, train_targets = features[:-60*24], targets[:-60*24]
val_features, val_targets = features[-60*24:], targets[-60*24:]
# ## Time to build the network
#
# Below you'll build your network. We've built out the structure and the backwards pass. You'll implement the forward pass through the network. You'll also set the hyperparameters: the learning rate, the number of hidden units, and the number of training passes.
#
# The network has two layers, a hidden layer and an output layer. The hidden layer will use the sigmoid function for activations. The output layer has only one node and is used for the regression; the output of the node is the same as the input of the node. That is, the activation function is $f(x)=x$. A function that takes the input signal and generates an output signal, while taking into account the threshold, is called an activation function. We work through each layer of our network, calculating the outputs for each neuron. All of the outputs from one layer become inputs to the neurons on the next layer. This process is called *forward propagation*.
#
# We use the weights to propagate signals forward from the input to the output layers in a neural network. We use the weights to also propagate error backwards from the output back into the network to update our weights. This is called *backpropagation*.
#
# > **Hint:** You'll need the derivative of the output activation function ($f(x) = x$) for the backpropagation implementation. If you aren't familiar with calculus, this function is equivalent to the equation $y = x$. What is the slope of that equation? That is the derivative of $f(x)$.
#
# Below, you have these tasks:
# 1. Implement the sigmoid function to use as the activation function. Set `self.activation_function` in `__init__` to your sigmoid function.
# 2. Implement the forward pass in the `train` method.
# 3. Implement the backpropagation algorithm in the `train` method, including calculating the output error.
# 4. Implement the forward pass in the `run` method.
#
class NeuralNetwork(object):
def __init__(self, input_nodes, hidden_nodes, output_nodes, learning_rate):
# Set number of nodes in input, hidden and output layers.
self.input_nodes = input_nodes
self.hidden_nodes = hidden_nodes
self.output_nodes = output_nodes
# Initialize weights
self.weights_input_to_hidden = np.random.normal(0.0, self.hidden_nodes**-0.5,
(self.hidden_nodes, self.input_nodes))
self.weights_hidden_to_output = np.random.normal(0.0, self.output_nodes**-0.5,
(self.output_nodes, self.hidden_nodes))
self.lr = learning_rate
#### Set this to your implemented sigmoid function ####
# Activation function is the sigmoid function
self.activation_function = self.sigmoid
def sigmoid(self, x):
return 1.0/(1 + np.exp(-x))
def sig_derivative(self, output):
return output * (1-output)
def train(self, inputs_list, targets_list):
# Convert inputs list to 2d array
inputs = np.array(inputs_list, ndmin=2).T
targets = np.array(targets_list, ndmin=2).T
n = float(len(inputs)*2)
#print (n)
#### Implement the forward pass here ####
### Forward pass ###
# TODO: Hidden layer
hidden_inputs = np.dot(self.weights_input_to_hidden, inputs)# signals into hidden layer
hidden_outputs = self.activation_function(hidden_inputs)# signals from hidden layer
# TODO: Output layer
final_inputs = np.dot(self.weights_hidden_to_output, hidden_outputs) # signals into final output layer
final_outputs = final_inputs # signals from final output layer
#### Implement the backward pass here ####
### Backward pass ###
# TODO: Output error
output_errors = targets - final_outputs# Output layer error is the difference between desired target and actual output.
output_gradient = output_errors
hidden_errors = np.dot(self.weights_hidden_to_output.T, output_errors)
# TODO: Backpropagated error
hidden_grad = self.sig_derivative(hidden_outputs) # hidden layer gradients
hidden_gradient_product = hidden_errors * hidden_grad
# TODO: Update the weights
self.weights_hidden_to_output += (self.lr * np.dot(output_gradient,hidden_outputs.T)) # update hidden-to-output weights with gradient descent step
self.weights_input_to_hidden += (self.lr * np.dot(hidden_gradient_product ,inputs.T)) # update input-to-hidden weights with gradient descent step
def run(self, inputs_list):
# Run a forward pass through the network
inputs = np.array(inputs_list, ndmin=2).T
#### Implement the forward pass here ####
# TODO: Hidden layer
hidden_inputs = np.dot(self.weights_input_to_hidden, inputs)# signals into hidden layer
hidden_outputs = self.activation_function(hidden_inputs) # signals from hidden layer
# TODO: Output layer
final_inputs = np.dot(self.weights_hidden_to_output ,hidden_outputs)# signals into final output layer
final_outputs = final_inputs # signals from final output layer
return final_outputs
def MSE(y, Y):
return np.mean((y-Y)**2)
# ## Training the network
#
# Here you'll set the hyperparameters for the network. The strategy here is to find hyperparameters such that the error on the training set is low, but you're not overfitting to the data. If you train the network too long or have too many hidden nodes, it can become overly specific to the training set and will fail to generalize to the validation set. That is, the loss on the validation set will start increasing as the training set loss drops.
#
# You'll also be using a method known as Stochastic Gradient Descent (SGD) to train the network. The idea is that for each training pass, you grab a random sample of the data instead of using the whole data set. You use many more training passes than with normal gradient descent, but each pass is much faster. This ends up training the network more efficiently. You'll learn more about SGD later.
#
# ### Choose the number of epochs
# This is the number of times the dataset will pass through the network, each time updating the weights. As the number of epochs increases, the network becomes better and better at predicting the targets in the training set. You'll need to choose enough epochs to train the network well but not too many or you'll be overfitting.
#
# ### Choose the learning rate
# This scales the size of weight updates. If this is too big, the weights tend to explode and the network fails to fit the data. A good choice to start at is 0.1. If the network has problems fitting the data, try reducing the learning rate. Note that the lower the learning rate, the smaller the steps are in the weight updates and the longer it takes for the neural network to converge.
#
# ### Choose the number of hidden nodes
# The more hidden nodes you have, the more accurate predictions the model will make. Try a few different numbers and see how it affects the performance. You can look at the losses dictionary for a metric of the network performance. If the number of hidden units is too low, then the model won't have enough space to learn and if it is too high there are too many options for the direction that the learning can take. The trick here is to find the right balance in number of hidden units you choose.
# +
import sys
### Set the hyperparameters here ###
epochs = 1200
learning_rate = 0.02 #0.095 0.095 0.05 0.11
hidden_nodes = 20 # 130 5-0.565 10-0.560 30-0.554
output_nodes = 1
N_i = train_features.shape[1]
network = NeuralNetwork(N_i, hidden_nodes, output_nodes, learning_rate)
losses = {'train':[], 'validation':[]}
for e in range(epochs):
# Go through a random batch of 128 records from the training data set
batch = np.random.choice(train_features.index, size=128)
for record, target in zip(train_features.ix[batch].values,
train_targets.ix[batch]['cnt']):
network.train(record, target)
# Printing out the training progress
train_loss = MSE(network.run(train_features), train_targets['cnt'].values)
val_loss = MSE(network.run(val_features), val_targets['cnt'].values)
sys.stdout.write("\rProgress: " + str(100 * e/float(epochs))[:4] \
+ "% ... Training loss: " + str(train_loss)[:5] \
+ " ... Validation loss: " + str(val_loss)[:5])
losses['train'].append(train_loss)
losses['validation'].append(val_loss)
# -
plt.plot(losses['train'], label='Training loss')
plt.plot(losses['validation'], label='Validation loss')
plt.legend()
plt.ylim(ymax=0.5)
# ## Check out your predictions
#
# Here, use the test data to view how well your network is modeling the data. If something is completely wrong here, make sure each step in your network is implemented correctly.
# +
fig, ax = plt.subplots(figsize=(8,4))
mean, std = scaled_features['cnt']
predictions = network.run(test_features)*std + mean
ax.plot(predictions[0], label='Prediction')
ax.plot((test_targets['cnt']*std + mean).values, label='Data')
ax.set_xlim(right=len(predictions))
ax.legend()
dates = pd.to_datetime(rides.ix[test_data.index]['dteday'])
dates = dates.apply(lambda d: d.strftime('%b %d'))
ax.set_xticks(np.arange(len(dates))[12::24])
_ = ax.set_xticklabels(dates[12::24], rotation=45)
# -
# ## Thinking about your results
#
# Answer these questions about your results. How well does the model predict the data? Where does it fail? Why does it fail where it does?
#
# > **Note:** You can edit the text in this cell by double clicking on it. When you want to render the text, press control + enter
#
# #### Your answer below
#
# The model predicts the data very well from December 11 to December 21st. It fails between
# the dates December 22 and December 31, especially around 25th December and 31st December.
#
# The reason for this may be that our training model does not account for holidays where most people will not be commuting to work and spending time at home/recreation. Our model tries to predict the target on the holidays like it will for any other day and hence we observe the failures around those dates.
#
# We can possibly add a binary(0,1) record field to our data, which records whether a day was holiday or not. Then we may be able to fit the data better.
# ## Unit tests
#
# Run these unit tests to check the correctness of your network implementation. These tests must all be successful to pass the project.
# +
import unittest
inputs = [0.5, -0.2, 0.1]
targets = [0.4]
test_w_i_h = np.array([[0.1, 0.4, -0.3],
[-0.2, 0.5, 0.2]])
test_w_h_o = np.array([[0.3, -0.1]])
class TestMethods(unittest.TestCase):
##########
# Unit tests for data loading
##########
def test_data_path(self):
# Test that file path to dataset has been unaltered
self.assertTrue(data_path.lower() == 'bike-sharing-dataset/hour.csv')
def test_data_loaded(self):
# Test that data frame loaded
self.assertTrue(isinstance(rides, pd.DataFrame))
##########
# Unit tests for network functionality
##########
def test_activation(self):
network = NeuralNetwork(3, 2, 1, 0.5)
# Test that the activation function is a sigmoid
self.assertTrue(np.all(network.activation_function(0.5) == 1/(1+np.exp(-0.5))))
def test_train(self):
# Test that weights are updated correctly on training
network = NeuralNetwork(3, 2, 1, 0.5)
network.weights_input_to_hidden = test_w_i_h.copy()
network.weights_hidden_to_output = test_w_h_o.copy()
network.train(inputs, targets)
self.assertTrue(np.allclose(network.weights_hidden_to_output,
np.array([[ 0.37275328, -0.03172939]])))
self.assertTrue(np.allclose(network.weights_input_to_hidden,
np.array([[ 0.10562014, 0.39775194, -0.29887597],
[-0.20185996, 0.50074398, 0.19962801]])))
def test_run(self):
# Test correctness of run method
network = NeuralNetwork(3, 2, 1, 0.5)
network.weights_input_to_hidden = test_w_i_h.copy()
network.weights_hidden_to_output = test_w_h_o.copy()
self.assertTrue(np.allclose(network.run(inputs), 0.09998924))
suite = unittest.TestLoader().loadTestsFromModule(TestMethods())
unittest.TextTestRunner().run(suite)
# -
| dlnd_neural_network_submission.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %config InlineBackend.figure_formats = ['svg']
from coronavirus import *
# # 13 April 2020
#
# - Some plots look a little suspicious / misleading
# - lots of noise
# - code base could do with refactoring
# - explore that here
# Get some test data
country = "Germany"
region, subregion = None, "SK Mannheim"
c, d = get_country_data(country, region=region, subregion=subregion)
if False:
c2 = c.drop(c[c.index > '2020-02-15'].index)
d2 = d.drop(d[d.index > '2020-02-15'].index)
# c2.loc['2020-04-01'] = np.nan # c2.loc['2020-03-31']
c2 = c2.dropna()
else:
c2 = c
d2 = d
c2.country = country
c2.label = c.label
d2.label = d.label
d2.country = d.country
overview(country=country, subregion=subregion);
# +
fig, axes = plt.subplots(2, 1, figsize=(10, 5), sharex=False)
ax = axes[0]
# ax.set_ylim([-50, 200])
plot_daily_change(ax=ax, series=c2, color="C1");
ax = axes[1]
plot_daily_change(ax=ax, series=d2, color="C0")
# +
# Source code used to develop plot_growth_factor; might be convenient to play with
def plot_growth_factor_dev(ax, series, color, minchange=10):
"""relative change of number of new cases/deaths from day to day
See https://youtu.be/Kas0tIxDvrg?t=330, 5:30 onwards
"""
    # only keep values where there is a change of at least a minimum size
country, label = series.country, series.label
# get smooth data from plot 1 to base this plot on
(change, change_label) , (smooth, smooth_label), \
(smooth2, smooth2_label) = compute_daily_change(series)
# Compute data points
f = smooth.pct_change() + 1 # compute ratio of subsequent daily changes
# division by zero may lead to np.inf in the data: get rid of that
f.replace(np.inf, np.nan, inplace=True) # seems not to affect plot
# Compute smoother version for line in plots
rolling = f.rolling(7, center=True, win_type='gaussian', min_periods=3).mean(std=2)
label = series.country + " " + series.label + " growth factor (based on smooth daily change)"
ax.plot(f.index, f.values, 'o', color=color, alpha=0.3, label=label)
label = series.country + " " + series.label + " smoothed growth factor"
ax.plot(rolling.index, rolling.values, '-', color=color, label=label, linewidth=LW)
ax.legend()
ax.set_ylabel("growth factor")
ax.set_ylim(0.5, 1.5) # should generally be below 1
ax.plot([series.index.min(), series.index.max()], [1.0, 1.0], '-C3') # label="critical value"
return ax
# -
fig, axes = plt.subplots(2, 1, figsize=(10, 5), sharex=False)
ax = axes[0]
plot_growth_factor_dev(ax=ax, series=c2, color="C1");
ax = axes[1]
plot_growth_factor_dev(ax=ax, series=d2, color="C0");
# +
fig, axes = plt.subplots(2, 1, figsize=(10, 5), sharex=False)
ax = axes[0]
ax.set_ylim([-50, 200])
plot_daily_change(ax=ax, series=c2,color="C1")
ax = axes[1]
plot_daily_change(ax=ax, series=d2, color="C0")
# +
country = "Laos"
region, subregion = None, None
c, d = get_country_data(country, region=region, subregion=subregion)
c2 = c.drop(c[c.index < '2020-03-15'].index)
d2 = d.drop(d[d.index < '2020-03-15'].index)
c2.loc['2020-04-01'] = np.nan # c2.loc['2020-03-31']
c2 = c2.dropna()
c2.country = country
c2 = c.drop(c[c.index < '2020-03-15'].index)
d2 = d.drop(d[d.index < '2020-03-15'].index)
c2.country = c.country
d2.country = d.country
c2.label = c.label
d2.label = d.label
# -
def overview_dev(country, region=None, subregion=None, savefig=False):
c, d = get_country_data(country, region=region, subregion=subregion)
fig, axes = plt.subplots(5, 1, figsize=(10, 12), sharex=False)
ax = axes[0]
plot_time_step(ax=ax, series=c, style="-C1")
plot_time_step(ax=ax, series=d, style="-C0")
ax = axes[1]
plot_daily_change(ax=ax, series=c, color="C1")
if country == "China":
ax.set_ylim(0, 5000)
ax = axes[2]
plot_daily_change(ax=ax, series=d, color="C0")
ax = axes[3]
plot_growth_factor(ax, series=d, color="C0")
plot_growth_factor(ax, series=c, color="C1")
ax = axes[4]
plot_doubling_time(ax, series=d, color="C0")
plot_doubling_time(ax, series=c, color="C1")
# enforce same x-axis on all plots
for i in range(1, 5):
axes[i].set_xlim(axes[0].get_xlim())
for i in range(0, 5):
axes[i].tick_params(left=True, right=True, labelleft=True, labelright=True)
axes[i].yaxis.set_ticks_position('both')
overview('Laos');
max([0, np.nan])
#country = "Germany"
country = "China"
country = "Austria"
country = "United Kingdom"
country = "Laos"
region, subregion = None, None
c, d = get_country_data(country, region=region, subregion=subregion)
if False:
c2 = c.drop(c[c.index > '2020-03-15'].index)
d2 = d.drop(d[d.index > '2020-03-15'].index)
# c2.loc['2020-04-01'] = np.nan # c2.loc['2020-03-31']
c2 = c2.dropna()
else:
c2 = c
d2 = d
c2.country = country
c2.label = c.label
d2.label = d.label
d2.country = d.country
def plot_doubling_time_dev(ax, series, color, minchange=1):
    # only keep values where there is a change of at least a minimum size,
    # i.e. drop data points where the (smoothed) daily change is small
(f, f_label) , (change_smoothed, smoothed_label), _ = compute_daily_change(series)
sel = change_smoothed < minchange
reduced = series.drop(f[sel].index, inplace=False)
if len(reduced) <= 1: # no data left
return ax
    ratio = reduced.pct_change() + 1  # computes q2/q1, the day-to-day growth ratio
ratio_smooth = reduced.rolling(7, center=True, win_type='gaussian',
min_periods=7).mean(std=3).pct_change() + 1 # computes q2/q1
# can have np.inf and np.nan at this point.
# if those are the only values, then we should stop
ratio_smooth.replace(np.inf, np.nan, inplace=True)
if ratio_smooth.isna().all():
return ax
# return ratio, ratio_smooth
# compute the actual doubling time
dtime = double_time_exponential(ratio, t2_minus_t1=1)
dtime_smooth = double_time_exponential(ratio_smooth, t2_minus_t1=1)
label = series.country + " new " + series.label
ax.plot(dtime.index, dtime.values, 'o', color=color, alpha=0.3, label=label)
#return dtime_smooth, reduced, ratio, ratio_smooth
# good to take maximum value from here
dtime_smooth.replace(np.inf, np.nan, inplace=True) # get rid of x/0 results, which affect max()
#dtime_smooth.dropna(inplace=True)
ymax = min(dtime_smooth.max()*1.5, 5000) # China has doubling time of 3000 in between
## Adding a little bit of additional smoothing just for visual effects
dtime_smooth2 = dtime_smooth.rolling(3, win_type='gaussian', min_periods=1, center=True).mean(std=1)
ax.set_ylim(0, ymax)
ax.plot(dtime_smooth2.index, dtime_smooth2.values, "-", color=color, alpha=1.0,
label=label + ' 7-day rolling mean (stddev=3)',
linewidth=LW)
ax.legend()
ax.set_ylabel("doubling time [days]")
return ax, dtime
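# For reference, the doubling-time relation this plot relies on: if a quantity grows by a
# factor q per day, it doubles after log(2)/log(q) days. A hedged sketch (assuming
# double_time_exponential implements this standard exponential-growth formula):
def doubling_time_from_daily_ratio(q):
    return np.log(2) / np.log(q)

print(doubling_time_from_daily_ratio(1.10))  # ~7.27 days at 10% daily growth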
plot_doubling_time_dev(ax, d2, 'C0')
# +
fig, axes = plt.subplots(2, 1, figsize=(10, 5), sharex=False)
ax = axes[0]
plot_doubling_time(ax=ax, series=c2, color="C1", minchange=0.5);
#ax.set_ylim([0, 8])
ax = axes[1]
plot_doubling_time(ax=ax, series=d2, color="C0");
#ax.set_ylim([0, 100])
# +
series = c2
(f, f_label) , (f_smoothed, smoothed_label) = compute_growth_factor(series)
q = series.diff().pct_change() + 1
fig, axes = plt.subplots(2, 1, figsize=(10, 5), sharex=False)
ax = axes[0]
ax.plot(f.index, f.values, ".", color="C1", );
qsmooth = q.rolling(7, min_periods=1).mean()
ax.plot(qsmooth.index, qsmooth.values, "+", color="C0");
ax.set_ylim([0.8, 1.2])
ax = axes[1]
#plot_doubling_time(ax=ax, series=c2, color="C1");
dtime = double_time_exponential(q, t2_minus_t1=1)
dtime_smooth = double_time_exponential(qsmooth, t2_minus_t1=1)
ax.plot(dtime.index, dtime.values, 'x', label='raw data')
ax.plot(dtime_smooth.index, dtime_smooth.values, '-', label='smooth growth factor')
ax.set_ylim([0, 20])
# +
# # series.pct_change?
# -
# # Playground to explore different smoothing options
# +
fig, axes = plt.subplots(2, 1, figsize=(10, 5), sharex=False)
ax = axes[0]
n = 51
x = np.linspace(-10, 10, n)
x0 = 0
sigma = 2
data = np.exp(-(x-x0)**2/sigma**2) + (np.random.uniform(size=n) - 0.5) * 0.5
s = pd.Series(data=data, index=x)
s.iloc[n//2-2] = np.nan
s.iloc[n//2+15] = 1
ax.bar(s.index, s.values, alpha=0.2, width=0.3)
rgauss = s.rolling(7, center=True).mean()
rmean = s.rolling(7, center=True, win_type='gaussian', min_periods=1).mean(std=2)
rgauss = s.rolling(7, center=True, win_type='gaussian', min_periods=1).mean(std=3)
ax.plot(rmean.index, rmean.values, '-', label='box')
ax.plot(rgauss.index, rgauss.values, '-', linewidth=2, alpha=0.4, label='gaussian')
# -
print("s ", s.sum())
print("rmean ", rmean.sum())
print("gaus ", rgauss.sum())
| dev/plot-quality.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Deep Neural Network for Image Classification: Application
#
# When you finish this, you will have finished the last programming assignment of Week 4, and also the last programming assignment of this course!
#
# You will use the functions you'd implemented in the previous assignment to build a deep network, and apply it to cat vs non-cat classification. Hopefully, you will see an improvement in accuracy relative to your previous logistic regression implementation.
#
# **After this assignment you will be able to:**
# - Build and apply a deep neural network to supervised learning.
#
# Let's get started!
# ## 1 - Packages
# Let's first import all the packages that you will need during this assignment.
# - [numpy](http://www.numpy.org) is the fundamental package for scientific computing with Python.
# - [matplotlib](http://matplotlib.org) is a library to plot graphs in Python.
# - [h5py](http://www.h5py.org) is a common package to interact with a dataset that is stored on an H5 file.
# - [PIL](http://www.pythonware.com/products/pil/) and [scipy](https://www.scipy.org/) are used here to test your model with your own picture at the end.
# - dnn_app_utils provides the functions implemented in the "Building your Deep Neural Network: Step by Step" assignment to this notebook.
# - np.random.seed(1) is used to keep all the random function calls consistent. It will help us grade your work.
# +
import time
import numpy as np
import h5py
import matplotlib.pyplot as plt
import scipy
from PIL import Image
from scipy import ndimage
from dnn_app_utils_v2 import *
# %matplotlib inline
plt.rcParams['figure.figsize'] = (5.0, 4.0) # set default size of plots
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'
# %load_ext autoreload
# %autoreload 2
np.random.seed(1)
# -
# ## 2 - Dataset
#
# You will use the same "Cat vs non-Cat" dataset as in "Logistic Regression as a Neural Network" (Assignment 2). The model you had built had 70% test accuracy on classifying cats vs non-cats images. Hopefully, your new model will perform a better!
#
# **Problem Statement**: You are given a dataset ("data.h5") containing:
# - a training set of m_train images labelled as cat (1) or non-cat (0)
# - a test set of m_test images labelled as cat and non-cat
# - each image is of shape (num_px, num_px, 3) where 3 is for the 3 channels (RGB).
#
# Let's get more familiar with the dataset. Load the data by running the cell below.
train_x_orig, train_y, test_x_orig, test_y, classes = load_data()
# The following code will show you an image in the dataset. Feel free to change the index and re-run the cell multiple times to see other images.
# Example of a picture
index = 7
plt.imshow(train_x_orig[index])
print ("y = " + str(train_y[0,index]) + ". It's a " + classes[train_y[0,index]].decode("utf-8") + " picture.")
# +
# Explore your dataset
m_train = train_x_orig.shape[0]
num_px = train_x_orig.shape[1]
m_test = test_x_orig.shape[0]
print ("Number of training examples: " + str(m_train))
print ("Number of testing examples: " + str(m_test))
print ("Each image is of size: (" + str(num_px) + ", " + str(num_px) + ", 3)")
print ("train_x_orig shape: " + str(train_x_orig.shape))
print ("train_y shape: " + str(train_y.shape))
print ("test_x_orig shape: " + str(test_x_orig.shape))
print ("test_y shape: " + str(test_y.shape))
# -
# As usual, you reshape and standardize the images before feeding them to the network. The code is given in the cell below.
#
# <img src="images/imvectorkiank.png" style="width:450px;height:300px;">
#
# <caption><center> <u>Figure 1</u>: Image to vector conversion. <br> </center></caption>
# +
# Reshape the training and test examples
train_x_flatten = train_x_orig.reshape(train_x_orig.shape[0], -1).T # The "-1" makes reshape flatten the remaining dimensions
test_x_flatten = test_x_orig.reshape(test_x_orig.shape[0], -1).T
# Standardize data to have feature values between 0 and 1.
train_x = train_x_flatten/255.
test_x = test_x_flatten/255.
print ("train_x's shape: " + str(train_x.shape))
print ("test_x's shape: " + str(test_x.shape))
# -
# $12,288$ equals $64 \times 64 \times 3$ which is the size of one reshaped image vector.
# ## 3 - Architecture of your model
# Now that you are familiar with the dataset, it is time to build a deep neural network to distinguish cat images from non-cat images.
#
# You will build two different models:
# - A 2-layer neural network
# - An L-layer deep neural network
#
# You will then compare the performance of these models, and also try out different values for $L$.
#
# Let's look at the two architectures.
#
# ### 3.1 - 2-layer neural network
#
# <img src="images/2layerNN_kiank.png" style="width:650px;height:400px;">
# <caption><center> <u>Figure 2</u>: 2-layer neural network. <br> The model can be summarized as: ***INPUT -> LINEAR -> RELU -> LINEAR -> SIGMOID -> OUTPUT***. </center></caption>
#
# <u>Detailed Architecture of figure 2</u>:
# - The input is a (64,64,3) image which is flattened to a vector of size $(12288,1)$.
# - The corresponding vector: $[x_0,x_1,...,x_{12287}]^T$ is then multiplied by the weight matrix $W^{[1]}$ of size $(n^{[1]}, 12288)$.
# - You then add a bias term and take its relu to get the following vector: $[a_0^{[1]}, a_1^{[1]},..., a_{n^{[1]}-1}^{[1]}]^T$.
# - You then repeat the same process.
# - You multiply the resulting vector by $W^{[2]}$ and add your intercept (bias).
# - Finally, you take the sigmoid of the result. If it is greater than 0.5, you classify it to be a cat.
#
# ### 3.2 - L-layer deep neural network
#
# It is hard to represent an L-layer deep neural network with the above representation. However, here is a simplified network representation:
#
# <img src="images/LlayerNN_kiank.png" style="width:650px;height:400px;">
# <caption><center> <u>Figure 3</u>: L-layer neural network. <br> The model can be summarized as: ***[LINEAR -> RELU] $\times$ (L-1) -> LINEAR -> SIGMOID***</center></caption>
#
# <u>Detailed Architecture of figure 3</u>:
# - The input is a (64,64,3) image which is flattened to a vector of size (12288,1).
# - The corresponding vector: $[x_0,x_1,...,x_{12287}]^T$ is then multiplied by the weight matrix $W^{[1]}$ and then you add the intercept $b^{[1]}$. The result is called the linear unit.
# - Next, you take the relu of the linear unit. This process could be repeated several times for each $(W^{[l]}, b^{[l]})$ depending on the model architecture.
# - Finally, you take the sigmoid of the final linear unit. If it is greater than 0.5, you classify it to be a cat.
#
# ### 3.3 - General methodology
#
# As usual you will follow the Deep Learning methodology to build the model:
# 1. Initialize parameters / Define hyperparameters
# 2. Loop for num_iterations:
# a. Forward propagation
# b. Compute cost function
# c. Backward propagation
# d. Update parameters (using parameters, and grads from backprop)
# 3. Use trained parameters to predict labels
#
# Let's now implement those two models!
# ## 4 - Two-layer neural network
#
# **Question**: Use the helper functions you have implemented in the previous assignment to build a 2-layer neural network with the following structure: *LINEAR -> RELU -> LINEAR -> SIGMOID*. The functions you may need and their inputs are:
# ```python
# def initialize_parameters(n_x, n_h, n_y):
# ...
# return parameters
# def linear_activation_forward(A_prev, W, b, activation):
# ...
# return A, cache
# def compute_cost(AL, Y):
# ...
# return cost
# def linear_activation_backward(dA, cache, activation):
# ...
# return dA_prev, dW, db
# def update_parameters(parameters, grads, learning_rate):
# ...
# return parameters
# ```
### CONSTANTS DEFINING THE MODEL ####
n_x = 12288 # num_px * num_px * 3
n_h = 7
n_y = 1
layers_dims = (n_x, n_h, n_y)
# +
# GRADED FUNCTION: two_layer_model
def two_layer_model(X, Y, layers_dims, learning_rate = 0.0075, num_iterations = 3000, print_cost=False):
"""
Implements a two-layer neural network: LINEAR->RELU->LINEAR->SIGMOID.
Arguments:
X -- input data, of shape (n_x, number of examples)
Y -- true "label" vector (containing 0 if cat, 1 if non-cat), of shape (1, number of examples)
layers_dims -- dimensions of the layers (n_x, n_h, n_y)
num_iterations -- number of iterations of the optimization loop
learning_rate -- learning rate of the gradient descent update rule
print_cost -- If set to True, this will print the cost every 100 iterations
Returns:
parameters -- a dictionary containing W1, W2, b1, and b2
"""
np.random.seed(1)
grads = {}
costs = [] # to keep track of the cost
m = X.shape[1] # number of examples
(n_x, n_h, n_y) = layers_dims
# Initialize parameters dictionary, by calling one of the functions you'd previously implemented
### START CODE HERE ### (≈ 1 line of code)
parameters = initialize_parameters(n_x, n_h, n_y)
### END CODE HERE ###
# Get W1, b1, W2 and b2 from the dictionary parameters.
W1 = parameters["W1"]
b1 = parameters["b1"]
W2 = parameters["W2"]
b2 = parameters["b2"]
# Loop (gradient descent)
for i in range(0, num_iterations):
# Forward propagation: LINEAR -> RELU -> LINEAR -> SIGMOID. Inputs: "X, W1, b1". Output: "A1, cache1, A2, cache2".
### START CODE HERE ### (≈ 2 lines of code)
A1, cache1 = linear_activation_forward(X, W1, b1, 'relu')
A2, cache2 = linear_activation_forward(A1, W2, b2, 'sigmoid')
### END CODE HERE ###
# Compute cost
### START CODE HERE ### (≈ 1 line of code)
cost = compute_cost(A2, Y)
### END CODE HERE ###
# Initializing backward propagation
dA2 = - (np.divide(Y, A2) - np.divide(1 - Y, 1 - A2))
# Backward propagation. Inputs: "dA2, cache2, cache1". Outputs: "dA1, dW2, db2; also dA0 (not used), dW1, db1".
### START CODE HERE ### (≈ 2 lines of code)
dA1, dW2, db2 = linear_activation_backward(dA2, cache2, 'sigmoid')
dA0, dW1, db1 = linear_activation_backward(dA1, cache1, 'relu')
### END CODE HERE ###
# Set grads['dWl'] to dW1, grads['db1'] to db1, grads['dW2'] to dW2, grads['db2'] to db2
grads['dW1'] = dW1
grads['db1'] = db1
grads['dW2'] = dW2
grads['db2'] = db2
# Update parameters.
### START CODE HERE ### (approx. 1 line of code)
parameters = update_parameters(parameters, grads, learning_rate)
### END CODE HERE ###
# Retrieve W1, b1, W2, b2 from parameters
W1 = parameters["W1"]
b1 = parameters["b1"]
W2 = parameters["W2"]
b2 = parameters["b2"]
# Print the cost every 100 training example
if print_cost and i % 100 == 0:
print("Cost after iteration {}: {}".format(i, np.squeeze(cost)))
if print_cost and i % 100 == 0:
costs.append(cost)
# plot the cost
plt.plot(np.squeeze(costs))
plt.ylabel('cost')
plt.xlabel('iterations (per tens)')
plt.title("Learning rate =" + str(learning_rate))
plt.show()
return parameters
# -
# Run the cell below to train your parameters. See if your model runs. The cost should be decreasing. It may take up to 5 minutes to run 2500 iterations. Check if the "Cost after iteration 0" matches the expected output below, if not click on the square (⬛) on the upper bar of the notebook to stop the cell and try to find your error.
parameters = two_layer_model(train_x, train_y, layers_dims = (n_x, n_h, n_y), num_iterations = 2500, print_cost=True)
# **Expected Output**:
# <table>
# <tr>
# <td> **Cost after iteration 0**</td>
# <td> 0.6930497356599888 </td>
# </tr>
# <tr>
# <td> **Cost after iteration 100**</td>
# <td> 0.6464320953428849 </td>
# </tr>
# <tr>
# <td> **...**</td>
# <td> ... </td>
# </tr>
# <tr>
# <td> **Cost after iteration 2400**</td>
# <td> 0.048554785628770206 </td>
# </tr>
# </table>
# Good thing you built a vectorized implementation! Otherwise it might have taken 10 times longer to train this.
#
# Now, you can use the trained parameters to classify images from the dataset. To see your predictions on the training and test sets, run the cell below.
predictions_train = predict(train_x, train_y, parameters)
# **Expected Output**:
# <table>
# <tr>
# <td> **Accuracy**</td>
# <td> 1.0 </td>
# </tr>
# </table>
predictions_test = predict(test_x, test_y, parameters)
# **Expected Output**:
#
# <table>
# <tr>
# <td> **Accuracy**</td>
# <td> 0.72 </td>
# </tr>
# </table>
# **Note**: You may notice that running the model on fewer iterations (say 1500) gives better accuracy on the test set. This is called "early stopping" and we will talk about it in the next course. Early stopping is a way to prevent overfitting.
#
# Congratulations! It seems that your 2-layer neural network has better performance (72%) than the logistic regression implementation (70%, assignment week 2). Let's see if you can do even better with an $L$-layer model.
# ## 5 - L-layer Neural Network
#
# **Question**: Use the helper functions you have implemented previously to build an $L$-layer neural network with the following structure: *[LINEAR -> RELU]$\times$(L-1) -> LINEAR -> SIGMOID*. The functions you may need and their inputs are:
# ```python
# def initialize_parameters_deep(layer_dims):
# ...
# return parameters
# def L_model_forward(X, parameters):
# ...
# return AL, caches
# def compute_cost(AL, Y):
# ...
# return cost
# def L_model_backward(AL, Y, caches):
# ...
# return grads
# def update_parameters(parameters, grads, learning_rate):
# ...
# return parameters
# ```
### CONSTANTS ###
layers_dims = [12288, 20, 7, 5, 1] # 5-layer model
# +
# GRADED FUNCTION: L_layer_model
def L_layer_model(X, Y, layers_dims, learning_rate = 0.0075, num_iterations = 3000, print_cost=False):#lr was 0.009
"""
Implements a L-layer neural network: [LINEAR->RELU]*(L-1)->LINEAR->SIGMOID.
Arguments:
X -- data, numpy array of shape (number of examples, num_px * num_px * 3)
Y -- true "label" vector (containing 0 if cat, 1 if non-cat), of shape (1, number of examples)
layers_dims -- list containing the input size and each layer size, of length (number of layers + 1).
learning_rate -- learning rate of the gradient descent update rule
num_iterations -- number of iterations of the optimization loop
print_cost -- if True, it prints the cost every 100 steps
Returns:
parameters -- parameters learnt by the model. They can then be used to predict.
"""
np.random.seed(1)
costs = [] # keep track of cost
# Parameters initialization.
### START CODE HERE ###
parameters = initialize_parameters_deep(layers_dims)
### END CODE HERE ###
# Loop (gradient descent)
for i in range(0, num_iterations):
# Forward propagation: [LINEAR -> RELU]*(L-1) -> LINEAR -> SIGMOID.
### START CODE HERE ### (≈ 1 line of code)
AL, caches = L_model_forward(X, parameters)
### END CODE HERE ###
# Compute cost.
### START CODE HERE ### (≈ 1 line of code)
cost = compute_cost(AL, Y)
### END CODE HERE ###
# Backward propagation.
### START CODE HERE ### (≈ 1 line of code)
grads = L_model_backward(AL, Y, caches)
### END CODE HERE ###
# Update parameters.
### START CODE HERE ### (≈ 1 line of code)
parameters = update_parameters(parameters, grads, learning_rate)
### END CODE HERE ###
# Print the cost every 100 training example
if print_cost and i % 100 == 0:
print ("Cost after iteration %i: %f" %(i, cost))
if print_cost and i % 100 == 0:
costs.append(cost)
# plot the cost
plt.plot(np.squeeze(costs))
plt.ylabel('cost')
plt.xlabel('iterations (per tens)')
plt.title("Learning rate =" + str(learning_rate))
plt.show()
return parameters
# -
# You will now train the model as a 5-layer neural network.
#
# Run the cell below to train your model. The cost should decrease on every iteration. It may take up to 5 minutes to run 2500 iterations. Check if the "Cost after iteration 0" matches the expected output below, if not click on the square (⬛) on the upper bar of the notebook to stop the cell and try to find your error.
parameters = L_layer_model(train_x, train_y, layers_dims, num_iterations = 2500, print_cost = True)
# **Expected Output**:
# <table>
# <tr>
# <td> **Cost after iteration 0**</td>
# <td> 0.771749 </td>
# </tr>
# <tr>
# <td> **Cost after iteration 100**</td>
# <td> 0.672053 </td>
# </tr>
# <tr>
# <td> **...**</td>
# <td> ... </td>
# </tr>
# <tr>
# <td> **Cost after iteration 2400**</td>
# <td> 0.092878 </td>
# </tr>
# </table>
pred_train = predict(train_x, train_y, parameters)
# <table>
# <tr>
# <td>
# **Train Accuracy**
# </td>
# <td>
# 0.985645933014
# </td>
# </tr>
# </table>
pred_test = predict(test_x, test_y, parameters)
# **Expected Output**:
#
# <table>
# <tr>
# <td> **Test Accuracy**</td>
# <td> 0.8 </td>
# </tr>
# </table>
# Congrats! It seems that your 5-layer neural network has better performance (80%) than your 2-layer neural network (72%) on the same test set.
#
# This is good performance for this task. Nice job!
#
# Though in the next course on "Improving deep neural networks" you will learn how to obtain even higher accuracy by systematically searching for better hyperparameters (learning_rate, layers_dims, num_iterations, and others you'll also learn in the next course).
# ## 6) Results Analysis
#
# First, let's take a look at a few images the L-layer model labeled incorrectly.
print_mislabeled_images(classes, test_x, test_y, pred_test)
# **A few types of images the model tends to do poorly on include:**
# - Cat body in an unusual position
# - Cat appears against a background of a similar color
# - Unusual cat color and species
# - Camera Angle
# - Brightness of the picture
# - Scale variation (cat is very large or small in image)
# ## 7) Test with your own image (optional/ungraded exercise) ##
#
# Congratulations on finishing this assignment. You can use your own image and see the output of your model. To do that:
# 1. Click on "File" in the upper bar of this notebook, then click "Open" to go to your Coursera Hub.
# 2. Add your image to this Jupyter Notebook's directory, in the "images" folder
# 3. Change your image's name in the following code
# 4. Run the code and check if the algorithm is right (1 = cat, 0 = non-cat)!
# +
## START CODE HERE ##
my_image = "my_image.jpg" # change this to the name of your image file
my_label_y = [1] # the true class of your image (1 -> cat, 0 -> non-cat)
## END CODE HERE ##
fname = "images/" + my_image
image = np.array(ndimage.imread(fname, flatten=False))
my_image = scipy.misc.imresize(image, size=(num_px,num_px)).reshape((num_px*num_px*3,1))
my_predicted_image = predict(my_image, my_label_y, parameters)
plt.imshow(image)
print ("y = " + str(np.squeeze(my_predicted_image)) + ", your L-layer model predicts a \"" + classes[int(np.squeeze(my_predicted_image)),].decode("utf-8") + "\" picture.")
# -
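# > Note: `scipy.ndimage.imread` and `scipy.misc.imresize` were removed in recent SciPy releases, so the cell above only runs with the older SciPy used by the course environment. The sketch below (not part of the original assignment) does the same loading and resizing with PIL, reusing `fname`, `num_px`, `my_label_y`, `parameters`, `predict`, and `classes` from above.
# +
from PIL import Image  # PIL-based alternative for newer SciPy versions (sketch)
image = np.array(Image.open(fname).convert("RGB"))
my_image = np.array(Image.open(fname).convert("RGB").resize((num_px, num_px)))
my_image = my_image.reshape((num_px * num_px * 3, 1)) / 255.  # flatten and rescale to [0, 1]; adjust if your training data was preprocessed differently
my_predicted_image = predict(my_image, my_label_y, parameters)
plt.imshow(image)
print ("y = " + str(np.squeeze(my_predicted_image)) + ", your L-layer model predicts a \"" + classes[int(np.squeeze(my_predicted_image)),].decode("utf-8") + "\" picture.")
# -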
# **References**:
#
# - for auto-reloading external module: http://stackoverflow.com/questions/1907993/autoreload-of-modules-in-ipython
| 01-Neural Networks and Deep Learning/week4/Deep Neural Network for Image Classification-Application.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import collections
def print_len_abc_sized(x):
if isinstance(x, collections.abc.Sized):
print(len(x))
else:
print('x is not Sized')
print_len_abc_sized([0, 1, 2])
print_len_abc_sized('abc')
print_len_abc_sized({0, 1, 2})
print_len_abc_sized(100)
def print_len_abc_sequence(x):
if isinstance(x, collections.abc.Sequence):
print(len(x))
else:
print('x is not Sequence')
print_len_abc_sequence([0, 1, 2])
print_len_abc_sequence('abc')
print_len_abc_sequence({0, 1, 2})
print_len_abc_sequence({'k1': 1, 'k2': 2, 'k3': 3})
def print_len_abc_mutablesequence(x):
if isinstance(x, collections.abc.MutableSequence):
print(len(x))
else:
print('x is not MutableSequence')
print_len_abc_mutablesequence([0, 1, 2])
print_len_abc_mutablesequence('abc')
print_len_abc_mutablesequence((0, 1, 2))
class MySequence(collections.abc.Sequence):
def __len__(self):
return 10
# +
# ms = MySequence()
# TypeError: Can't instantiate abstract class MySequence with abstract methods __getitem__
# -
class MySequence(collections.abc.Sequence):
def __len__(self):
return 10
def __getitem__(self, i):
return i
ms = MySequence()
print(len(ms))
print(ms[3])
print(ms.index(5))
print(list(reversed(ms)))
print(isinstance(ms, collections.abc.Sequence))
print(hasattr(ms, '__len__'))
class MySequence_bare():
def __len__(self):
return 10
def __getitem__(self, i):
return i
msb = MySequence_bare()
print(len(msb))
print(msb[3])
# +
# print(msb.index(5))
# AttributeError: 'MySequence_bare' object has no attribute 'index'
# -
print(isinstance(msb, collections.abc.Sequence))
print(hasattr(msb, '__len__'))
| notebook/print_len_collections_abc.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # ARC Tools
# ## Coordinates conversions
# Below, `xyz` and `zmat` refer to Cartesian and internal coordinates, respectively
from arc.species.converter import (zmat_to_xyz,
xyz_to_str,
zmat_from_xyz,
zmat_to_str,
xyz_to_xyz_file_format,
xyz_file_format_to_xyz,
check_xyz_dict,
check_zmat_dict,
str_to_zmat)
from arc.species.species import ARCSpecies
from arc.species.zmat import consolidate_zmat
import pprint
path = '/home/alongd/Code/runs/T3/35/iteration_0/ARC/calcs/Species/C2H4_0/freq_a6950/output.out'
xyz1 = xyz_to_str(path)
pprint.pprint(zmat_from_xyz(xyz1))
# ##### xyz str to ARC's xyz dict:
# Note: `xyz_str` could also be a path to a file from which the coordinates will be parsed
# +
xyz_str = """O 1.53830201 0.86423425 0.07482439
C 0.94923576 -0.20847619 -0.03881977
C -0.56154542 -0.31516675 -0.05011465
O -1.18981166 0.93489731 0.17603211
H 1.49712659 -1.15833718 -0.15458647
H -0.87737433 -0.70077243 -1.02287491
H -0.87053611 -1.01071746 0.73427128
H -0.48610273 1.61361259 0.11915705"""
xyz_dict = check_xyz_dict(xyz_str)
pprint.pprint(xyz_dict)
# -
# ##### ARC's xyz dict to xyz str:
# +
xyz_dict = {'symbols': ('S', 'O', 'O', 'N', 'C', 'H', 'H', 'H', 'H', 'H'),
'isotopes': (32, 16, 16, 14, 12, 1, 1, 1, 1, 1),
'coords': ((-0.06618943, -0.12360663, -0.07631983),
(-0.79539707, 0.86755487, 1.02675668),
(-0.68919931, 0.25421823, -1.34830853),
(0.01546439, -1.54297548, 0.44580391),
(1.59721519, 0.47861334, 0.00711),
(1.94428095, 0.40772394, 1.03719428),
(2.20318015, -0.14715186, -0.64755729),
(1.59252246, 1.5117895, -0.33908352),
(-0.8785689, -2.02453514, 0.38494433),
(-1.34135876, 1.49608206, 0.53295071))}
xyz_str = xyz_to_str(check_xyz_dict(xyz_dict))
print(xyz_str)
# -
# ##### xyz (dict or str) to XYZ file format:
# +
xyz = """O 1.53830201 0.86423425 0.07482439
C 0.94923576 -0.20847619 -0.03881977
C -0.56154542 -0.31516675 -0.05011465
O -1.18981166 0.93489731 0.17603211
H 1.49712659 -1.15833718 -0.15458647
H -0.87737433 -0.70077243 -1.02287491
H -0.87053611 -1.01071746 0.73427128
H -0.48610273 1.61361259 0.11915705"""
xyz_file = xyz_to_xyz_file_format(check_xyz_dict(xyz))
print(xyz_file)
# -
# ##### XYZ file format to ARC's xyz dict:
# +
xyz_file = """7
S 1.02558264 -0.04344404 -0.07343859
O -0.25448248 1.10710477 0.18359696
N -1.30762173 0.15796567 -0.10489290
C -0.49011438 -1.03704380 0.15365747
H -0.64869950 -1.85796321 -0.54773423
H -0.60359153 -1.37304859 1.18613964
H -1.43009127 0.23517346 -1.11797908"""
xyz_dict = xyz_file_format_to_xyz(xyz_file)
pprint.pprint(xyz_dict)
# -
# ##### xyz to zmat (non-consolidated):
# +
xyz = """C 0.00000000 0.00000000 0.00000000
H 0.63003260 0.63003260 0.63003260
H -0.63003260 -0.63003260 0.63003260
H -0.63003260 0.63003260 -0.63003260
H 0.63003260 -0.63003260 -0.63003260"""
zmat = zmat_from_xyz(xyz, mol=ARCSpecies(label='to_zmat', xyz=xyz).mol, consolidate=False)
pprint.pprint(zmat)
# -
# ##### xyz to zmat (consolidated):
# +
xyz = """C 0.00000000 0.00000000 0.00000000
H 0.63003260 0.63003260 0.63003260
H -0.63003260 -0.63003260 0.63003260
H -0.63003260 0.63003260 -0.63003260
H 0.63003260 -0.63003260 -0.63003260"""
zmat = zmat_from_xyz(xyz, mol=ARCSpecies(label='to_zmat', xyz=xyz).mol, consolidate=True)
pprint.pprint(zmat)
# -
# ##### zmat to xyz:
# +
zmat = {'symbols': ('N', 'C', 'X', 'C', 'C', 'C', 'C', 'C', 'C',
'C', 'X', 'C', 'H', 'H', 'H', 'H', 'X', 'H'),
'coords': ((None, None, None),
('R_1_0', None, None),
('RX_2|10|16_1|9|11', 'AX_2|3|10|11|16|17_1|1|9|9|11|11_0|2|7|10|7|16', None),
('R_3_1', 'AX_2|3|10|11|16|17_1|1|9|9|11|11_0|2|7|10|7|16',
'DX_3|11|17_1|9|11_2|10|16_0|7|7'),
('R_4_3', 'A_4_3_1', 'D_4_3_1_0'),
('R_5_3', 'A_5_3_4', 'D_5|9_3|7_4|8_1|6'),
('R_6_4', 'A_6_4_3', 'D_6_4_3_5'),
('R_7_5', 'A_7_5_3', 'D_7|8_5|6_3|4_4|3'),
('R_8_6', 'A_8_6_4', 'D_7|8_5|6_3|4_4|3'),
('R_9_7', 'A_9_7_8', 'D_5|9_3|7_4|8_1|6'),
('RX_2|10|16_1|9|11', 'AX_2|3|10|11|16|17_1|1|9|9|11|11_0|2|7|10|7|16', 'DX_10_9_7_8'),
('R_11_9', 'AX_2|3|10|11|16|17_1|1|9|9|11|11_0|2|7|10|7|16',
'DX_3|11|17_1|9|11_2|10|16_0|7|7'),
('R_12_8', 'A_12_8_6', 'D_12|13|14_8|6|4_6|8|6_4|7|8'),
('R_13_6', 'A_13_6_8', 'D_12|13|14_8|6|4_6|8|6_4|7|8'),
('R_14|15_4|5', 'A_14_4_6', 'D_12|13|14_8|6|4_6|8|6_4|7|8'),
('R_14|15_4|5', 'A_15_5_7', 'D_15_5_7_9'),
('RX_2|10|16_1|9|11', 'AX_2|3|10|11|16|17_1|1|9|9|11|11_0|2|7|10|7|16', 'DX_16_11_7_9'),
('R_17_11', 'AX_2|3|10|11|16|17_1|1|9|9|11|11_0|2|7|10|7|16',
'DX_3|11|17_1|9|11_2|10|16_0|7|7')),
'vars': {'R_1_0': 1.160587988259717, 'R_3_1': 1.4334034806467013, 'R_4_3': 1.399627009160122,
'A_4_3_1': 120.07019183443934, 'D_4_3_1_0': 359.9937872737281,
'R_5_3': 1.399473903784766, 'A_5_3_4': 119.881331942158, 'R_6_4': 1.3958533508611464,
'A_6_4_3': 120.08126334426318, 'D_6_4_3_5': 359.9999896133953,
'R_7_5': 1.3971923740671386, 'A_7_5_3': 119.9563068700548, 'R_8_6': 1.3959594593665048,
'A_8_6_4': 119.9482566876851, 'R_9_7': 1.4305023206087322, 'A_9_7_8': 120.02391642181419,
'DX_10_9_7_8': 179.99687489419622, 'R_11_9': 1.2007843136670773,
'R_12_8': 1.0880999543508234, 'A_12_8_6': 119.53324505705585,
'R_13_6': 1.0875755415362989, 'A_13_6_8': 119.9515345136673,
'A_14_4_6': 119.41761055861897, 'A_15_5_7': 119.8756160539422,
'DX_16_11_7_9': 179.99364917335183, 'R_17_11': 1.0653051483625748,
'RX_2|10|16_1|9|11': 1.0, 'R_14|15_4|5': 1.088721623781535,
'AX_2|3|10|11|16|17_1|1|9|9|11|11_0|2|7|10|7|16': 90.0,
'DX_3|11|17_1|9|11_2|10|16_0|7|7': 180.0,
'D_12|13|14_8|6|4_6|8|6_4|7|8': 180.00000001419417,
'D_5|9_3|7_4|8_1|6': 180.0000026484778, 'D_15_5_7_9': 7.778248095798993e-06,
'D_7|8_5|6_3|4_4|3': 6.434770254282058e-06},
'map': {0: 5, 1: 4, 2: 'X', 3: 3, 4: 2, 5: 6, 6: 1, 7: 7, 8: 0, 9: 8, 10: 'X', 11: 9, 12: 10,
13: 11, 14: 12, 15: 13, 16: 'X', 17: 14}}
xyz_dict = zmat_to_xyz(check_zmat_dict(zmat))
pprint.pprint(xyz_dict)
# -
# ##### consolidate a zmat:
# +
zmat = {'symbols': ('C', 'H', 'H', 'H', 'H'),
'coords': ((None, None, None),
('R_0_1', None, None),
('R_0_2', 'A_0_1_2', None),
('R_0_3', 'A_0_1_3', 'D_0_1_2_3'),
('R_0_4', 'A_0_1_4', 'D_0_1_2_4')),
'vars': {'A_0_1_2': 35.26438764560717,
'A_0_1_3': 35.26438764560717,
'A_0_1_4': 35.26438764560717,
'D_0_1_2_3': 324.73561031724535,
'D_0_1_2_4': 35.26438968275465,
'R_0_1': 1.0912484581271156,
'R_0_2': 1.0912484581271156,
'R_0_3': 1.0912484581271156,
'R_0_4': 1.0912484581271156},
'map': {0: 0, 1: 1, 2: 2, 3: 3, 4: 4}}
zmat = consolidate_zmat(zmat)
pprint.pprint(zmat)
# -
# ##### zmat dict to ESS-specific zmat string:
# +
zmat = {'symbols': ('C', 'C', 'C', 'C', 'X', 'C', 'C', 'C', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H'),
'coords': ((None, None, None), ('R_1|7_0|6', None, None), ('R_2|6_1|5', 'A_2|7_1|6_0|5', None),
('R_3|5_2|3', 'A_3|6_2|5_1|3', 'D_3|7_2|6_1|5_0|3'),
('RX_4_3', 'AX_4|5_3|3_2|4', 'DX_4_3_2_1'),
('R_3|5_2|3', 'AX_4|5_3|3_2|4', 'DX_5_3_4_2'),
('R_2|6_1|5', 'A_3|6_2|5_1|3', 'D_6_5_3_1'),
('R_1|7_0|6', 'A_2|7_1|6_0|5', 'D_3|7_2|6_1|5_0|3'),
('R_8|11|12|15_0|2|5|7', 'A_8|15_0|7_1|6', 'D_8_0_1_7'),
('R_9|14_0|7', 'A_9|14_0|7_1|6', 'D_9_0_1_8'),
('R_10|13_1|6', 'A_10|13_1|6_0|7', 'D_10|14_1|7_0|6_9|13'),
('R_8|11|12|15_0|2|5|7', 'A_11|12_2|5_1|6', 'D_11|12_2|5_1|6_0|7'),
('R_8|11|12|15_0|2|5|7', 'A_11|12_2|5_1|6', 'D_11|12_2|5_1|6_0|7'),
('R_10|13_1|6', 'A_10|13_1|6_0|7', 'D_13_6_7_12'),
('R_9|14_0|7', 'A_9|14_0|7_1|6', 'D_10|14_1|7_0|6_9|13'),
('R_8|11|12|15_0|2|5|7', 'A_8|15_0|7_1|6', 'D_15_7_6_14')),
'vars': {'RX_4_3': 1.0, 'DX_4_3_2_1': 219.28799421779138, 'DX_5_3_4_2': 180.0,
'D_6_5_3_1': 78.69721089515058, 'D_8_0_1_7': 303.5079357762497,
'D_9_0_1_8': 179.99747417664557, 'D_13_6_7_12': 180.0829054665434,
'D_15_7_6_14': 180.00215607227028, 'R_1|7_0|6': 1.3381887062084776,
'R_2|6_1|5': 1.4407904325150618, 'R_3|5_2|3': 1.3006576158575789,
'R_8|11|12|15_0|2|5|7': 1.0853633184695155, 'R_9|14_0|7': 1.0856141082269883,
'R_10|13_1|6': 1.0886528591087101, 'A_2|7_1|6_0|5': 123.19585370239227,
'A_3|6_2|5_1|3': 121.52258708303276, 'AX_4|5_3|3_2|4': 90.0,
'A_8|15_0|7_1|6': 122.24044548570495, 'A_9|14_0|7_1|6': 120.41807743308047,
'A_10|13_1|6_0|7': 119.30818147722846, 'A_11|12_2|5_1|6': 119.14551997750254,
'D_3|7_2|6_1|5_0|3': 180.11338840380205, 'D_10|14_1|7_0|6_9|13': 0.011830716823514614,
'D_11|12_2|5_1|6_0|7': 359.8632362707074},
'map': {0: 0, 1: 1, 2: 2, 3: 3, 4: 'X15', 5: 4, 6: 5, 7: 6, 8: 7, 9: 8, 10: 9, 11: 10, 12: 11,
13: 12, 14: 13, 15: 14}}
# allowed formats are: 'gaussian', 'qchem', 'molpro', 'orca', or 'psi4'
zmat_str = zmat_to_str(zmat, zmat_format='gaussian', consolidate=True)
print(zmat_str)
| ipython/Tools/coordinates conversions.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Image Classification Model - Serving Function
#
# This notebook demonstrates how to deploy a Tensorflow model using MLRun & Nuclio.
#
# **In this notebook you will:**
# * Write a Tensorflow-Model class to load and predict on the incoming data
# * Deploy the model as a serverless function
# * Invoke the serving endpoint with data as:
# * URLs to images hosted on S3
# * Direct image send
#
# **Steps:**
# * [Define Nuclio function](#Define-Nuclio-function)
# * [Install dependencies and set config](#Install-dependencies-and-set-config)
# * [Model serving class](#Model-Serving-Class)
# * [Deploy the serving function to the cluster](#Deploy-the-serving-function-to-the-cluster)
# * [Define test parameters](#Define-test-parameters)
# * [Test the deployed function on the cluster](#Test-the-deployed-function-on-the-cluster)
# ## Define Nuclio Function
# To use the magic commands for deploying this Jupyter notebook as a Nuclio function, we must first import `nuclio`.
# Since we do not want to import nuclio in the actual function, the comment annotation `nuclio: ignore` is used. This marks the cell for nuclio, telling it to ignore the cell's values when building the function.
# nuclio: ignore
import nuclio
# ### Install dependencies and set config
# > Note: Since tensorflow is pulled from the base image, it is not installed directly as a build command.
# If it is not installed on your system, install it with: `pip install tensorflow`
# +
# %nuclio config kind="nuclio:serving"
# %nuclio env MODEL_CLASS=TF2Model
# tensorflow 2 use the default serving image (or the mlrun/ml-models for a faster build)
# %nuclio config spec.build.baseImage = "mlrun/mlrun"
# -
# Since we are using packages that may not be installed in our base image, or we want to make sure a specific package version is installed, we use the `%nuclio cmd` annotation.
# >`%nuclio cmd` runs both locally and during deployment by default, but it can be given the `-c` flag to run the commands only while deploying, or the `-l` flag to run them only in the local environment.
# %%nuclio cmd -c
pip install tensorflow>=2.1
pip install requests pillow
# ## Function Code
import warnings
warnings.simplefilter(action="ignore", category=FutureWarning)
import json
import numpy as np
import requests
from tensorflow import keras
from tensorflow.keras.models import load_model
from tensorflow.keras.preprocessing import image
from tensorflow.keras.preprocessing.image import load_img
from os import environ, path
from PIL import Image
from io import BytesIO
from urllib.request import urlopen
import mlrun
# ### Model Serving Class
# We define the `TFModel` class which we will use to define data handling and prediction of our model.
#
# The class should consist of:
# * `__init__(name, model_dir)` - Set up the internal parameters
# * `load(self)` - How to load the model and signal that it is ready for prediction
# * `preprocess(self, body)` - How to handle the incoming event, forming the request into an `{'instances': [<samples>]}` dictionary as required by the protocol
# * `predict(self, data)` - Receives an `{'instances': [<samples>]}` dictionary and returns the model's prediction as a list
# * `postprocess(self, data)` - Does any additional processing needed on the predictions.
class TFModel(mlrun.runtimes.MLModelServer):
def __init__(self, name: str, model_dir: str):
super().__init__(name, model_dir)
self.IMAGE_WIDTH = int(environ.get('IMAGE_WIDTH', '128'))
self.IMAGE_HEIGHT = int(environ.get('IMAGE_HEIGHT', '128'))
try:
with open(environ['classes_map'], 'r') as f:
self.classes = json.load(f)
except:
self.classes = None
def load(self):
model_file, extra_data = self.get_model('.h5')
self.model = load_model(model_file)
def preprocess(self, body):
try:
output = {'instances': []}
instances = body.get('instances', [])
for byte_image in instances:
img = Image.open(byte_image)
img = img.resize((self.IMAGE_WIDTH, self.IMAGE_HEIGHT))
# Load image
x = image.img_to_array(img)
x = np.expand_dims(x, axis=0)
output['instances'].append(x)
# Format instances list
output['instances'] = [np.vstack(output['instances'])]
return output
except:
raise Exception(f'received: {body}')
def predict(self, data):
images = data.get('instances', [])
# Predict
predicted_probability = self.model.predict(images)
# return prediction
return predicted_probability
def postprocess(self, predicted_probability):
if self.classes:
predicted_classes = np.around(predicted_probability, 1).tolist()[0]
predicted_probabilities = predicted_probability.tolist()[0]
return {
'prediction': [self.classes[str(int(cls))] for cls in predicted_classes],
f'{self.classes["1"]}-probability': predicted_probabilities
}
else:
return predicted_probability.tolist()[0]
# To let our nuclio builder know that our function code ends at this point we will use the comment annotation `nuclio: end-code`.
#
# Any new cell from now on will be treated as if a `nuclio: ignore` comment was set, and will not be added to the function.
# +
# nuclio: end-code
# -
# ## Test the function locally
# Make sure your local TF / Keras version is the same as the one pulled into the nuclio image for accurate testing
#
# Set the served models and their file paths using: `SERVING_MODEL_<name> = <model file path>`
#
# > Note: this notebook assumes the model and categories are under <b>/User/mlrun/examples/</b>
from PIL import Image
from io import BytesIO
import matplotlib.pyplot as plt
import os
# ### Define test parameters
# +
# Testing event
cat_image_url = 'https://s3.amazonaws.com/iguazio-sample-data/images/catanddog/cat.102.jpg'
response = requests.get(cat_image_url)
cat_image = response.content
img = Image.open(BytesIO(cat_image))
print('Test image:')
plt.imshow(img)
# -
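# Before deploying, we can exercise the `TFModel` class directly on the test image. This is a minimal local sketch (not part of the original notebook); it assumes the trained model file is reachable at the path used below and that your local TF/Keras version matches the one used for training.
# +
# Local sanity check (sketch): run preprocess -> predict -> postprocess on the test image
local_model_path = '/User/mlrun/examples/tf2/cats_n_dogs.h5'  # assumed location; adjust to your environment
serve = TFModel('cat_vs_dog_tfv2', model_dir=local_model_path)
serve.load()  # loads the .h5 model through MLModelServer.get_model
event = {'instances': [BytesIO(cat_image)]}
print(serve.postprocess(serve.predict(serve.preprocess(event))))
# -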
# ### Define Function specifications
# +
import os
from mlrun import mlconf
# Model Server variables
model_class = 'TFModel'
model_name = 'cat_vs_dog_tfv2' # Define for later use in tests
models = {model_name: os.path.join(mlconf.artifact_path, 'tf2/cats_n_dogs.h5')}
# Specific model variables
function_envs = {
'IMAGE_HEIGHT': 128,
'IMAGE_WIDTH': 128,
'classes_map': '/User/artifacts/categories_map.json',
}
# -
# ## Deploy the serving function to the cluster
from mlrun import new_model_server, mount_v3io
# +
# Setup the model server function
fn = new_model_server('tf2-serving',
model_class=model_class,
models=models)
fn.set_envs(function_envs)
fn.spec.description = "tf2 image classification server"
fn.metadata.categories = ['serving', 'dl']
fn.metadata.labels = {'author': 'yaronh'}
fn.export("function.yaml")
# -
if "V3IO_HOME" in list(os.environ):
from mlrun import mount_v3io
fn.apply(mount_v3io())
else:
    # if you set up mlrun using the instructions at
# https://github.com/mlrun/mlrun/blob/master/hack/local/README.md
from mlrun.platforms import mount_pvc
fn.apply(mount_pvc('nfsvol', 'nfsvol', '/home/joyan/data'))
# Deploy the model server
addr = fn.deploy(project='cat-and-dog-servers')
# ## Test the deployed function on the cluster
# ### Test the deployed function (with URL)
# +
# URL event
event_body = json.dumps({"data_url": cat_image_url})
print(f'Sending event: {event_body}')
headers = {'Content-type': 'application/json'}
response = requests.post(url=addr + f'/{model_name}/predict', data=event_body, headers=headers)
response.content
# -
# ### Test the deployed function (with Jpeg Image)
# +
# URL event
event_body = cat_image
print(f'Sending image from {cat_image_url}')
plt.imshow(img)
headers = {'Content-type': 'image/jpeg'}
response = requests.post(url=addr + f'/{model_name}/predict/', data=event_body, headers=headers)
response.content
| tf2_serving/tf2_serving.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: tensorflow_env
# language: python
# name: tensorflow_env
# ---
# # Autoencoder LSTM
from numpy import array
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import LSTM
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import RepeatVector
from tensorflow.keras.layers import TimeDistributed
# ## Defining a sequence
sequence = array([0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9])
n_in = len(sequence)
sequence = sequence.reshape(1, n_in, 1)
sequence
# ## Building Model
model = Sequential()
model.add(LSTM(100, activation='relu', input_shape=(n_in,1)))
model.add(RepeatVector(n_in))
model.add(LSTM(100, activation='relu', return_sequences=True))
model.add(TimeDistributed(Dense(1)))
model.compile(optimizer='adam', loss='mse')
# ## Fitting the model
model.fit(sequence, sequence, epochs=300, verbose=0)
# ## Predicting
yhat = model.predict(sequence, verbose=0)
print(yhat[0,:,0])
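# ## Reconstruction error
# As a quick sanity check (not part of the original notebook), compare the reconstruction against the input sequence with the mean absolute error:
import numpy as np  # used only for this check
mae = np.mean(np.abs(yhat[0, :, 0] - sequence[0, :, 0]))
print(mae)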
| autoencoder_lstm.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="A4eoZWGBUE2F"
import matplotlib.pyplot as plt
from scipy.signal import find_peaks
from sklearn.cluster import KMeans
import seaborn as sns
import numpy as np
import sys, shutil
import pandas as pd
# + id="rgsSsZtWZ_jt"
if sys.version_info[0] < 3:
from StringIO import StringIO
else:
from io import StringIO
data = StringIO("""
1619866801532,197,185,182,206,208,,,,,,,,,,,,,266,265,197,185,182,206,208,,,,,,,,,,,,,266,265
1619866801679,200,188,188,208,210,,,,,,,,,,,,,269,244,200,188,188,208,210,,,,,,,,,,,,,269,244
1619866802048,205,192,192,213,215,,,,,,,,,,,,,270,261,205,192,192,213,215,,,,,,,,,,,,,270,261
1619866802161,204,190,190,212,212,,,,,,,,,,,,,269,262,204,190,190,212,212,,,,,,,,,,,,,269,262
1619866802294,192,175,177,195,198,,,,,,,,,,,,,273,260,192,175,177,195,198,,,,,,,,,,,,,273,260
1619866802428,178,164,164,185,190,,,,,,,,,,,,,133,258,178,164,164,185,190,,,,,,,,,,,,,133,258
1619866802630,172,149,149,170,171,,,,,,,,,,,,,276,260,172,149,149,170,171,,,,,,,,,,,,,276,260
1619866802836,148,118,121,129,141,,,,,,,,,,,,,256,252,148,118,121,129,141,,,,,,,,,,,,,256,252
1619866802977,141,115,119,130,141,,,,,,,,,,,,,251,250,141,115,119,130,141,,,,,,,,,,,,,251,250
1619866803108,135,114,118,137,147,,,,,,,,,,,,,251,251,135,114,118,137,147,,,,,,,,,,,,,251,251
1619866803231,137,122,122,140,155,,,,,,,,,,,,,252,258,137,122,122,140,155,,,,,,,,,,,,,252,258
1619866803371,151,136,138,160,163,252,,,,,,,,,,,,263,258,151,136,138,160,163,252,,,,,,,,,,,,263,258
1619866803492,173,160,163,185,192,,,,,,,,,,,,,267,259,173,160,163,185,192,,,,,,,,,,,,,267,259
1619866803611,170,160,160,189,190,,,,,,,,,,,,,255,255,170,160,160,189,190,,,,,,,,,,,,,255,255
1619866803749,172,164,162,194,192,,,,,,,,,,,,,261,256,172,164,162,194,192,,,,,,,,,,,,,261,256
1619866803891,174,165,165,195,193,,,,,,,,,,,,,269,258,174,165,165,195,193,,,,,,,,,,,,,269,258
1619866804058,162,155,154,181,,254,,,,,,,,,,,,263,251,162,155,154,181,,254,,,,,,,,,,,,263,251
1619866804187,156,148,146,178,172,252,,,,,,,,,,,,244,258,156,148,146,178,172,252,,,,,,,,,,,,244,258
1619866804318,156,148,143,185,,,,,,,,,,,,,,264,253,156,148,143,185,,,,,,,,,,,,,,264,253
1619866804453,151,144,138,169,155,,,,,,,,,,,,,250,258,151,144,138,169,155,,,,,,,,,,,,,250,258
1619866804587,172,164,161,187,172,,,,,,,,,,,,,270,255,172,164,161,187,172,,,,,,,,,,,,,270,255
1619866804735,176,171,167,202,,,,,,,,,,,,,,274,256,176,171,167,202,,,,,,,,,,,,,,274,256
1619866804869,162,160,153,187,171,,,,,,,,,,,,,262,256,162,160,153,187,171,,,,,,,,,,,,,262,256
1619866805001,183,179,174,,198,,,,,,,,,,,,,269,224,183,179,174,,198,,,,,,,,,,,,,269,224
1619866805121,183,177,172,,199,,,,,,,,,,,,,266,259,183,177,172,,199,,,,,,,,,,,,,266,259
1619866805242,199,193,185,203,196,,,,,,,,,,,,,272,262,199,193,185,203,196,,,,,,,,,,,,,272,262
1619866805372,,174,171,,,,,,,,,,,,,,,277,258,,174,171,,,,,,,,,,,,,,,277,258
1619866805496,191,187,181,,191,,,,,,,,,,,,,252,265,191,187,181,,191,,,,,,,,,,,,,252,265
1619866805626,,190,181,,,,,,,,,,,,,,,251,216,,190,181,,,,,,,,,,,,,,,251,216
1619866805749,,,,,,,,,,,,,,,,,,196,263,,,,,,,,,,,,,,,,,,196,263
1619866805885,,188,,,,,,,,,,,,,,,,214,261,,188,,,,,,,,,,,,,,,,214,261
1619866806008,,,,,,,,,,,,,,,,,,164,264,,,,,,,,,,,,,,,,,,164,264
1619866806117,,,,,,,,,,,,,,,,,,168,256,,,,,,,,,,,,,,,,,,168,256
1619866806232,,,,,,,,,,,,,,,,,,119,242,,,,,,,,,,,,,,,,,,119,242
1619866806364,,,,,,,,,,,,,,,,,,93,165,,,,,,,,,,,,,,,,,,93,165
1619866806509,,65,70,69,,,,,,,,,,,,,,172,202,,65,70,69,,,,,,,,,,,,,,172,202
1619866806660,51,24,,,,,,,,,,,,,,,,137,265,51,24,,,,,,,,,,,,,,,,137,265
1619866806805,,,,,,97,,,,,,,,,,,,94,229,,,,,,97,,,,,,,,,,,,94,229
1619866807002,,,,,,7,,,,,,,,,,,,,145,,,,,,7,,,,,,,,,,,,,145
1619866807133,,,,,,,,,,,,,,,,,,105,75,,,,,,,,,,,,,,,,,,105,75
1619866807255,,,,,,,,,,,,,,,,,,102,141,,,,,,,,,,,,,,,,,,102,141
1619866807386,,,,,,,,,,,,,,,,,,34,30,,,,,,,,,,,,,,,,,,34,30
1619866807509,,,,,,,,,,,,,,,,,,143,,,,,,,,,,,,,,,,,,,143,
1619866807676,,,,,,,,,,,,,,,,,,144,,,,,,,,,,,,,,,,,,,144,
1619866807807,,,,,,,,,,,,,,,,,,128,23,,,,,,,,,,,,,,,,,,128,23
1619866807949,,,,,,,,,,,,,,,,,,11,,,,,,,,,,,,,,,,,,,11,
1619866808088,,,,,,,,40,,,,,,,,,,15,2,,,,,,,,40,,,,,,,,,,15,2
1619866808215,,,,,,,,56,,,,,,,,,,,,,,,,,,,56,,,,,,,,,,,
1619866808347,,,,,,,,65,,,68,-3,-2,,93,,,,,,,,,,,,65,,,68,-3,-2,,93,,,,
1619866808465,,,,,,,,,,50,54,-1,-2,103,85,,,59,,,,,,,,,,,50,54,-1,-2,103,85,,,59,
1619866808604,,,,,,,,,,40,56,,17,130,94,208,210,,20,,,,,,,,,,40,56,,17,130,94,208,210,,20
1619866808726,,,,,,,,-1,6,40,66,31,30,138,104,222,233,,30,,,,,,,,-1,6,40,66,31,30,138,104,222,233,,30
1619866808848,,,,,,,,9,12,41,63,38,39,132,112,222,229,,38,,,,,,,,9,12,41,63,38,39,132,112,222,229,,38
1619866808971,,,,,,,,14,14,48,53,42,41,127,117,225,218,,41,,,,,,,,14,14,48,53,42,41,127,117,225,218,,41
1619866809089,,,,,,-7,-8,23,14,70,69,51,51,131,128,223,213,,51,,,,,,-7,-8,23,14,70,69,51,51,131,128,223,213,,51
1619866809209,,,,,,-5,-9,33,22,79,57,58,57,141,142,223,213,,57,,,,,,-5,-9,33,22,79,57,58,57,141,142,223,213,,57
1619866809340,,,,,,-2,-5,43,33,65,66,65,65,144,147,224,221,,65,,,,,,-2,-5,43,33,65,66,65,65,144,147,224,221,,65
1619866809475,,,,,,3,4,42,45,75,77,71,73,145,150,224,218,3,72,,,,,,3,4,42,45,75,77,71,73,145,150,224,218,3,72
1619866809617,,,,,,4,8,44,52,78,89,76,79,143,147,217,221,6,77,,,,,,4,8,44,52,78,89,76,79,143,147,217,221,6,77
1619866809745,-7,-11,-11,,-9,13,17,48,57,81,94,78,80,144,146,212,220,15,79,-7,-11,-11,,-9,13,17,48,57,81,94,78,80,144,146,212,220,15,79
1619866809890,-4,-9,-8,,-7,16,19,51,60,84,95,81,82,147,152,212,221,17,81,-4,-9,-8,,-7,16,19,51,60,84,95,81,82,147,152,212,221,17,81
1619866810017,-2,-6,-6,-5,-6,21,21,51,58,86,96,81,82,147,149,210,218,21,81,-2,-6,-6,-5,-6,21,21,51,58,86,96,81,82,147,149,210,218,21,81
1619866810137,0,-4,-4,-1,-2,24,23,54,58,91,90,83,85,149,150,217,218,23,84,0,-4,-4,-1,-2,24,23,54,58,91,90,83,85,149,150,217,218,23,84
1619866810236,-3,-7,-7,-5,-4,24,26,61,60,95,89,87,87,149,147,217,216,25,87,-3,-7,-7,-5,-4,24,26,61,60,95,89,87,87,149,147,217,216,25,87
1619866810380,0,-3,-4,,-1,28,27,62,61,97,91,87,87,152,152,215,216,27,87,0,-3,-4,,-1,28,27,62,61,97,91,87,87,152,152,215,216,27,87
1619866810520,1,-2,-3,2,0,30,28,66,64,100,89,91,89,152,152,216,212,29,90,1,-2,-3,2,0,30,28,66,64,100,89,91,89,152,152,216,212,29,90
1619866810637,2,-1,-2,3,0,32,29,68,65,102,89,90,89,151,152,218,212,30,89,2,-1,-2,3,0,32,29,68,65,102,89,90,89,151,152,218,212,30,89
1619866810763,2,0,-3,4,1,34,32,65,64,100,95,92,91,151,151,216,212,33,91,2,0,-3,4,1,34,32,65,64,100,95,92,91,151,151,216,212,33,91
1619866810903,2,-2,-2,3,2,38,35,70,67,101,98,93,94,153,152,216,215,36,93,2,-2,-2,3,2,38,35,70,67,101,98,93,94,153,152,216,215,36,93
1619866811032,2,-1,-1,2,4,37,33,70,69,98,99,93,94,152,151,215,217,35,93,2,-1,-1,2,4,37,33,70,69,98,99,93,94,152,151,215,217,35,93
1619866811187,1,-2,-2,1,2,37,32,71,69,99,98,92,92,152,151,215,215,34,92,1,-2,-2,1,2,37,32,71,69,99,98,92,92,152,151,215,215,34,92
1619866811328,2,-2,-1,2,2,37,34,72,70,101,98,94,94,155,149,219,216,35,94,2,-2,-1,2,2,37,34,72,70,101,98,94,94,155,149,219,216,35,94
1619866811457,1,-3,-3,1,1,34,34,70,69,101,98,94,94,152,149,218,218,34,94,1,-3,-3,1,1,34,34,70,69,101,98,94,94,152,149,218,218,34,94
1619866811600,0,-4,-4,2,0,38,32,71,71,101,99,94,92,151,149,219,217,35,93,0,-4,-4,2,0,38,32,71,71,101,99,94,92,151,149,219,217,35,93
1619866811720,0,-3,-4,0,0,34,29,69,69,100,99,88,89,152,148,218,222,31,88,0,-3,-4,0,0,34,29,69,69,100,99,88,89,152,148,218,222,31,88
1619866811838,0,-3,-4,0,0,33,28,69,68,100,98,87,90,152,150,217,220,30,88,0,-3,-4,0,0,33,28,69,68,100,98,87,90,152,150,217,220,30,88
1619866811944,1,-2,-3,1,1,33,29,69,69,101,98,93,90,151,149,219,220,31,91,1,-2,-3,1,1,33,29,69,69,101,98,93,90,151,149,219,220,31,91
1619866812047,1,-3,-3,0,0,32,29,69,68,100,97,87,89,151,148,216,217,30,88,1,-3,-3,0,0,32,29,69,68,100,97,87,89,151,148,216,217,30,88
1619866812157,0,-4,-4,0,0,34,29,71,70,105,98,95,92,152,148,216,220,31,93,0,-4,-4,0,0,34,29,71,70,105,98,95,92,152,148,216,220,31,93
1619866812276,0,-3,-4,0,1,38,35,74,73,106,100,101,98,151,148,215,221,36,99,0,-3,-4,0,1,38,35,74,73,106,100,101,98,151,148,215,221,36,99
1619866812394,7,2,2,10,8,46,44,84,81,117,112,110,107,154,151,218,222,45,108,7,2,2,10,8,46,44,84,81,117,112,110,107,154,151,218,222,45,108
1619866812503,17,11,10,17,20,51,51,97,91,129,126,115,116,152,157,220,224,51,115,17,11,10,17,20,51,51,97,91,129,126,115,116,152,157,220,224,51,115
1619866812614,28,22,22,28,27,65,68,108,105,142,136,125,130,156,157,220,226,66,127,28,22,22,28,27,65,68,108,105,142,136,125,130,156,157,220,226,66,127
1619866812736,43,36,36,42,42,81,79,126,120,,159,139,139,159,157,222,220,80,139,43,36,36,42,42,81,79,126,120,,159,139,139,159,157,222,220,80,139
1619866812841,53,45,46,50,51,85,87,134,130,,162,153,153,152,153,220,223,86,153,53,45,46,50,51,85,87,134,130,,162,153,153,152,153,220,223,86,153
1619866812946,59,53,53,58,57,95,97,141,139,,169,157,160,154,153,217,223,96,158,59,53,53,58,57,95,97,141,139,,169,157,160,154,153,217,223,96,158
1619866813067,69,64,63,68,68,103,104,143,145,,,167,169,155,152,222,222,103,168,69,64,63,68,68,103,104,143,145,,,167,169,155,152,222,222,103,168
1619866813280,74,68,67,72,71,109,109,,156,,,171,172,156,150,222,220,109,171,74,68,67,72,71,109,109,,156,,,171,172,156,150,222,220,109,171
1619866813439,85,77,77,80,80,111,114,156,154,,161,182,183,157,150,223,221,112,182,85,77,77,80,80,111,114,156,154,,161,182,183,157,150,223,221,112,182
1619866813541,84,77,76,79,79,111,114,157,155,,,180,183,157,152,223,221,112,181,84,77,76,79,79,111,114,157,155,,,180,183,157,152,223,221,112,181
1619866813642,79,72,72,76,76,110,111,,153,,,,180,154,150,221,221,110,180,79,72,72,76,76,110,111,,153,,,,180,154,150,221,221,110,180
1619866813755,70,66,65,71,70,106,107,143,145,,,170,170,154,150,221,220,106,170,70,66,65,71,70,106,107,143,145,,,170,170,154,150,221,220,106,170
1619866813878,58,52,52,57,57,93,94,139,137,,160,156,157,150,149,221,215,93,156,58,52,52,57,57,93,94,139,137,,160,156,157,150,149,221,215,93,156
1619866813979,46,40,40,46,47,80,80,132,123,137,,142,140,153,152,220,219,80,141,46,40,40,46,47,80,80,132,123,137,,142,140,153,152,220,219,80,141
1619866814097,33,28,28,34,34,72,73,110,111,140,139,130,131,156,154,219,224,72,130,33,28,28,34,34,72,73,110,111,140,139,130,131,156,154,219,224,72,130
1619866814213,20,14,13,20,22,53,52,99,91,130,123,117,118,151,153,220,224,52,117,20,14,13,20,22,53,52,99,91,130,123,117,118,151,153,220,224,52,117
1619866814325,8,4,3,10,10,47,46,86,83,118,113,112,109,154,151,219,220,46,110,8,4,3,10,10,47,46,86,83,118,113,112,109,154,151,219,220,46,110
1619866814442,0,-4,-4,1,0,39,36,76,74,106,105,103,99,149,148,218,221,37,101,0,-4,-4,1,0,39,36,76,74,106,105,103,99,149,148,218,221,37,101
1619866814558,-1,-6,-6,0,-1,34,29,72,71,100,98,98,94,150,145,218,222,31,96,-1,-6,-6,0,-1,34,29,72,71,100,98,98,94,150,145,218,222,31,96
1619866814676,-1,-5,-5,-2,-1,28,26,64,68,94,96,89,90,151,147,218,223,27,89,-1,-5,-5,-2,-1,28,26,64,68,94,96,89,90,151,147,218,223,27,89
1619866814801,0,-5,-5,-2,-1,27,25,63,67,93,95,86,89,149,147,217,221,26,87,0,-5,-5,-2,-1,27,25,63,67,93,95,86,89,149,147,217,221,26,87
1619866814922,0,-4,-4,-3,-1,28,27,65,68,93,94,85,88,148,147,218,220,27,86,0,-4,-4,-3,-1,28,27,65,68,93,94,85,88,148,147,218,220,27,86
1619866815036,0,-5,-5,-2,-1,29,28,67,69,97,97,87,86,152,149,218,222,28,86,0,-5,-5,-2,-1,29,28,67,69,97,97,87,86,152,149,218,222,28,86
1619866815151,-1,-5,-6,-2,-1,30,29,66,68,97,93,87,86,153,148,218,222,29,86,-1,-5,-6,-2,-1,30,29,66,68,97,93,87,86,153,148,218,222,29,86
1619866815265,0,-4,-4,0,0,31,29,67,69,96,97,86,89,152,147,219,220,30,87,0,-4,-4,0,0,31,29,67,69,96,97,86,89,152,147,219,220,30,87
1619866815495,0,-3,-4,0,0,31,30,68,70,96,98,87,89,148,145,218,218,30,88,0,-3,-4,0,0,31,30,68,70,96,98,87,89,148,145,218,218,30,88
1619866815654,0,-3,-4,0,0,34,31,71,72,98,99,90,92,151,149,215,220,32,91,0,-3,-4,0,0,34,31,71,72,98,99,90,92,151,149,215,220,32,91
1619866815785,0,-4,-4,0,0,34,32,72,72,103,100,96,94,151,150,215,218,33,95,0,-4,-4,0,0,34,32,72,72,103,100,96,94,151,150,215,218,33,95
1619866815906,1,-2,-3,0,2,39,38,76,76,106,107,103,101,153,150,214,222,38,102,1,-2,-3,0,2,39,38,76,76,106,107,103,101,153,150,214,222,38,102
1619866816011,14,8,8,,13,49,47,91,90,121,118,112,112,152,153,221,220,48,112,14,8,8,,13,49,47,91,90,121,118,112,112,152,153,221,220,48,112
1619866816120,29,24,23,28,27,62,63,105,103,134,131,123,128,153,157,221,228,62,125,29,24,23,28,27,62,63,105,103,134,131,123,128,153,157,221,228,62,125
1619866816223,40,35,35,39,39,76,77,117,116,147,147,135,135,161,157,221,222,76,135,40,35,35,39,39,76,77,117,116,147,147,135,135,161,157,221,222,76,135
1619866816327,54,46,46,51,52,85,85,132,130,163,160,151,150,153,153,221,220,85,150,54,46,46,51,52,85,85,132,130,163,160,151,150,153,153,221,220,85,150
1619866816444,65,59,60,63,59,97,102,141,142,,164,161,163,151,156,219,220,99,162,65,59,60,63,59,97,102,141,142,,164,161,163,151,156,219,220,99,162
1619866816556,72,66,66,70,68,106,108,145,153,,,170,171,155,150,220,223,107,170,72,66,66,70,68,106,108,145,153,,,170,171,155,150,220,223,107,170
1619866816670,79,72,72,75,76,111,112,154,153,,,,180,155,151,221,227,111,179,79,72,72,75,76,111,112,154,153,,,,180,155,151,221,227,111,179
1619866816780,83,75,75,79,78,111,114,155,155,164,,182,182,157,151,224,228,112,182,83,75,75,79,78,111,114,155,155,164,,182,182,157,151,224,228,112,182
1619866816890,83,75,74,78,78,111,113,156,155,162,,,181,158,151,224,231,112,182,83,75,74,78,78,111,113,156,155,162,,,181,158,151,224,231,112,182
1619866817007,79,72,72,75,76,112,111,153,153,,,,180,155,150,222,226,111,179,79,72,72,75,76,112,111,153,153,,,,180,155,150,222,226,111,179
1619866817123,69,64,64,69,69,105,106,145,152,,,166,167,154,152,219,223,105,166,69,64,64,69,69,105,106,145,152,,,166,167,154,152,219,223,105,166
1619866817237,62,57,57,62,61,97,99,142,141,,163,160,162,149,153,220,222,98,161,62,57,57,62,61,97,99,142,141,,163,160,162,149,153,220,222,98,161
1619866817358,49,42,42,49,49,82,82,130,123,138,155,142,148,152,153,218,222,82,145,49,42,42,49,49,82,82,130,123,138,155,142,148,152,153,218,222,82,145
1619866817472,38,33,33,39,39,77,77,122,115,138,144,136,137,158,157,222,227,77,136,38,33,33,39,39,77,77,122,115,138,144,136,137,158,157,222,227,77,136
1619866817585,26,21,21,29,27,66,64,107,101,136,130,122,128,152,157,218,226,65,125,26,21,21,29,27,66,64,107,101,136,130,122,128,152,157,218,226,65,125
1619866817693,19,13,13,19,21,53,52,96,91,129,121,114,116,154,151,216,225,52,115,19,13,13,19,21,53,52,96,91,129,121,114,116,154,151,216,225,52,115
1619866817797,10,5,5,11,11,47,48,88,84,117,112,112,110,154,149,215,225,47,111,10,5,5,11,11,47,48,88,84,117,112,112,110,154,149,215,225,47,111
1619866817925,6,2,1,7,6,43,41,80,79,107,109,106,101,151,148,216,226,42,103,6,2,1,7,6,43,41,80,79,107,109,106,101,151,148,216,226,42,103
1619866818043,0,-3,-3,2,2,38,35,75,72,104,99,101,96,150,148,218,226,36,98,0,-3,-3,2,2,38,35,75,72,104,99,101,96,150,148,218,226,36,98
1619866818177,0,-3,-3,0,1,34,32,71,69,98,96,89,92,151,151,218,225,33,90,0,-3,-3,0,1,34,32,71,69,98,96,89,92,151,151,218,225,33,90
1619866818282,0,-4,-4,0,0,31,29,67,68,96,94,87,90,151,151,218,220,30,88,0,-4,-4,0,0,31,29,67,68,96,94,87,90,151,151,218,220,30,88
1619866818396,0,-4,-4,0,0,31,29,65,67,92,93,86,89,150,150,216,218,30,87,0,-4,-4,0,0,31,29,65,67,92,93,86,89,150,150,216,218,30,87
1619866818512,0,-3,-3,0,1,31,29,65,67,94,92,89,86,151,146,216,217,30,87,0,-3,-3,0,1,31,29,65,67,94,92,89,86,151,146,216,217,30,87
1619866818646,0,-3,-3,0,1,31,30,65,68,93,93,88,86,150,150,215,219,30,87,0,-3,-3,0,1,31,30,65,68,93,93,88,86,150,150,215,219,30,87
1619866818758,0,-3,-3,0,1,32,31,66,68,94,91,88,87,152,148,215,221,31,87,0,-3,-3,0,1,32,31,66,68,94,91,88,87,152,148,215,221,31,87
1619866818876,1,-3,-3,0,1,32,31,66,68,95,94,88,87,152,147,215,219,31,87,1,-3,-3,0,1,32,31,66,68,95,94,88,87,152,147,215,219,31,87
1619866818985,0,-2,-3,2,1,31,31,66,67,96,91,88,88,151,146,216,220,31,88,0,-2,-3,2,1,31,31,66,67,96,91,88,88,151,146,216,220,31,88
1619866819104,1,-2,-3,0,2,32,32,66,69,93,91,88,87,151,146,216,218,32,87,1,-2,-3,0,2,32,32,66,69,93,91,88,87,151,146,216,218,32,87
1619866819213,0,-3,-3,0,1,31,33,66,69,94,92,89,88,152,146,215,218,32,88,0,-3,-3,0,1,31,33,66,69,94,92,89,88,152,146,215,218,32,88
1619866819338,1,-2,-2,0,2,31,33,66,69,96,94,88,87,152,147,215,220,32,87,1,-2,-2,0,2,31,33,66,69,96,94,88,87,152,147,215,220,32,87
1619866819443,0,-2,-3,2,1,31,33,65,69,95,95,88,88,152,149,215,220,32,88,0,-2,-3,2,1,31,33,65,69,95,95,88,88,152,149,215,220,32,88
1619866819561,1,-2,-2,0,2,31,33,65,69,98,95,89,87,151,148,214,219,32,88,1,-2,-2,0,2,31,33,65,69,98,95,89,87,151,148,214,219,32,88
1619866819679,1,-1,-2,3,3,32,33,65,69,95,92,88,87,152,146,216,220,32,87,1,-1,-2,3,3,32,33,65,69,95,92,88,87,152,146,216,220,32,87
1619866819806,1,-2,-2,0,2,32,34,66,70,95,95,88,87,151,146,216,221,33,87,1,-2,-2,0,2,32,34,66,70,95,95,88,87,151,146,216,221,33,87
1619866819922,1,-2,-3,0,2,31,33,65,69,94,92,88,87,149,146,216,223,32,87,1,-2,-3,0,2,31,33,65,69,94,92,88,87,149,146,216,223,32,87
1619866820036,3,0,0,1,4,35,35,71,72,97,98,98,96,151,149,219,222,35,97,3,0,0,1,4,35,35,71,72,97,98,98,96,151,149,219,222,35,97
1619866820147,5,2,2,7,7,42,41,79,80,106,108,108,102,153,148,216,222,41,105,5,2,2,7,7,42,41,79,80,106,108,108,102,153,148,216,222,41,105
1619866820262,14,8,8,16,13,49,47,90,88,117,115,112,111,153,151,217,220,48,111,14,8,8,16,13,49,47,90,88,117,115,112,111,153,151,217,220,48,111
1619866820385,31,26,26,31,30,65,68,106,104,137,130,125,130,154,156,220,232,66,127,31,26,26,31,30,65,68,106,104,137,130,125,130,154,156,220,232,66,127
1619866820491,47,41,41,46,46,80,80,126,123,,152,139,138,154,157,223,225,80,138,47,41,41,46,46,80,80,126,123,,152,139,138,154,157,223,225,80,138
1619866820617,59,52,52,58,57,92,94,139,136,,169,154,153,150,157,218,220,93,153,59,52,52,58,57,92,94,139,136,,169,154,153,150,157,218,220,93,153
1619866820723,70,64,63,68,65,105,106,145,152,,,169,169,152,155,221,218,105,169,70,64,63,68,65,105,106,145,152,,,169,169,152,155,221,218,105,169
1619866820842,80,72,72,75,76,111,111,152,155,,,176,176,156,156,222,219,111,176,80,72,72,75,76,111,111,152,155,,,176,176,156,156,222,219,111,176
1619866820955,81,74,73,76,76,113,111,154,157,,,177,178,156,155,222,219,112,177,81,74,73,76,76,113,111,154,157,,,177,178,156,155,222,219,112,177
1619866821070,77,70,70,73,74,111,111,151,154,,,175,175,156,155,220,219,111,175,77,70,70,73,74,111,111,151,154,,,175,175,156,155,220,219,111,175
1619866821180,70,64,63,69,66,104,106,144,151,,,166,168,152,155,219,219,105,167,70,64,63,69,66,104,106,144,151,,,166,168,152,155,219,219,105,167
1619866821293,64,58,59,62,59,96,100,141,141,,171,160,161,151,155,219,221,98,160,64,58,59,62,59,96,100,141,141,,171,160,161,151,155,219,221,98,160
1619866821408,54,47,47,52,53,86,86,134,132,160,163,151,150,153,156,218,217,86,150,54,47,47,52,53,86,86,134,132,160,163,151,150,153,156,218,217,86,150
1619866821524,44,38,38,44,44,80,79,126,122,135,151,138,139,157,158,222,230,79,138,44,38,38,44,44,80,79,126,122,135,151,138,139,157,158,222,230,79,138
1619866821650,29,25,25,30,29,67,68,108,105,137,132,125,129,154,156,222,232,67,127,29,25,25,30,29,67,68,108,105,137,132,125,129,154,156,222,232,67,127
1619866821773,19,11,11,20,21,55,54,98,95,128,125,117,119,152,155,218,228,54,118,19,11,11,20,21,55,54,98,95,128,125,117,119,152,155,218,228,54,118
1619866821895,6,3,2,9,8,44,44,82,83,111,114,111,107,152,148,216,226,44,109,6,3,2,9,8,44,44,82,83,111,114,111,107,152,148,216,226,44,109
1619866822025,0,-4,-4,0,0,36,34,73,72,100,99,103,99,152,151,217,225,35,101,0,-4,-4,0,0,36,34,73,72,100,99,103,99,152,151,217,225,35,101
1619866822136,-1,-6,-6,-3,-2,30,29,68,69,95,94,97,94,151,148,219,225,29,95,-1,-6,-6,-3,-2,30,29,68,69,95,94,97,94,151,148,219,225,29,95
1619866822257,-1,-6,-6,-4,-2,29,27,65,68,91,91,88,91,149,147,216,222,28,89,-1,-6,-6,-4,-2,29,27,65,68,91,91,88,91,149,147,216,222,28,89
1619866822395,-1,-6,-6,-3,-2,29,28,63,68,91,91,87,89,151,151,218,221,28,88,-1,-6,-6,-3,-2,29,28,63,68,91,91,87,89,151,151,218,221,28,88
1619866822500,-1,-6,-6,-3,-2,29,27,64,67,91,90,86,89,151,147,217,220,28,87,-1,-6,-6,-3,-2,29,27,64,67,91,90,86,89,151,147,217,220,28,87
1619866822624,-1,-6,-6,-3,-3,29,27,65,67,92,91,86,89,152,147,218,220,28,87,-1,-6,-6,-3,-3,29,27,65,67,92,91,86,89,152,147,218,220,28,87
1619866822736,-1,-6,-6,-2,-2,30,28,66,68,92,92,88,90,150,146,217,221,29,89,-1,-6,-6,-2,-2,30,28,66,68,92,92,88,90,150,146,217,221,29,89
1619866822852,-1,-6,-6,-3,-2,29,28,67,69,95,94,87,90,151,151,217,223,28,88,-1,-6,-6,-3,-2,29,28,67,69,95,94,87,90,151,151,217,223,28,88
1619866822956,-1,-6,-6,-3,-2,29,28,66,69,94,96,87,90,151,147,216,223,28,88,-1,-6,-6,-3,-2,29,28,66,69,94,96,87,90,151,147,216,223,28,88
1619866823058,-1,-5,-6,-3,-2,29,29,68,69,95,97,87,90,151,147,218,221,29,88,-1,-5,-6,-3,-2,29,29,68,69,95,97,87,90,151,147,218,221,29,88
1619866823189,-1,-6,-6,-2,-2,32,31,69,70,96,98,95,92,151,148,219,223,31,93,-1,-6,-6,-2,-2,32,31,69,70,96,98,95,92,151,148,219,223,31,93
1619866823322,0,-3,-4,0,1,38,36,75,73,103,102,103,100,153,150,216,225,37,101,0,-3,-4,0,1,38,36,75,73,103,102,103,100,153,150,216,225,37,101
1619866823439,7,3,3,9,9,44,44,82,82,110,112,110,104,155,150,219,225,44,107,7,3,3,9,9,44,44,82,82,110,112,110,104,155,150,219,225,44,107
1619866823557,19,11,11,20,20,53,52,95,92,125,122,117,118,153,157,221,228,52,117,19,11,11,20,20,53,52,95,92,125,122,117,118,153,157,221,228,52,117
1619866823674,29,24,24,29,27,64,65,106,104,134,131,124,129,154,157,220,229,64,126,29,24,24,29,27,64,65,106,104,134,131,124,129,154,157,220,229,64,126
1619866823785,41,35,36,41,41,78,78,122,116,,148,136,139,159,158,222,227,78,137,41,35,36,41,41,78,78,122,116,,148,136,139,159,158,222,227,78,137
1619866823897,50,43,42,48,48,82,82,131,125,156,157,148,147,154,154,220,222,82,147,50,43,42,48,48,82,82,131,125,156,157,148,147,154,154,220,222,82,147
1619866824005,58,51,51,55,55,90,90,138,136,,165,152,152,152,152,217,222,90,152,58,51,51,55,55,90,90,138,136,,165,152,152,152,152,217,222,90,152
1619866824108,60,53,54,59,58,93,94,139,137,,164,156,156,152,151,217,222,93,156,60,53,54,59,58,93,94,139,137,,164,156,156,152,151,217,222,93,156
1619866824208,58,51,52,56,55,90,90,138,135,162,163,153,153,152,152,217,221,90,153,58,51,52,56,55,90,90,138,135,162,163,153,153,152,152,217,221,90,153
1619866824318,53,47,46,51,52,84,85,132,129,160,161,151,150,151,152,217,222,84,150,53,47,46,51,52,84,85,132,129,160,161,151,150,151,152,217,222,84,150
1619866824434,46,40,40,45,46,79,79,127,122,137,151,139,141,149,156,221,223,79,140,46,40,40,45,46,79,79,127,122,137,151,139,141,149,156,221,223,79,140
1619866824537,39,34,34,40,40,77,77,124,117,138,147,136,139,156,156,223,224,77,137,39,34,34,40,40,77,77,124,117,138,147,136,139,156,156,223,224,77,137
1619866824652,31,26,26,32,30,69,69,110,107,136,136,125,128,155,154,221,226,69,126,31,26,26,32,30,69,69,110,107,136,136,125,128,155,154,221,226,69,126
1619866824763,25,19,19,26,24,61,60,105,102,134,130,120,126,154,157,220,226,60,123,25,19,19,26,24,61,60,105,102,134,130,120,126,154,157,220,226,60,123
1619866824873,19,11,10,19,14,54,52,95,92,128,123,115,117,153,151,220,,53,116,19,11,10,19,14,54,52,95,92,128,123,115,117,153,151,220,,53,116
1619866824994,8,4,3,10,10,48,47,86,84,116,115,114,112,153,148,217,223,47,113,8,4,3,10,10,48,47,86,84,116,115,114,112,153,148,217,223,47,113
1619866825102,3,0,0,4,5,43,40,80,81,107,110,110,105,153,150,217,223,41,107,3,0,0,4,5,43,40,80,81,107,110,110,105,153,150,217,223,41,107
1619866825215,0,-4,-4,0,0,38,35,75,75,104,106,105,100,153,151,218,227,36,102,0,-4,-4,0,0,38,35,75,75,104,106,105,100,153,151,218,227,36,102
1619866825325,-1,-6,-5,-3,-1,32,32,72,71,101,98,100,97,153,152,216,225,32,98,-1,-6,-5,-3,-1,32,32,72,71,101,98,100,97,153,152,216,225,32,98
1619866825438,-1,-6,-6,-3,-2,29,29,69,70,96,98,98,95,152,151,219,223,29,96,-1,-6,-6,-3,-2,29,29,69,70,96,98,98,95,152,151,219,223,29,96
1619866825542,-1,-6,-6,-4,-3,29,28,67,69,95,97,96,93,153,152,219,222,28,94,-1,-6,-6,-4,-3,29,28,67,69,95,97,96,93,153,152,219,222,28,94
1619866825653,-2,-6,-6,-4,-2,28,28,67,69,95,97,91,94,153,151,219,223,28,92,-2,-6,-6,-4,-2,28,28,67,69,95,97,91,94,153,151,219,223,28,92
1619866825775,-1,-6,-6,-3,-2,29,28,68,69,96,97,97,94,152,151,218,220,28,95,-1,-6,-6,-3,-2,29,28,68,69,96,97,97,94,152,151,218,220,28,95
1619866825886,-2,-6,-6,-3,-2,29,28,69,69,97,98,97,95,152,149,220,220,28,96,-2,-6,-6,-3,-2,29,28,69,69,97,98,97,95,152,149,220,220,28,96
1619866826000,-1,-6,-6,-2,-2,36,34,74,74,104,102,102,99,152,152,217,224,35,100,-1,-6,-6,-2,-2,36,34,74,74,104,102,102,99,152,152,217,224,35,100
1619866826108,1,-2,-2,3,3,41,40,79,79,107,110,108,103,153,150,214,223,40,105,1,-2,-2,3,3,41,40,79,79,107,110,108,103,153,150,214,223,40,105
1619866826219,9,4,3,10,10,49,46,90,86,121,114,109,108,155,151,217,224,47,108,9,4,3,10,10,49,46,90,86,121,114,109,108,155,151,217,224,47,108
1619866826322,23,16,17,23,23,57,55,103,100,132,127,118,120,153,157,218,221,56,119,23,16,17,23,23,57,55,103,100,132,127,118,120,153,157,218,221,56,119
1619866826431,28,23,23,29,27,67,66,108,105,137,132,124,128,155,157,221,226,66,126,28,23,23,29,27,67,66,108,105,137,132,124,128,155,157,221,226,66,126
1619866826543,36,31,30,36,36,73,75,121,113,139,147,132,136,158,156,222,225,74,134,36,31,30,36,36,73,75,121,113,139,147,132,136,158,156,222,225,74,134
1619866826652,44,38,38,43,43,80,79,125,119,137,152,137,139,156,157,221,224,79,138,44,38,38,43,43,80,79,125,119,137,152,137,139,156,157,221,224,79,138
1619866826764,52,44,43,49,49,84,85,134,130,160,161,151,150,152,152,219,222,84,150,52,44,43,49,49,84,85,134,130,160,161,151,150,152,152,219,222,84,150
1619866826883,60,54,54,60,59,95,98,142,140,,168,158,159,151,155,217,221,96,158,60,54,54,60,59,95,98,142,140,,168,158,159,151,155,217,221,96,158
1619866827001,66,61,61,65,66,102,104,145,154,,,167,166,150,153,218,222,103,166,66,61,61,65,66,102,104,145,154,,,167,166,150,153,218,222,103,166
1619866827117,73,67,67,72,72,109,110,152,155,,,177,176,155,151,220,220,109,176,73,67,67,72,72,109,110,152,155,,,177,176,155,151,220,220,109,176
1619866827238,78,71,71,75,76,112,111,153,157,,,182,183,155,150,221,222,111,182,78,71,71,75,76,112,111,153,157,,,182,183,155,150,221,222,111,182
1619866827358,80,73,73,77,77,111,112,154,155,,,184,184,156,151,221,222,111,184,80,73,73,77,77,111,112,154,155,,,184,184,156,151,221,222,111,184
1619866827480,78,71,71,75,76,111,111,153,155,,,182,180,155,150,220,222,111,181,78,71,71,75,76,111,111,153,155,,,182,180,155,150,220,222,111,181
1619866827609,70,65,65,70,70,107,108,,153,,,170,171,154,150,218,218,107,170,70,65,65,70,70,107,108,,153,,,170,171,154,150,218,218,107,170
1619866827717,61,55,56,62,61,97,99,143,141,,162,161,164,152,150,220,220,98,162,61,55,56,62,61,97,99,143,141,,162,161,164,152,150,220,220,98,162
1619866827826,54,46,47,53,52,88,87,136,132,,156,152,151,151,153,217,221,87,151,54,46,47,53,52,88,87,136,132,,156,152,151,151,153,217,221,87,151
1619866827931,39,34,34,40,41,78,79,125,117,136,144,135,139,154,154,220,224,78,137,39,34,34,40,41,78,79,125,117,136,144,135,139,154,154,220,224,78,137
1619866828041,30,25,25,31,30,70,69,110,107,136,131,125,130,152,155,221,223,69,127,30,25,25,31,30,70,69,110,107,136,131,125,130,152,155,221,223,69,127
1619866828148,23,17,18,24,23,60,58,103,100,133,128,117,121,152,153,221,223,59,119,23,17,18,24,23,60,58,103,100,133,128,117,121,152,153,221,223,59,119
1619866828254,12,7,7,16,13,50,48,91,89,123,120,110,112,152,151,218,223,49,111,12,7,7,16,13,50,48,91,89,123,120,110,112,152,151,218,223,49,111
1619866828384,4,0,0,6,6,44,43,83,80,114,111,110,106,153,148,216,224,43,108,4,0,0,6,6,44,43,83,80,114,111,110,106,153,148,216,224,43,108
1619866828493,1,-2,-2,1,2,40,37,78,77,106,108,105,102,152,150,217,228,38,103,1,-2,-2,1,2,40,37,78,77,106,108,105,102,152,150,217,228,38,103
1619866828618,0,-5,-5,-2,-1,35,32,74,73,103,99,102,99,153,151,218,225,33,100,0,-5,-5,-2,-1,35,32,74,73,103,99,102,99,153,151,218,225,33,100
1619866828749,0,-6,-5,-3,-1,33,32,73,72,100,98,101,98,152,151,217,222,32,99,0,-6,-5,-3,-1,33,32,73,72,100,98,101,98,152,151,217,222,32,99
1619866828873,0,-5,-5,-3,-1,35,34,75,72,103,99,102,98,151,150,216,223,34,100,0,-5,-5,-3,-1,35,34,75,72,103,99,102,98,151,150,216,223,34,100
1619866829010,1,-2,-2,1,3,40,39,77,76,105,109,105,101,153,149,215,228,39,103,1,-2,-2,1,3,40,39,77,76,105,109,105,101,153,149,215,228,39,103
1619866829136,7,3,3,8,9,45,45,84,83,115,113,111,108,155,150,217,222,45,109,7,3,3,8,9,45,45,84,83,115,113,111,108,155,150,217,222,45,109
1619866829251,12,6,6,11,13,49,48,94,89,127,120,111,114,155,156,219,228,48,112,12,6,6,11,13,49,48,94,89,127,120,111,114,155,156,219,228,48,112
1619866829352,20,12,12,21,21,56,55,102,97,132,127,118,120,153,156,221,,55,119,20,12,12,21,21,56,55,102,97,132,127,118,120,153,156,221,,55,119
1619866829462,25,20,20,26,24,63,63,107,102,135,130,123,127,155,158,221,,63,125,25,20,20,26,24,63,63,107,102,135,130,123,127,155,158,221,,63,125
1619866829586,31,26,26,31,30,70,70,110,108,135,140,126,130,157,158,222,228,70,128,31,26,26,31,30,70,70,110,108,135,140,126,130,157,158,222,228,70,128
1619866829690,36,31,31,37,37,74,75,113,113,137,144,134,137,155,157,223,225,74,135,36,31,31,37,37,74,75,113,113,137,144,134,137,155,157,223,225,74,135
1619866829797,38,33,33,39,39,77,77,125,115,137,145,137,139,155,158,223,225,77,138,38,33,33,39,39,77,77,125,115,137,145,137,139,155,158,223,225,77,138
1619866829906,39,34,34,40,40,78,77,125,116,136,151,138,140,155,157,222,225,77,139,39,34,34,40,40,78,77,125,116,136,151,138,140,155,157,222,225,77,139
1619866830020,40,35,35,40,40,78,78,126,116,136,152,138,139,154,158,221,227,78,138,40,35,35,40,40,78,78,126,116,136,152,138,139,154,158,221,227,78,138
1619866830143,37,32,32,37,38,76,76,126,114,,143,136,138,155,158,222,229,76,137,37,32,32,37,38,76,76,126,114,,143,136,138,155,158,222,229,76,137
1619866830270,30,24,25,31,29,69,69,111,108,137,138,126,129,156,158,223,225,69,127,30,24,25,31,29,69,69,111,108,137,138,126,129,156,158,223,225,69,127
1619866830403,23,17,15,24,22,61,57,105,100,135,128,119,123,155,156,221,230,59,121,23,17,15,24,22,61,57,105,100,135,128,119,123,155,156,221,230,59,121
1619866830514,12,6,6,11,12,51,48,96,91,127,120,112,114,153,156,218,228,49,113,12,6,6,11,12,51,48,96,91,127,120,112,114,153,156,218,228,49,113
1619866830634,6,3,2,9,9,46,46,87,84,120,115,113,112,154,150,217,223,46,112,6,3,2,9,9,46,46,87,84,120,115,113,112,154,150,217,223,46,112
1619866830744,2,-1,-2,2,4,43,41,83,81,114,112,110,107,154,150,216,226,42,108,2,-1,-2,2,4,43,41,83,81,114,112,110,107,154,150,216,226,42,108
1619866830851,-1,-5,-6,-1,-1,38,35,76,74,105,106,106,102,153,150,217,226,36,104,-1,-5,-6,-1,-1,38,35,76,74,105,106,106,102,153,150,217,226,36,104
1619866830962,-1,-6,-6,-2,-2,35,33,74,73,104,101,103,100,153,151,217,224,34,101,-1,-6,-6,-2,-2,35,33,74,73,104,101,103,100,153,151,217,224,34,101
1619866831072,-1,-6,-6,-4,-2,31,31,73,71,104,99,101,99,154,152,218,223,31,100,-1,-6,-6,-4,-2,31,31,73,71,104,99,101,99,154,152,218,223,31,100
1619866831173,-1,-6,-6,-3,-2,29,28,71,70,102,98,101,98,154,152,217,223,28,99,-1,-6,-6,-3,-2,29,28,71,70,102,98,101,98,154,152,217,223,28,99
1619866831299,-2,-7,-6,-3,-3,28,27,69,69,100,98,100,97,154,156,216,224,27,98,-2,-7,-6,-3,-3,28,27,69,69,100,98,100,97,154,156,216,224,27,98
1619866831429,-2,-6,-6,-5,-2,28,26,68,69,96,97,96,95,152,156,217,219,27,95,-2,-6,-6,-5,-2,28,26,68,69,96,97,96,95,152,156,217,219,27,95
1619866831545,-2,-6,-6,-2,-2,28,26,68,69,96,96,97,95,153,154,219,219,27,96,-2,-6,-6,-2,-2,28,26,68,69,96,96,97,95,153,154,219,219,27,96
1619866831658,-2,-6,-6,-3,-3,28,27,68,69,96,98,98,95,153,157,217,220,27,96,-2,-6,-6,-3,-3,28,27,68,69,96,98,98,95,153,157,217,220,27,96
1619866831776,-1,-6,-6,-4,-2,31,32,71,72,100,99,100,98,157,153,216,222,31,99,-1,-6,-6,-4,-2,31,32,71,72,100,99,100,98,157,153,216,222,31,99
1619866831897,1,-3,-3,0,1,39,37,76,77,105,108,107,102,153,150,217,216,38,104,1,-3,-3,0,1,39,37,76,77,105,108,107,102,153,150,217,216,38,104
1619866832006,5,1,0,5,5,44,44,83,82,114,113,110,108,155,149,216,222,44,109,5,1,0,5,5,44,44,83,82,114,113,110,108,155,149,216,222,44,109
1619866832122,11,6,6,11,11,48,48,92,90,123,117,110,112,155,151,217,224,48,111,11,6,6,11,11,48,48,92,90,123,117,110,112,155,151,217,224,48,111
1619866832221,22,16,14,22,21,56,56,102,100,130,129,115,119,154,156,219,227,56,117,22,16,14,22,21,56,56,102,100,130,129,115,119,154,156,219,227,56,117
1619866832342,27,20,21,25,25,63,63,107,104,136,132,123,126,153,157,220,226,63,124,27,20,21,25,25,63,63,107,104,136,132,123,126,153,157,220,226,63,124
1619866832445,32,26,26,32,30,69,70,110,110,135,142,126,130,152,155,221,225,69,128,32,26,26,32,30,69,70,110,110,135,142,126,130,152,155,221,225,69,128
1619866832551,35,30,29,37,33,73,74,113,114,,144,130,136,154,156,220,225,73,133,35,30,29,37,33,73,74,113,114,,144,130,136,154,156,220,225,73,133
1619866832656,39,34,34,40,39,78,78,127,115,136,150,133,138,154,156,220,223,78,135,39,34,34,40,39,78,78,127,115,136,150,133,138,154,156,220,223,78,135
1619866832771,44,37,37,43,43,79,78,127,119,,156,139,139,152,157,221,225,78,139,44,37,37,43,43,79,78,127,119,,156,139,139,152,157,221,225,78,139
1619866832893,46,39,40,44,45,80,79,131,125,,159,142,142,150,155,220,223,79,142,46,39,40,44,45,80,79,131,125,,159,142,142,150,155,220,223,79,142
1619866833019,46,40,40,44,46,81,80,132,127,,161,148,148,151,153,219,224,80,148,46,40,40,44,46,81,80,132,127,,161,148,148,151,153,219,224,80,148
1619866833134,44,37,37,43,43,80,79,127,120,,156,139,140,153,156,222,208,79,139,44,37,37,43,43,80,79,127,120,,156,139,140,153,156,222,208,79,139
1619866833245,37,32,32,38,38,77,77,122,114,141,144,133,138,151,155,222,224,77,135,37,32,32,38,38,77,77,122,114,141,144,133,138,151,155,222,224,77,135
1619866833366,31,27,27,33,32,72,71,112,110,135,141,130,133,152,156,221,224,71,131,31,27,27,33,32,72,71,112,110,135,141,130,133,152,156,221,224,71,131
1619866833485,24,19,19,26,24,64,61,108,102,134,131,123,126,152,157,219,226,62,124,24,19,19,26,24,64,61,108,102,134,131,123,126,152,157,219,226,62,124
1619866833595,19,11,10,21,15,56,54,102,98,130,129,117,120,153,155,219,227,55,118,19,11,10,21,15,56,54,102,98,130,129,117,120,153,155,219,227,55,118
1619866833700,8,3,3,9,10,51,46,89,90,124,118,110,110,154,151,218,223,48,110,8,3,3,9,10,51,46,89,90,124,118,110,110,154,151,218,223,48,110
1619866833810,4,1,0,7,7,44,43,86,82,117,114,112,110,155,149,216,222,43,111,4,1,0,7,7,44,43,86,82,117,114,112,110,155,149,216,222,43,111
1619866833951,0,-4,-4,1,0,40,38,79,78,109,110,110,107,154,148,216,222,39,108,0,-4,-4,1,0,40,38,79,78,109,110,110,107,154,148,216,222,39,108
1619866834056,0,-6,-6,-2,-5,36,34,76,74,105,103,106,101,154,151,216,223,35,103,0,-6,-6,-2,-5,36,34,76,74,105,103,106,101,154,151,216,223,35,103
1619866834159,-1,-6,-6,-3,-2,31,31,73,72,100,99,102,99,155,152,219,224,31,100,-1,-6,-6,-3,-2,31,31,73,72,100,99,102,99,155,152,219,224,31,100
1619866834261,-1,-6,-6,-3,-3,29,28,72,70,99,99,101,97,152,151,217,221,28,99,-1,-6,-6,-3,-3,29,28,72,70,99,99,101,97,152,151,217,221,28,99
1619866834386,-2,-7,-6,-3,-3,28,27,69,69,96,99,100,96,153,155,215,221,27,98,-2,-7,-6,-3,-3,28,27,69,69,96,99,100,96,153,155,215,221,27,98
1619866834497,-2,-6,-6,-2,-3,28,27,69,69,96,99,97,95,153,159,218,221,27,96,-2,-6,-6,-2,-3,28,27,69,69,96,99,97,95,153,159,218,221,27,96
1619866834602,-2,-7,-6,-3,-3,28,27,69,69,96,98,97,94,153,159,217,222,27,95,-2,-7,-6,-3,-3,28,27,69,69,96,98,97,94,153,159,217,222,27,95
1619866834712,-2,-6,-6,-3,-3,28,27,69,71,97,98,98,96,152,158,217,222,27,97,-2,-6,-6,-3,-3,28,27,69,71,97,98,98,96,152,158,217,222,27,97
1619866834833,-1,-6,-5,-3,-2,30,29,71,71,101,99,100,97,152,155,215,224,29,98,-1,-6,-5,-3,-2,30,29,71,71,101,99,100,97,152,155,215,224,29,98
1619866834938,0,-5,-5,-1,-1,36,34,76,74,106,104,103,100,155,152,217,223,35,101,0,-5,-5,-1,-1,36,34,76,74,106,104,103,100,155,152,217,223,35,101
1619866835054,2,-1,-1,4,4,42,40,79,80,108,112,110,103,153,149,216,224,41,106,2,-1,-1,4,4,42,40,79,80,108,112,110,103,153,149,216,224,41,106
1619866835168,8,3,3,9,9,48,49,88,85,118,115,113,113,154,150,217,224,48,113,8,3,3,9,9,48,49,88,85,118,115,113,113,154,150,217,224,48,113
1619866835273,15,9,9,17,14,53,52,97,93,128,124,114,116,154,155,217,227,52,115,15,9,9,17,14,53,52,97,93,128,124,114,116,154,155,217,227,52,115
1619866835399,24,18,18,25,23,59,58,105,101,134,129,119,123,153,157,219,227,58,121,24,18,18,25,23,59,58,105,101,134,129,119,123,153,157,219,227,58,121
1619866835507,31,26,26,31,30,69,70,110,108,134,140,125,130,155,156,223,225,69,127,31,26,26,31,30,69,70,110,108,134,140,125,130,155,156,223,225,69,127
1619866835619,36,31,30,37,36,74,75,112,113,136,144,131,135,155,155,222,225,74,133,36,31,30,37,36,74,75,112,113,136,144,131,135,155,155,222,225,74,133
1619866835720,43,37,36,42,42,79,79,125,118,136,150,137,138,152,155,220,223,79,137,43,37,36,42,42,79,79,125,118,136,150,137,138,152,155,220,223,79,137
1619866835840,47,42,41,48,48,83,82,134,128,,159,149,149,151,151,218,222,82,149,47,42,41,48,48,83,82,134,128,,159,149,149,151,151,218,222,82,149
1619866835952,58,51,51,58,56,93,94,143,138,,162,155,154,151,149,219,221,93,154,58,51,51,58,56,93,94,143,138,,162,155,154,151,149,219,221,93,154
1619866836064,64,58,58,64,59,99,100,146,143,,154,162,164,149,152,220,222,99,163,64,58,58,64,59,99,100,146,143,,154,162,164,149,152,220,222,99,163
1619866836178,68,63,63,68,68,106,106,,152,,156,173,172,154,149,220,225,106,172,68,63,63,68,68,106,106,,152,,156,173,172,154,149,220,225,106,172
1619866836292,76,69,69,74,74,111,112,151,153,,,179,181,154,147,220,227,111,180,76,69,69,74,74,111,112,151,153,,,179,181,154,147,220,227,111,180
1619866836405,79,72,72,76,76,111,111,154,156,164,,183,183,154,150,221,230,111,183,79,72,72,76,76,111,111,154,156,164,,183,183,154,150,221,230,111,183
1619866836518,78,71,71,75,76,111,111,154,155,,,183,185,155,149,221,225,111,184,78,71,71,75,76,111,111,154,155,,,183,185,155,149,221,225,111,184
1619866836637,71,66,65,71,70,108,110,148,146,,,174,174,154,151,220,222,109,174,71,66,65,71,70,108,110,148,146,,,174,174,154,151,220,222,109,174
1619866836755,63,57,58,62,58,97,101,143,143,,164,162,163,150,150,216,220,99,162,63,57,58,62,58,97,101,143,143,,164,162,163,150,150,216,220,99,162
1619866836861,54,47,47,54,54,89,90,136,136,,160,152,152,152,150,216,220,89,152,54,47,47,54,54,89,90,136,136,,160,152,152,152,150,216,220,89,152
1619866836965,44,38,38,44,44,81,79,124,119,137,146,138,140,155,155,219,221,80,139,44,38,38,44,44,81,79,124,119,137,146,138,140,155,155,219,221,80,139
1619866837069,32,27,27,32,32,71,71,109,110,136,137,128,132,155,156,220,224,71,130,32,27,27,32,32,71,71,109,110,136,137,128,132,155,156,220,224,71,130
1619866837183,23,18,18,23,23,58,56,104,99,132,128,118,123,152,156,219,226,57,120,23,18,18,23,23,58,56,104,99,132,128,118,123,152,156,219,226,57,120
1619866837291,7,4,3,9,9,47,46,87,84,117,114,113,112,154,150,217,223,46,112,7,4,3,9,9,47,46,87,84,117,114,113,112,154,150,217,223,46,112
1619866837417,0,-3,-3,2,4,41,39,78,79,106,109,110,105,152,148,216,224,40,107,0,-3,-3,2,4,41,39,78,79,106,109,110,105,152,148,216,224,40,107
1619866837520,-1,-6,-6,-3,,34,32,73,71,99,98,104,98,153,150,217,223,33,101,-1,-6,-6,-3,,34,32,73,71,99,98,104,98,153,150,217,223,33,101
1619866837642,-1,-6,-6,-4,-3,30,28,72,69,96,96,99,96,153,151,218,225,29,97,-1,-6,-6,-4,-3,30,28,72,69,96,96,99,96,153,151,218,225,29,97
1619866837750,-2,-6,-6,-4,-3,28,26,68,68,95,92,94,92,151,149,216,225,27,93,-2,-6,-6,-4,-3,28,26,68,68,95,92,94,92,151,149,216,225,27,93
1619866837863,-1,-6,-6,-4,-3,27,25,66,65,95,91,89,92,152,151,216,222,26,90,-1,-6,-6,-4,-3,27,25,66,65,95,91,89,92,152,151,216,222,26,90
1619866837970,-1,-6,-6,-4,-3,27,25,64,65,95,92,89,92,154,154,217,222,26,90,-1,-6,-6,-4,-3,27,25,64,65,95,92,89,92,154,154,217,222,26,90
1619866838074,-1,-6,-6,-3,-3,28,25,65,65,95,92,91,91,154,157,217,221,26,91,-1,-6,-6,-3,-3,28,25,65,65,95,92,91,91,154,157,217,221,26,91
1619866838179,-1,-5,-6,-3,-2,28,26,66,65,94,92,90,91,154,158,217,221,27,90,-1,-5,-6,-3,-2,28,26,66,65,94,92,90,91,154,158,217,221,27,90
1619866838287,-1,-5,-6,-3,-2,28,26,66,65,95,90,87,90,153,155,218,220,27,88,-1,-5,-6,-3,-2,28,26,66,65,95,90,87,90,153,155,218,220,27,88
1619866838409,-1,-5,-6,-3,-2,28,25,66,66,95,89,87,90,152,154,218,220,26,88,-1,-5,-6,-3,-2,28,25,66,66,95,89,87,90,152,154,218,220,26,88
1619866838523,-1,-5,-5,-2,-2,28,25,66,64,95,90,86,88,152,152,217,219,26,87,-1,-5,-5,-2,-2,28,25,66,64,95,90,86,88,152,152,217,219,26,87
1619866838640,-1,-5,-6,-4,-3,27,24,62,64,91,91,85,88,152,153,218,222,25,86,-1,-5,-6,-4,-3,27,24,62,64,91,91,85,88,152,153,218,222,25,86
1619866838749,-1,-6,-6,-4,-3,27,24,61,63,89,90,86,84,150,148,218,221,25,85,-1,-6,-6,-4,-3,27,24,61,63,89,90,86,84,150,148,218,221,25,85
1619866838880,-1,-6,-6,-2,-3,26,23,60,63,90,90,85,88,152,150,218,220,24,86,-1,-6,-6,-2,-3,26,23,60,63,90,90,85,88,152,150,218,220,24,86
1619866838995,-2,-6,-6,-2,-3,26,23,60,62,90,90,86,87,150,148,217,220,24,86,-2,-6,-6,-2,-3,26,23,60,62,90,90,86,87,150,148,217,220,24,86
1619866839107,-2,-6,-6,-2,-3,25,23,62,62,92,89,86,89,151,151,218,222,24,87,-2,-6,-6,-2,-3,25,23,62,62,92,89,86,89,151,151,218,222,24,87
1619866839214,-2,-6,-6,-5,-3,25,23,61,62,92,90,87,89,151,148,217,223,24,88,-2,-6,-6,-5,-3,25,23,61,62,92,90,87,89,151,148,217,223,24,88
1619866839324,-2,-6,-6,-6,-3,25,23,61,63,92,91,87,90,151,150,218,221,24,88,-2,-6,-6,-6,-3,25,23,61,63,92,91,87,90,151,150,218,221,24,88
1619866839441,-2,-6,-6,-4,-3,27,25,65,67,96,94,92,92,152,148,220,224,26,92,-2,-6,-6,-4,-3,27,25,65,67,96,94,92,92,152,148,220,224,26,92
1619866839544,0,-4,-4,-1,0,36,34,76,74,106,102,104,101,153,149,216,223,35,102,0,-4,-4,-1,0,36,34,76,74,106,102,104,101,153,149,216,223,35,102
1619866839657,8,3,3,9,9,47,49,87,85,119,115,112,113,154,152,217,226,48,112,8,3,3,9,9,47,49,87,85,119,115,112,113,154,152,217,226,48,112
1619866839757,26,20,20,26,24,61,60,105,102,137,130,123,127,152,159,220,228,60,125,26,20,20,26,24,61,60,105,102,137,130,123,127,152,159,220,228,60,125
1619866839869,37,32,32,36,36,74,75,123,114,,145,132,136,157,157,222,226,74,134,37,32,32,36,36,74,75,123,114,,145,132,136,157,157,222,226,74,134
1619866839983,50,43,43,47,47,81,80,132,123,,157,142,140,152,155,221,224,80,141,50,43,43,47,47,81,80,132,123,,157,142,140,152,155,221,224,80,141
1619866840091,60,53,54,58,56,93,92,141,137,,162,153,153,151,149,217,220,92,153,60,53,54,58,56,93,92,141,137,,162,153,153,151,149,217,220,92,153
1619866840205,70,65,65,69,68,103,105,144,144,,167,166,167,149,150,217,221,104,166,70,65,65,69,68,103,105,144,144,,167,166,167,149,150,217,221,104,166
1619866840318,80,73,72,74,74,110,110,150,155,,,176,176,154,150,217,226,110,176,80,73,72,74,74,110,110,150,155,,,176,176,154,150,217,226,110,176
1619866840432,85,77,77,79,79,111,112,153,152,,,182,182,155,149,220,230,111,182,85,77,77,79,79,111,112,153,152,,,182,182,155,149,220,230,111,182
1619866840536,86,78,78,80,80,111,113,154,154,160,,182,,157,151,221,230,112,182,86,78,78,80,80,111,113,154,154,160,,182,,157,151,221,230,112,182
1619866840651,80,73,73,75,76,110,111,152,153,,,179,179,154,150,218,230,110,179,80,73,73,75,76,110,111,152,153,,,179,179,154,150,218,230,110,179
1619866840764,69,64,64,69,67,103,105,144,144,,159,166,165,154,150,221,225,104,165,69,64,64,69,67,103,105,144,144,,159,166,165,154,150,221,225,104,165
1619866840881,58,51,52,56,56,91,92,138,136,,160,153,153,151,147,218,218,91,153,58,51,52,56,56,91,92,138,136,,160,153,153,151,147,218,218,91,153
1619866840993,49,42,42,47,47,80,80,128,123,139,155,142,141,153,154,218,219,80,141,49,42,42,47,47,80,80,128,123,139,155,142,141,153,154,218,219,80,141
1619866841100,35,30,30,37,36,73,73,111,111,137,140,131,135,154,155,219,227,73,133,35,30,30,37,36,73,73,111,111,137,140,131,135,154,155,219,227,73,133
1619866841205,26,20,21,28,25,62,61,106,101,134,129,121,126,152,157,220,227,61,123,26,20,21,28,25,62,61,106,101,134,129,121,126,152,157,220,227,61,123
1619866841320,16,9,9,18,14,52,51,95,90,127,120,112,115,154,152,218,,51,113,16,9,9,18,14,52,51,95,90,127,120,112,115,154,152,218,,51,113
1619866841429,7,3,3,9,8,46,46,85,82,117,112,112,108,153,149,216,227,46,110,7,3,3,9,8,46,46,85,82,117,112,112,108,153,149,216,227,46,110
1619866841545,0,-3,-4,1,1,40,37,78,77,106,107,108,103,152,150,215,225,38,105,0,-3,-4,1,1,40,37,78,77,106,107,108,103,152,150,215,225,38,105
1619866841652,-1,-6,-6,-2,-2,34,31,73,72,100,99,101,98,154,152,216,221,32,99,-1,-6,-6,-2,-2,34,31,73,72,100,99,101,98,154,152,216,221,32,99
1619866841763,-1,-6,-6,-3,-2,29,28,69,69,97,96,98,94,151,151,218,223,28,96,-1,-6,-6,-3,-2,29,28,69,69,97,96,98,94,151,151,218,223,28,96
1619866841874,-1,-5,-5,-1,-2,28,26,67,68,95,94,90,93,151,153,218,222,27,91,-1,-5,-5,-1,-2,28,26,67,68,95,94,90,93,151,153,218,222,27,91
1619866841989,-1,-5,-5,-3,-2,27,26,64,67,93,92,91,89,152,153,216,220,26,90,-1,-5,-5,-3,-2,27,26,64,67,93,92,91,89,152,153,216,220,26,90
1619866842092,-2,-6,-6,-5,-3,27,25,64,66,95,92,91,89,153,154,217,221,26,90,-2,-6,-6,-5,-3,27,25,64,66,95,92,91,89,153,154,217,221,26,90
1619866842197,-1,-6,-6,-4,-3,27,25,63,66,95,90,90,89,153,156,217,220,26,89,-1,-6,-6,-4,-3,27,25,63,66,95,90,90,89,153,156,217,220,26,89
1619866842308,-1,-6,-6,-4,-3,27,26,63,67,96,93,91,89,151,151,217,221,26,90,-1,-6,-6,-4,-3,27,26,63,67,96,93,91,89,151,151,217,221,26,90
1619866842423,-1,-6,-6,-3,-2,29,27,68,69,97,96,95,93,152,150,218,223,28,94,-1,-6,-6,-3,-2,29,27,68,69,97,96,95,93,152,150,218,223,28,94
1619866842542,0,-4,-5,-1,-1,33,32,72,72,101,100,100,97,156,151,218,224,32,98,0,-4,-5,-1,-1,33,32,72,72,101,100,100,97,156,151,218,224,32,98
1619866842644,0,-3,-3,2,2,41,39,78,78,107,109,107,102,154,149,215,224,40,104,0,-3,-3,2,2,41,39,78,78,107,109,107,102,154,149,215,224,40,104
1619866842758,6,2,1,8,7,45,44,86,83,117,113,112,108,156,150,216,223,44,110,6,2,1,8,7,45,44,86,83,117,113,112,108,156,150,216,223,44,110
1619866842862,10,4,4,11,11,48,46,93,89,125,120,111,111,155,152,216,219,47,111,10,4,4,11,11,48,46,93,89,125,120,111,111,155,152,216,219,47,111
1619866842971,16,10,10,16,19,52,51,99,93,131,127,115,116,155,154,218,224,51,115,16,10,10,16,19,52,51,99,93,131,127,115,116,155,154,218,224,51,115
1619866843082,26,20,20,26,24,65,63,108,103,136,133,123,125,155,159,221,229,64,124,26,20,20,26,24,65,63,108,103,136,133,123,125,155,159,221,229,64,124
1619866843189,31,26,26,33,31,72,71,111,111,136,142,129,134,154,159,220,225,71,131,31,26,26,33,31,72,71,111,111,136,142,129,134,154,159,220,225,71,131
1619866843299,38,33,32,38,38,77,77,127,115,137,154,134,138,155,159,221,224,77,136,38,33,32,38,38,77,77,127,115,137,154,134,138,155,159,221,224,77,136
1619866843405,42,36,35,41,39,78,79,128,119,137,158,136,139,155,156,222,209,78,137,42,36,35,41,39,78,79,128,119,137,158,136,139,155,156,222,209,78,137
1619866843516,43,37,36,41,40,79,78,125,122,138,160,138,139,153,156,222,208,78,138,43,37,36,41,40,79,78,125,122,138,160,138,139,153,156,222,208,78,138
1619866843628,42,36,36,41,39,79,79,126,121,,160,138,140,153,156,220,,79,139,42,36,36,41,39,79,79,126,121,,160,138,140,153,156,220,,79,139
1619866843739,38,34,33,40,37,77,78,123,118,,153,133,139,155,157,222,226,77,136,38,34,33,40,37,77,78,123,118,,153,133,139,155,157,222,226,77,136
1619866843866,33,28,27,34,32,72,73,113,112,137,145,131,135,156,157,222,226,72,133,33,28,27,34,32,72,73,113,112,137,145,131,135,156,157,222,226,72,133
1619866843977,24,18,19,25,25,64,62,108,103,132,131,123,125,152,158,221,225,63,124,24,18,19,25,25,64,62,108,103,132,131,123,125,152,158,221,225,63,124
1619866844100,18,10,9,19,13,55,54,102,98,132,129,115,117,153,158,217,225,54,116,18,10,9,19,13,55,54,102,98,132,129,115,117,153,158,217,225,54,116
1619866844231,6,2,1,8,9,49,48,85,87,121,116,114,107,154,151,216,224,48,110,6,2,1,8,9,49,48,85,87,121,116,114,107,154,151,216,224,48,110
1619866844345,1,-2,-3,2,2,40,39,79,78,111,109,104,107,155,148,216,221,39,105,1,-2,-3,2,2,40,39,79,78,111,109,104,107,155,148,216,221,39,105
1619866844454,0,-5,-5,-2,-4,36,35,75,74,103,102,103,99,154,150,218,223,35,101,0,-5,-5,-2,-4,36,35,75,74,103,102,103,99,154,150,218,223,35,101
1619866844553,-1,-5,-5,-4,-2,29,29,70,69,99,98,99,95,152,150,217,226,29,97,-1,-5,-5,-4,-2,29,29,70,69,99,98,99,95,152,150,217,226,29,97
1619866844667,-1,-6,-6,-5,-3,28,27,67,69,96,96,92,90,151,149,216,226,27,91,-1,-6,-6,-5,-3,28,27,67,69,96,96,92,90,151,149,216,226,27,91
1619866844788,0,-4,-4,-3,-1,28,26,65,68,91,94,90,91,151,151,215,223,27,90,0,-4,-4,-3,-1,28,26,65,68,91,94,90,91,151,151,215,223,27,90
1619866844915,-1,-5,-6,-3,-2,28,26,64,66,91,91,91,88,152,152,218,222,27,89,-1,-5,-6,-3,-2,28,26,64,66,91,91,91,88,152,152,218,222,27,89
1619866845023,-1,-5,-5,-3,-2,28,27,64,66,91,93,90,87,152,155,216,223,27,88,-1,-5,-5,-3,-2,28,27,64,66,91,93,90,87,152,155,216,223,27,88
1619866845145,-1,-5,-5,-3,-2,29,27,64,66,92,93,88,91,153,151,216,219,28,89,-1,-5,-5,-3,-2,29,27,64,66,92,93,88,91,153,151,216,219,28,89
1619866845253,0,-5,-5,-3,-1,28,27,65,66,95,93,89,86,152,153,217,220,27,87,0,-5,-5,-3,-1,28,27,65,66,95,93,89,86,152,153,217,220,27,87
1619866845375,0,-5,-5,-3,-1,28,27,64,64,94,93,89,87,153,153,218,220,27,88,0,-5,-5,-3,-1,28,27,64,64,94,93,89,87,153,153,218,220,27,88
1619866845492,0,-5,-5,-3,-1,28,27,63,65,91,92,89,87,153,153,217,220,27,88,0,-5,-5,-3,-1,28,27,63,65,91,92,89,87,153,153,217,220,27,88
1619866845621,-1,-5,-5,-4,-2,28,27,63,65,91,93,86,89,153,153,217,220,27,87,-1,-5,-5,-4,-2,28,27,63,65,91,93,86,89,153,153,217,220,27,87
1619866845762,0,-5,-5,-4,-2,28,26,62,64,90,90,88,86,152,153,218,220,27,87,0,-5,-5,-4,-2,28,26,62,64,90,90,88,86,152,153,218,220,27,87
1619866845883,0,-5,-5,-3,-1,28,27,64,65,91,91,89,87,151,152,216,220,27,88,0,-5,-5,-3,-1,28,27,64,65,91,91,89,87,151,152,216,220,27,88
1619866846017,0,-5,-5,-4,-1,28,26,64,65,92,92,91,88,153,152,216,219,27,89,0,-5,-5,-4,-1,28,26,64,65,92,92,91,88,153,152,216,219,27,89
1619866846211,0,-4,-4,-3,-1,28,27,65,67,95,95,88,91,153,155,218,222,27,89,0,-4,-4,-3,-1,28,27,65,67,95,95,88,91,153,155,218,222,27,89
1619866846347,0,-5,-5,-3,-1,28,27,65,67,96,95,88,91,153,156,217,220,27,89,0,-5,-5,-3,-1,28,27,65,67,96,95,88,91,153,156,217,220,27,89
1619866846469,0,-4,-4,-3,-1,28,27,66,66,95,92,87,90,153,152,216,220,27,88,0,-4,-4,-3,-1,28,27,66,66,95,92,87,90,153,152,216,220,27,88
1619866846590,0,-5,-5,-3,-2,28,26,64,65,92,92,88,86,152,152,217,221,27,87,0,-5,-5,-3,-2,28,26,64,65,92,92,88,86,152,152,217,221,27,87
1619866846705,0,-5,-5,-2,-1,28,26,64,64,92,91,86,89,152,152,218,220,27,87,0,-5,-5,-2,-1,28,26,64,64,92,91,86,89,152,152,218,220,27,87
1619866846813,-1,-6,-6,-4,-3,28,25,65,64,95,92,85,89,152,152,218,220,26,87,-1,-6,-6,-4,-3,28,25,65,64,95,92,85,89,152,152,218,220,26,87
1619866846931,-2,-6,-6,-4,-3,28,25,65,64,96,90,86,89,152,152,218,219,26,87,-2,-6,-6,-4,-3,28,25,65,64,96,90,86,89,152,152,218,219,26,87
1619866847032,-1,-5,-6,-4,-3,27,24,63,63,94,91,84,88,151,149,217,220,25,86,-1,-5,-6,-4,-3,27,24,63,63,94,91,84,88,151,149,217,220,25,86
1619866847132,-1,-6,-6,-2,-3,26,23,62,63,91,90,84,87,151,149,217,220,24,85,-1,-6,-6,-2,-3,26,23,62,63,91,90,84,87,151,149,217,220,24,85
1619866847265,-2,-6,-6,-2,-3,26,22,61,63,90,90,87,85,151,151,217,220,24,86,-2,-6,-6,-2,-3,26,22,61,63,90,90,87,85,151,151,217,220,24,86
1619866847398,-2,-6,-6,,-3,25,22,59,63,89,90,87,85,152,150,217,220,23,86,-2,-6,-6,,-3,25,22,59,63,89,90,87,85,152,150,217,220,23,86
1619866847517,-2,-6,-6,,-3,24,22,58,63,89,90,87,85,150,149,218,220,23,86,-2,-6,-6,,-3,24,22,58,63,89,90,87,85,150,149,218,220,23,86
1619866847630,-2,-6,-6,,-3,25,23,60,63,89,90,87,84,149,149,217,220,24,85,-2,-6,-6,,-3,25,23,60,63,89,90,87,84,149,149,217,220,24,85
1619866847738,-2,-6,-6,-2,-2,26,24,62,65,93,92,87,89,150,149,218,220,25,88,-2,-6,-6,-2,-2,26,24,62,65,93,92,87,89,150,149,218,220,25,88
1619866847851,-1,-7,-6,-3,-2,28,28,68,70,96,97,92,90,150,151,215,222,28,91,-1,-7,-6,-3,-2,28,28,68,70,96,97,92,90,150,151,215,222,28,91
1619866847966,0,-4,-4,-3,0,33,30,73,71,106,102,96,93,151,154,217,222,31,94,0,-4,-4,-3,0,33,30,73,71,106,102,96,93,151,154,217,222,31,94
1619866848073,4,0,0,1,-1,39,37,78,77,110,109,99,96,153,152,217,220,38,97,4,0,0,1,-1,39,37,78,77,110,109,99,96,153,152,217,220,38,97
1619866848169,25,19,20,13,12,44,45,81,80,120,115,99,99,157,154,217,220,44,99,25,19,20,13,12,44,45,81,80,120,115,99,99,157,154,217,220,44,99
1619866848275,36,28,29,24,25,50,48,86,82,124,117,103,102,156,155,217,218,49,102,36,28,29,24,25,50,48,86,82,124,117,103,102,156,155,217,218,49,102
1619866848386,53,45,45,38,38,57,54,101,93,137,124,107,106,155,151,214,218,55,106,53,45,45,38,38,57,54,101,93,137,124,107,106,155,151,214,218,55,106
1619866848494,68,61,63,,,65,64,107,103,143,131,111,108,157,150,215,218,64,109,68,61,63,,,65,64,107,103,143,131,111,108,157,150,215,218,64,109
1619866848606,85,73,73,63,,79,75,113,107,,137,,110,156,152,215,219,77,110,85,73,73,63,,79,75,113,107,,137,,110,156,152,215,219,77,110
1619866848710,100,95,95,,,86,82,155,104,,140,,115,156,157,217,220,84,115,100,95,95,,,86,82,155,104,,140,,115,156,157,217,220,84,115
1619866848825,103,100,100,,,92,87,156,104,148,,,120,156,155,216,218,89,120,103,100,100,,,92,87,156,104,148,,,120,156,155,216,218,89,120
1619866848928,,,,,,94,92,161,122,150,,,117,158,155,217,218,93,115,,,,,,94,92,161,122,150,,,117,158,155,217,218,93,115
1619866849057,,,,,,99,95,160,97,149,,,118,157,157,216,217,97,115,,,,,,99,95,160,97,149,,,118,157,157,216,217,97,115
1619866849183,,,104,,,93,90,158,97,,,,117,157,154,217,217,91,115,,,104,,,93,90,158,97,,,,117,157,154,217,217,91,115
1619866849299,102,98,98,,,89,84,157,102,,139,,118,157,150,216,218,86,116,102,98,98,,,89,84,157,102,,139,,118,157,150,216,218,86,116
1619866849411,96,93,92,,,85,80,141,99,,139,,117,156,152,215,219,82,116,96,93,92,,,85,80,141,99,,139,,117,156,152,215,219,82,116
1619866849524,84,76,77,66,,78,77,113,107,,139,110,111,156,151,216,219,77,110,84,76,77,66,,78,77,113,107,,139,110,111,156,151,216,219,77,110
1619866849636,70,63,63,56,57,75,73,110,108,,141,,110,154,150,214,220,74,110,70,63,63,56,57,75,73,110,108,,141,,110,154,150,214,220,74,110
1619866849745,53,44,44,41,41,69,67,109,104,,137,110,108,154,149,215,218,68,109,53,44,44,41,41,69,67,109,104,,137,110,108,154,149,215,218,68,109
1619866849852,37,31,31,29,31,60,57,104,97,139,132,113,111,155,150,214,218,58,112,37,31,31,29,31,60,57,104,97,139,132,113,111,155,150,214,218,58,112
1619866849961,27,21,22,22,23,54,52,100,92,135,130,111,112,154,152,214,218,53,111,27,21,22,22,23,54,52,100,92,135,130,111,112,154,152,214,218,53,111
1619866850074,16,10,9,17,12,48,47,97,86,130,122,104,107,155,150,214,220,47,105,16,10,9,17,12,48,47,97,86,130,122,104,107,155,150,214,220,47,105
1619866850195,5,1,1,7,7,46,45,86,80,124,116,101,104,154,151,214,221,45,102,5,1,1,7,7,46,45,86,80,124,116,101,104,154,151,214,221,45,102
1619866850298,-1,-4,-5,1,0,40,39,79,77,111,107,100,99,154,152,214,222,39,99,-1,-4,-5,1,0,40,39,79,77,111,107,100,99,154,152,214,222,39,99
1619866850417,0,-4,-4,0,,37,36,75,75,104,104,98,98,153,150,216,224,36,98,0,-4,-4,0,,37,36,75,75,104,104,98,98,153,150,216,224,36,98
1619866850520,0,-4,-4,0,0,33,32,70,70,98,96,91,91,151,151,216,225,32,91,0,-4,-4,0,0,33,32,70,70,98,96,91,91,151,151,216,225,32,91
1619866850634,0,-4,-4,0,0,30,32,66,67,94,91,89,89,152,154,216,221,31,89,0,-4,-4,0,0,30,32,66,67,94,91,89,89,152,154,216,221,31,89
1619866850747,0,-3,-3,2,1,30,31,66,66,92,90,89,89,151,147,216,221,30,89,0,-3,-3,2,1,30,31,66,66,92,90,89,89,151,147,216,221,30,89
1619866850854,0,-3,-4,2,0,31,31,67,67,94,91,87,88,150,148,217,221,31,87,0,-3,-4,2,0,31,31,67,67,94,91,87,88,150,148,217,221,31,87
1619866850968,0,-3,-4,-1,0,31,29,67,66,96,96,87,89,150,144,217,220,30,88,0,-3,-4,-1,0,31,29,67,66,96,96,87,89,150,144,217,220,30,88
1619866851067,-1,-5,-6,-1,-2,32,28,69,65,98,88,86,89,151,147,218,220,30,87,-1,-5,-6,-1,-2,32,28,69,65,98,88,86,89,151,147,218,220,30,87
1619866851178,-1,-6,-6,-1,-2,31,27,67,64,98,88,91,89,153,152,219,220,29,90,-1,-6,-6,-1,-2,31,27,67,64,98,88,91,89,153,152,219,220,29,90
1619866851289,-2,-7,-7,-2,-3,29,28,68,60,99,86,90,88,153,149,217,219,28,89,-2,-7,-7,-2,-3,29,28,68,60,99,86,90,88,153,149,217,219,28,89
1619866851410,0,-5,-4,,-4,27,23,65,58,98,84,89,85,148,144,218,211,25,87,0,-5,-4,,-4,27,23,65,58,98,84,89,85,148,144,218,211,25,87
1619866851523,-2,-6,-5,-2,-8,25,22,61,57,97,84,87,83,146,144,214,209,23,85,-2,-6,-5,-2,-8,25,22,61,57,97,84,87,83,146,144,214,209,23,85
1619866851645,-2,-7,-7,-5,-6,20,19,57,54,89,84,84,79,149,140,216,211,19,81,-2,-7,-7,-5,-6,20,19,57,54,89,84,84,79,149,140,216,211,19,81
1619866851759,-5,-11,-11,-11,-10,16,14,51,54,87,85,82,80,149,143,216,217,15,81,-5,-11,-11,-11,-10,16,14,51,54,87,85,82,80,149,143,216,217,15,81
1619866851877,,,,,,3,4,45,49,77,82,74,69,147,139,215,220,3,71,,,,,,3,4,45,49,77,82,74,69,147,139,215,220,3,71
1619866851999,,,,,,-7,-6,39,36,74,76,64,62,146,133,211,221,,63,,,,,,-7,-6,39,36,74,76,64,62,146,133,211,221,,63
1619866852112,,,,,,,-11,21,21,56,47,52,54,132,129,207,221,,53,,,,,,,-11,21,21,56,47,52,54,132,129,207,221,,53
1619866852217,,,,,,,,13,,44,27,38,40,117,139,211,220,,39,,,,,,,,13,,44,27,38,40,117,139,211,220,,39
1619866852318,,,,,,,,8,,40,14,37,36,109,137,216,221,,36,,,,,,,,8,,40,14,37,36,109,137,216,221,,36
1619866852431,,,,,,,,-2,,37,11,30,29,103,136,224,226,,29,,,,,,,,-2,,37,11,30,29,103,136,224,226,,29
1619866852545,,,,,,,,,,22,9,,,93,,223,221,,20,,,,,,,,,,22,9,,,93,,223,221,,20
1619866852660,,,,,,,,,,,3,1,3,72,,,,,2,,,,,,,,,,,3,1,3,72,,,,,2
1619866852775,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,
1619866852893,,,,,,,,74,,,,,,,,,,,,,,,,,,,74,,,,,,,,,,,
1619866852997,,,,,,,,,,,,,-14,,,,,,,,,,,,,,,,,,,-14,,,,,,
1619866853111,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,
1619866853225,,,,,,,,53,,,,,,,,,,60,,,,,,,,,53,,,,,,,,,,60,
1619866853334,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,,
1619866853445,,,,,,,,,,,,,,,,258,,97,211,,,,,,,,,,,,,,,,258,,97,211
1619866853556,,,,,,,,,,,,,,,,,,,99,,,,,,,,,,,,,,,,,,,99
1619866853664,,,,,,,,,,,,,,,,,,6,191,,,,,,,,,,,,,,,,,,6,191
1619866853770,,,,,,,,,,,,,,,,,,67,190,,,,,,,,,,,,,,,,,,67,190
1619866853889,,,,,,,,,,,,,,,,,,13,260,,,,,,,,,,,,,,,,,,13,260
1619866854028,,,,,,,,,147,,,,,,,,,43,176,,,,,,,,,147,,,,,,,,,43,176
1619866854139,,,,,,,,,,,,,,,,,,86,259,,,,,,,,,,,,,,,,,,86,259
1619866854254,,,,,,,,,,,,,,,,,,119,259,,,,,,,,,,,,,,,,,,119,259
1619866854373,,,26,,,,138,,,,,,,,,,,136,266,,,26,,,,138,,,,,,,,,,,136,266
1619866854473,60,46,43,,73,,159,,,,,,,,,,,157,244,60,46,43,,73,,159,,,,,,,,,,,157,244
""")
with open('test_data.txt', 'w') as fd:
data.seek(0)
shutil.copyfileobj(data, fd)
# f= open("test_data.txt","w+")
# f.write(data)
# + [markdown] id="aLaPfyMopFjM"
# #**Loading Pose detection output of Squat exercise**
#
#
# + [markdown] id="7DLfa3oApCXB"
#
# + colab={"base_uri": "https://localhost:8080/", "height": 750} id="OEtqlcwKw597" outputId="48591a22-eb8f-4930-a8da-bc7a6ce2e5e9"
keypoints = ['NOSE','LEFT_EYE','RIGHT_EYE','LEFT_EAR','RIGHT_EAR','LEFT_SHOULDER','RIGHT_SHOULDER','LEFT_ELBOW','RIGHT_ELBOW','LEFT_WRIST','RIGHT_WRIST','LEFT_HIP','RIGHT_HIP','LEFT_KNEE','RIGHT_KNEE','LEFT_ANKLE','RIGHT_ANKLE']
keypoints_x, keypoints_y = [], []
for points in keypoints:
keypoints_y.append(points + "_y")
keypoints_x.append(points + "_x")
df_header = ['TIME'] + keypoints_y + keypoints_x
df = pd.read_csv("test_data.txt", index_col=False, names=df_header)
print(df.head())
def normalize(x):
    # keep missing values as NaN, otherwise cast to int
    if pd.isnull(x):
        return x
    return int(x)
df["TIME"] = df["TIME"].apply(int)
for i in df.columns[1:]:
df[i] = df[i].apply(normalize)
plt.figure(figsize=(20,10))
for i in df.columns[1:]:
plt.plot(df["TIME"], df[i], label=str(i))
plt.legend(loc=(1.04,0))
original_df = df
# + [markdown] id="Ej40YkH9pRTb"
# #**Write code to count squat reps and detect wrong reps in which the person did not go down far enough (small peaks) while doing the exercise.**
# + [markdown] id="j9Klztkk3ifr"
# We can see from the plot above that each 'hump' corresponds to one completed squat. To count the total squats we therefore need code that counts the number of 'humps', which we then group into two clusters, "proper reps" and "wrong reps". Counting the 'humps' reduces to counting the total number of peaks in the signal.
# + [markdown] id="sNpnivMJ45ma"
# # There's a simple approach to this
# + [markdown] id="R-rRyumuZgEj"
# Let us take into account the y-keypoints (nose, eyes, etc.; their y-coordinates change drastically during a squat) because they capture the vertical motion, so a change in the y-keypoints indicates that a squat was performed. We pick the feature with the fewest missing values, remove the non-periodic part of the 'waveform' and treat it as outliers, interpolate the missing data, and then find the peaks in the signal. Every peak corresponds to one squat. Finally we use k-means to cluster the peaks into 'proper reps' and 'wrong reps'.
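# +
# A minimal sketch of the pipeline described above (hypothetical helper, not part of the original
# notebook): drop values above an outlier threshold, interpolate the gaps, detect peaks, and
# cluster the peak heights into two groups (proper vs. wrong reps).
from scipy.signal import find_peaks
from sklearn.cluster import KMeans
import numpy as np
import pandas as pd

def sketch_count_reps(series, outlier_thresh=130, peak_height=10):
    kept = pd.Series(np.array(series)[np.where(np.array(series) < outlier_thresh)[0]])
    kept = kept.interpolate(method='quadratic')          # fill gaps left after dropping outliers
    peaks, _ = find_peaks(kept, height=peak_height)      # each peak ~ one squat
    heights = kept.iloc[peaks].to_numpy().reshape(-1, 1)
    labels = KMeans(n_clusters=2).fit_predict(heights)   # split into deep vs. shallow squats
    return len(peaks), labels
# -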
# + id="4NmD5Sb0l-wz" colab={"base_uri": "https://localhost:8080/"} outputId="9e7d12e9-0d4c-49ea-daa1-df5560e18ccc"
# taking the y-keypoints
new_df = df.iloc[:, df.columns.str.endswith('_y')]
new_df = pd.concat([df['TIME'], new_df], axis = 1)
# Missing values
new_df.isnull().sum()
# + [markdown] id="KfgqtRqCmL20"
# # Nose Keypoints
# + colab={"base_uri": "https://localhost:8080/", "height": 374} id="A9-EAR_ui0mb" outputId="1338891c-5a64-4e07-e07a-7bf7e368eee4"
KEY = 'NOSE_y'
plt.figure(figsize=(8,6))
peaks, _ = find_peaks(new_df[KEY], height=10)
plt.plot(new_df[KEY])
plt.plot(peaks, np.array(new_df[KEY])[peaks], "x")
plt.plot(np.zeros_like(new_df[KEY]), "--", color="gray")
plt.show()
# we can see the last hump did not have a peak because it had missing data
# + [markdown] id="6ER75NWHmgnj"
# # Eye
# + colab={"base_uri": "https://localhost:8080/", "height": 390} id="eGQ-qF_Bipwr" outputId="573875ba-db4e-4dce-ee48-632db85469f6"
KEY = 'LEFT_EYE_y'
plt.figure(figsize=(20,6))
plt.subplot(1, 2, 1)
peaks, _ = find_peaks(new_df[KEY], height=1)
plt.plot(new_df[KEY])
plt.plot(peaks, np.array(new_df[KEY])[peaks], "x")
plt.plot(np.zeros_like(new_df[KEY]), "--", color="gray")
plt.title(KEY)
KEY = 'RIGHT_EYE_y'
peaks, _ = find_peaks(new_df[KEY], height=10)
plt.subplot(1, 2, 2)
plt.plot(new_df[KEY])
plt.plot(peaks, np.array(new_df[KEY])[peaks], "x")
plt.plot(np.zeros_like(new_df[KEY]), "--", color="gray")
plt.title(KEY)
plt.show()
# + [markdown] id="b6aAaX8HnPI9"
# # Ears
# + colab={"base_uri": "https://localhost:8080/", "height": 390} id="d3FrWGp_mneS" outputId="9582b2bc-80e3-484c-9e9d-e6a4099d365e"
KEY = 'LEFT_EAR_y'
plt.figure(figsize=(20,6))
plt.subplot(1, 2, 1)
peaks, _ = find_peaks(new_df[KEY], height=1)
plt.plot(new_df[KEY])
plt.plot(peaks, np.array(new_df[KEY])[peaks], "x")
plt.plot(np.zeros_like(new_df[KEY]), "--", color="gray")
plt.title(KEY)
KEY = 'RIGHT_EAR_y'
peaks, _ = find_peaks(new_df[KEY], height=10)
plt.subplot(1, 2, 2)
plt.plot(new_df[KEY])
plt.plot(peaks, np.array(new_df[KEY])[peaks], "x")
plt.plot(np.zeros_like(new_df[KEY]), "--", color="gray")
plt.title(KEY)
plt.show()
# + [markdown] id="IRJb_Wf_t2XC"
# # Hips
# + colab={"base_uri": "https://localhost:8080/", "height": 390} id="4dJVM-M-nesm" outputId="6d010de6-a495-4231-92e9-b7d930a90c52"
KEY = 'LEFT_HIP_y'
THRES_HEIGHT = 110
plt.figure(figsize=(20,6))
plt.subplot(1, 2, 1)
peaks, _ = find_peaks(new_df[KEY], height=THRES_HEIGHT, distance=10)
plt.plot(new_df[KEY])
plt.plot(peaks, np.array(new_df[KEY])[peaks], "x")
plt.plot(np.array([THRES_HEIGHT]*450), "--", color="gray")
plt.title(KEY)
KEY = 'RIGHT_HIP_y'
peaks, _ = find_peaks(new_df[KEY], height=THRES_HEIGHT, distance=10)
plt.subplot(1, 2, 2)
plt.plot(new_df[KEY])
plt.plot(peaks, np.array(new_df[KEY])[peaks], "x")
plt.plot(np.array([THRES_HEIGHT]*450), "--", color="gray")
plt.title(KEY)
plt.show()
# + [markdown] id="rJyOTfGJt92b"
# We could keep analysing each graph, but since NOSE_y has the fewest missing values we proceed with that. After building the code we validate it with another keypoint as input (LEFT_EYE_y); if it produces the same result, the approach is sound.
# + colab={"base_uri": "https://localhost:8080/", "height": 788} id="bMXoSeIZ5Yz_" outputId="3ea3dbfb-ce02-4dc9-879d-e1f9c2bf0cf1"
# boxplot for pointing outliers
plt.figure(figsize=(12,6))
sns.boxplot(x=new_df['NOSE_y'])
plt.xticks(np.arange(-10, 200, 10))
# + colab={"base_uri": "https://localhost:8080/"} id="3AwQLE05oRIL" outputId="9f03859e-6ef2-40bd-d1c8-85c3d21a99a7"
# indices of the points that remain after treating NOSE_y values >= 130 as outliers
print(np.where(new_df['NOSE_y']<130)[0])
# + colab={"base_uri": "https://localhost:8080/", "height": 483} id="2QVTYLNH2zzq" outputId="d646a6cd-2b21-4134-fa7f-d2200537a773"
KEY = 'NOSE_y'
plt.figure(figsize=(12, 8))
itpdf = pd.DataFrame(np.array(new_df[KEY])[np.where(new_df[KEY]<130)[0]]).interpolate(method='quadratic')
peaks, _ = find_peaks(itpdf.iloc[:, -1].tolist(), height=10)
plt.plot(itpdf)
plt.plot(peaks, np.array(itpdf)[peaks], "x")
plt.plot(np.zeros_like(new_df[KEY]), "--", color="gray")
plt.show()
# after interpolation
# the red cross resembles peak points
# + id="s3UNP3zuA_rh" colab={"base_uri": "https://localhost:8080/", "height": 394} outputId="ad939cb9-cacb-41fc-a0d0-3bb194a90a17"
# clustering the peaks into 'proper reps' and 'wrong reps'
arr = np.array(itpdf.iloc[:, -1].tolist())[peaks]
kmeans = KMeans(n_clusters = 2)
preds = kmeans.fit_predict(arr.reshape(-1, 1))
df = pd.DataFrame(
{
'y_value': arr,
'cluster': preds
}
)
df
# + colab={"base_uri": "https://localhost:8080/"} id="S9bbaGydr7N9" outputId="fec394bf-5988-4b91-ec58-893bc6f49b27"
cluster0_mean = df[df['cluster']==0]['y_value'].mean()
cluster1_mean = df[df['cluster']==1]['y_value'].mean()
if cluster1_mean > cluster0_mean:
prop_reps = df[df['cluster']==1]['y_value'].count()
wrong_reps = df[df['cluster']==0]['y_value'].count()
print('Number of Proper Squat Reps : {}\nNumber of Wrong Squat Reps : {}\n\nNumber of Total Squat Reps : {}'.format(prop_reps, wrong_reps, len(df)))
else:
prop_reps = df[df['cluster']==0]['y_value'].count()
wrong_reps = df[df['cluster']==1]['y_value'].count()
print('Number of Proper Squat Reps : {}\nNumber of Wrong Squat Reps : {}\n\nNumber of Total Squat Reps : {}'.format(prop_reps, wrong_reps, len(df)))
# + [markdown] id="gB2RCyQKuody"
# # Testing the code with taking ' LEFT_EYE_y ' as input
# + id="s72C9pS9ULba" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="49e9c8b1-100b-4034-ae1e-57d3b2a8a287"
plt.figure(figsize=(20,6))
sns.boxplot(x=new_df['LEFT_EYE_y'])
plt.xticks(np.arange(-10, 200, 5))
# + colab={"base_uri": "https://localhost:8080/", "height": 574} id="XDfLzdlduLCS" outputId="2fef1fec-034c-419e-e94e-e0f3b4513173"
KEY = 'LEFT_EYE_y'
plt.figure(figsize=(12, 8))
itpdf = pd.DataFrame(np.array(new_df[KEY])[np.where(new_df[KEY]<116)[0]]).interpolate(method='quadratic')
peaks, _ = find_peaks(itpdf.iloc[:, -1].tolist(), height=10)
plt.plot(itpdf)
plt.plot(peaks, np.array(itpdf)[peaks], "x")
plt.plot(np.zeros_like(new_df[KEY]), "--", color="gray")
plt.show()
arr = np.array(itpdf.iloc[:, -1].tolist())[peaks]
kmeans = KMeans(n_clusters = 2)
preds = kmeans.fit_predict(arr.reshape(-1, 1))
df = pd.DataFrame(
{
'y_value': arr,
'cluster': preds
}
)
cluster0_mean = df[df['cluster']==0]['y_value'].mean()
cluster1_mean = df[df['cluster']==1]['y_value'].mean()
if cluster1_mean > cluster0_mean:
prop_reps = df[df['cluster']==1]['y_value'].count()
wrong_reps = df[df['cluster']==0]['y_value'].count()
print('\nNumber of Proper Squat Reps : {}\nNumber of Wrong Squat Reps : {}\n\nNumber of Total Squat Reps : {}'.format(prop_reps, wrong_reps, len(df)))
else:
prop_reps = df[df['cluster']==0]['y_value'].count()
wrong_reps = df[df['cluster']==1]['y_value'].count()
print('\nNumber of Proper Squat Reps : {}\nNumber of Wrong Squat Reps : {}\n\nNumber of Total Squat Reps : {}'.format(prop_reps, wrong_reps, len(df)))
# + [markdown] id="5VqtLjc6-hTU"
# # We got the same results
| CountSquats.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Exploratory Data Analysis (EDA)
# +
#Import the Libraries : pandas , numpy, seaborn, matplotlib.pyplot , scipy.stats
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
# %matplotlib inline
import scipy.stats as st
# Import warnings library and set it to ignore[so as to ignore warnings]
import warnings
warnings.filterwarnings('ignore')
#Display all the columns of the Dataframe
pd.pandas.set_option('display.max_columns',None)
#Display all Numbers of float datatype as numbers with 2 decimal places
pd.options.display.float_format = "{:.2f}".format
#from SciKit Learn import Models and metrics
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
from sklearn import ensemble
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import explained_variance_score
# -
df = pd.read_csv('data.csv')
# Verify that result of SQL query is stored in the DataFrame and Print the top 20 records
df.head(20)
#display information : number of rows and columns, Column or Feature name
#the total memory usage, the data type of each column, and the number of non-Null elements.
df.info()
# ## Observations (from the two tables displayed above):
# ## a) There are 2 Object datatypes: the 'condition' column is categorical, and 'date' is a date-time variable stored as a string whose last four digits are the year in YYYY format. We will convert both to int and then to float.
# ## b) There are 16 numerical datatypes, of which 'id' carries no information, while 'built' and 'renovation' are actually years, i.e. temporal variables.
# ## c) The home price ('price'), our prediction target, has only 21871 non-null values, so we can safely drop the rows where it is null.
# # 1. Data Cleaning and Preparation
# +
#We drop 'id' as it will not be used in the Analysis
df = df.drop('id',axis=1)
#we put in the df DataFrame only those values for all columns , for which 'price' Column has non-null or non-na values
#i.e. we execute on observation c) above
df = df[df['price'].notna()]
#get the unique vales of Column 'condition'
df['condition'].unique()
# -
#convert the 'condition' column to lower case so that e.g. FAIR and fair are treated as the same value;
#the 'none' entries will be handled in the data-prep stage
df['condition'] = [str(i).lower() for i in df['condition']]
# check the unique values
df['condition'].unique()
#we replace the 'condition' of the house with numerical rating ['excellent', 'good','fair','poor','terrible','none']
#with [5,4,3,2,1,0]
df['condition'] = df['condition'].replace(to_replace =['excellent', 'good','fair','poor','terrible','none'],
value =[5.0,4.0,3.0,2.0,1.0,0.0])
# +
#convert string or object number to numerical or int datatype
df['condition']=pd.to_numeric(df['condition'], errors='coerce').fillna(0, downcast='infer')
#convert to float datatype
df['condition']=df['condition'].astype(float)
# check the unique values in the DataFrame Column 'condition'
df['condition'].unique()
# +
#get the last 4 characters ie Year (YYYY) of the 'date' Column
df['date']=df['date'].str[-4:]
#get the unique values of date column
df['date'].unique()
# +
#convert string or object number to numerical or int datatype
df['date']=pd.to_numeric(df['date'], errors='coerce').fillna(0, downcast='infer')
#convert to float datatype
df['date']=df['date'].astype(float)
#get the unique values of date column
df['date'].unique()
# -
#get the unique values of 'built' or Year Built column
df['built'].unique()
# Number of unique 'built' values
len(df['built'].unique())
#get the unique values of 'renovation' or Renovation Year column
df['renovation'].unique()
# Number of unique 'renovation' values
len(df['renovation'].unique())
# ## Observation 1a)
# From the unique values of the three temporal variables above (date of sale, year built and renovation year) we observe that the sale date covers only two years (2014 and 2015), while year built and renovation year span a wide range from the early 1900s to the 2000s.
#display information : number of rows and columns, Column or Feature name
#the total memory usage, the data type of each column, and the number of non-Null elements.
df.info()
#descriptive statistics summary of all Numerical Features or Variables .
df.describe()
# # Observation 1b)
# From the table above we observe that the house price ('price') ranges from a minimum of 75k to a maximum of 7.7 million. Let's see whether it is normally distributed.
#Plot normal distribution of House Price
y = df['price']
plt.figure(figsize=(11,11)); plt.title('Normal')
sns.distplot(y, kde=False, fit=st.norm)
print("Skewness: %f" % df['price'].skew())
print("Kurtosis: %f" % df['price'].kurt())
# # Observation 1c)
# Skewness is a measure of the symmetry (or lack thereof) of a distribution. The plot above shows that the house price does not follow a normal distribution: it is highly skewed and strongly peaked (high kurtosis). So we need to transform the data before running our ML algorithms on it.
#
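# +
# Hedged sketch (not applied in the rest of this notebook): a log transform is one common way to
# reduce the skewness and kurtosis reported above for a strictly positive target such as 'price'.
log_price = np.log1p(df['price'])
print("Skewness after log1p: %f" % log_price.skew())
print("Kurtosis after log1p: %f" % log_price.kurt())
# -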
# # 2. Analysis and Plotting of House Price, Other Numerical and Temporal Variables, and Outliers
# +
# We plot the histogram (count on the y-axis vs feature value on the x-axis) for all 17 features to understand how
# they are distributed, whether they are continuous or discrete, and whether they contain outliers
features=df[['date','price', 'bedrooms', 'bathrooms', 'condition', 'floors', 'waterfront', 'view', 'review_score',
'basement_size', 'built', 'renovation', 'zipcode',
'latitude', 'longitude', 'living_room_size', 'lot_size']]
h = features.hist(bins=25,figsize=(16,16),xlabelsize='10',ylabelsize='10',xrot=-15)
sns.despine(left=True, bottom=True)
[x.title.set_size(12) for x in h.ravel()];
[x.yaxis.tick_left() for x in h.ravel()];
# -
# # Observation 2a)
# We observe from the histograms above that all the features are skewed and not normally distributed
# +
# Year or temporal features (the temporal variables noted in the observations above)
year_feature = ['date','built','renovation']
# Numerical variables are of 2 types: continuous and discrete. We define a discrete feature as one with
# fewer than 25 unique values; otherwise it is treated as a continuous feature
discrete_feature=[f for f in df.columns if len(df[f].unique())<25 and f not in
year_feature]
print("Discrete Variables Count: {}".format(len(discrete_feature)))
continuous_feature=[f for f in df.columns if len(df[f].unique())>=25 and f not in
year_feature]
print("Continuous Variables Count: {}".format(len(continuous_feature)))
# -
# discrete features
discrete_feature
# continuous features
continuous_feature
# +
## Let's analyze the temporal (date-time or year) variables
## We will check whether there is a relation between the year the house was sold and the house price
df.groupby('date')['price'].median().plot()
plt.xlabel('Year Sold')
plt.ylabel('Median House Price')
plt.title("Median House Price vs Date of Sale")
# -
#we plot the Scatter Plot of House Price(price) vs Year or Temporal variable/feature
for feature in year_feature:
data=df.copy()
plt.scatter(data[feature],data['price'])
plt.xlabel(feature)
plt.ylabel('price')
plt.show()
# # Observation 2b)
# We observe that 'renovation' has many zeros and outliers, so its missing values should be filled with the median. To reduce the three temporal features to two we will do feature engineering in section 3 below.
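# +
# Hedged preview (shown on a copy so the DataFrame used below is untouched) of the temporal feature
# engineering performed in section 3: express 'built' and 'renovation' relative to the sale year
# instead of as absolute years.
_tmp = df.copy()
_tmp['built'] = _tmp['date'] - _tmp['built']            # age of the house at sale
_tmp['renovation'] = _tmp['date'] - _tmp['renovation']  # equals the sale year when 'renovation' was recorded as 0
print(_tmp[['date', 'built', 'renovation']].head())
# -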
#we plot the median House Price(price) vs Dicrete features variable
for feature in discrete_feature:
data=df.copy()
data.groupby(feature)['price'].median().plot.bar()
plt.xlabel(feature)
plt.ylabel('price')
plt.title(feature)
plt.show()
# # Observation 2c)
# The plots above show a roughly linear relationship between the median price and the discrete features, except for waterfront, view and condition, which have many zero values
# +
#we plot the Scatter Plot of House Price(price) vs Continuous Features (except Longitude)
continuous_feature1 = continuous_feature
#remove price from continuous feature
continuous_feature1.remove('price')
#remove longitude from the continuous features as its values are negative and would break the log transform
continuous_feature1.remove('longitude')
for feature in continuous_feature1:
data=df.copy()
data[feature]=np.log(data[feature])
data['price']=np.log(data['price'])
plt.scatter(data[feature],data['price'])
plt.xlabel(feature)
plt.ylabel('price')
plt.title(feature)
plt.show()
# -
#taking the scatter plot of 'price' vs 'longitude'
data=df.copy()
data['longitude']=np.log(data['longitude'].abs())
data['price']=np.log(data['price'])
plt.scatter(data['longitude'],data['price'])
plt.xlabel('longitude')
plt.ylabel('price')
plt.title('longitude')
plt.show()
# +
#we plot the Boxplot of all continuous features to see which features have outliers
for feature in continuous_feature1:
data=df.copy()
data[feature]=np.log(data[feature])
data.boxplot(column=feature)
plt.ylabel(feature)
plt.title(feature)
plt.show()
# -
#taking the box plot of 'longitude'
data=df.copy()
data['longitude']=np.log(data['longitude'].abs())
data.boxplot(column='longitude')
plt.ylabel('longitude')
plt.title('longitude')
plt.show()
# # Observation 2d)
# From the series of scatter plots and box plots above (and the histograms of all variables at the start) we observe an essentially linear relationship between price and the continuous features, along with many outliers
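# +
# Hedged illustration (not applied elsewhere in this notebook): IQR-based capping is one standard
# way to tame the outliers visible in the box plots above, shown here for 'lot_size'.
q1, q3 = df['lot_size'].quantile([0.25, 0.75])
iqr = q3 - q1
capped_lot_size = df['lot_size'].clip(lower=q1 - 1.5 * iqr, upper=q3 + 1.5 * iqr)
print(capped_lot_size.describe())
# -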
# # 3. Missing Values and Feature Engineering
# +
## In Missing Values we check the percentage of nan values present in each feature (all are numerical features)
data=df.copy()
## Step 1 make the list of features which has missing values
features_with_na=[features for features in df.columns if data[features].isnull().sum()>1]
## Step 2 print the feature name and the percentage of missing values
for feature in features_with_na:
print(feature, np.round(data[feature].isnull().mean()*100, 2), '% missing values')
# -
# From Observations 2a)-2d) we conclude that we should fill all the NaN/null values with the median
df = df.fillna(df.median())
## Temporal Variables (Date Time Variables) Feature Engineering as per Observation 2b)
for feature in ['built','renovation']:
df[feature]=df['date']-df[feature]
#We Drop the date Column as we have already performed Feature Engineering on it
df = df.drop('date',axis=1)
#display information : number of rows and columns, Column or Feature name
#the total memory usage, the data type of each column, and the number of non-Null elements.
df.info()
df.head(20)
# # 4. Regression Algorithms to Predict
# train(=Independent variables) and label(=target variable)
label = df['price']
train = df.drop(['price'],axis=1)
# train and test split
x_train , x_test , y_train , y_test = train_test_split(train , label , test_size = 0.15,random_state =2)
# Model - Multiple Linear Regression
mlr = LinearRegression()
# Run fit fuction
mlr.fit(x_train,y_train)
#R Squared Score
mlr_score=mlr.score(x_test,y_test)
#Run Predict on test set
pred_mlr = mlr.predict(x_test)
#get explained variance (measure of Accuracy of Prediction)
expl_mlr = explained_variance_score(pred_mlr,y_test)
#Model - ensemble - Gradient Boosting
gbm = ensemble.GradientBoostingRegressor(n_estimators = 400, max_depth = 5, min_samples_split = 2,
learning_rate = 0.1, loss = 'ls')
#run fit
gbm.fit(x_train, y_train)
#R Squared Score
gbm_score = gbm.score(x_test,y_test)
#Run Predict on test set
pred_gbm = gbm.predict(x_test)
#get explained variance (measure of Accuracy of Prediction)
expl_gbm = explained_variance_score(pred_gbm,y_test)
# Decision Tree Algorthm
tr_regressor = DecisionTreeRegressor(random_state=0)
#run fit function
tr_regressor.fit(x_train,y_train)
#R Squared Score
decision_score=tr_regressor.score(x_test,y_test)
#Run Predict on test set
pred_tr = tr_regressor.predict(x_test)
#get explained variance (measure of Accuracy of Prediction)
expl_tr = explained_variance_score(pred_tr,y_test)
# # 5. Classification Algorithm to Predict
#Random Forest (note: RandomForestRegressor is used here, i.e. a regression model, despite the section title)
rf_classifier= RandomForestRegressor(random_state=0)
#run fit function
rf_classifier.fit(x_train,y_train)
#R Squared Score
rf_score = rf_classifier.score(x_test,y_test)
#Run Predict on test set
rf_pred = rf_classifier.predict(x_test)
#get explained variance (measure of Accuracy of Prediction)
expl_rf = explained_variance_score(rf_pred,y_test)
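# +
# Hedged side note (not in the original notebook): the section title mentions classification, but
# RandomForestRegressor above is a regression model. A genuine classification setup would need a
# categorical target, for example price bins:
price_bins = pd.qcut(label, q=3, labels=['low', 'mid', 'high'])
xc_train, xc_test, yc_train, yc_test = train_test_split(train, price_bins, test_size=0.15, random_state=2)
rf_clf = RandomForestClassifier(random_state=0).fit(xc_train, yc_train)
print("Classification accuracy on binned prices:", rf_clf.score(xc_test, yc_test))
# -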
models_score =pd.DataFrame({'Model':['Multiple Linear Regression','Gradient Boosting Method','Decision Tree','Random forest'],
'R-Squared Score':[mlr_score,gbm_score,decision_score,rf_score],
'Explained Variance Score':[expl_mlr,expl_gbm,expl_tr,expl_rf]
})
models_score.sort_values(by='R-Squared Score',ascending=False)
| exploratory-data-analysis.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="PcJDApDcK0O9" colab_type="code" outputId="717a39fb-aea4-4572-c5e1-2f998ae70efc" executionInfo={"status": "ok", "timestamp": 1591691165716, "user_tz": -270, "elapsed": 5517, "user": {"displayName": "Micro Artificial Intelligence", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gjd4jgZ1lHNuBBnQK5f7tGnhbrjuQkXvspvHTDj=s64", "userId": "15902609544232068748"}} colab={"base_uri": "https://localhost:8080/", "height": 119}
# !git clone https://github.com/parhamzm/Beijing-Pollution-DataSet
# + id="0-KcI9tFLGXP" colab_type="code" outputId="295a5722-b5bf-4e86-e39b-b88c53f61b4b" executionInfo={"status": "ok", "timestamp": 1591691168604, "user_tz": -270, "elapsed": 8375, "user": {"displayName": "Micro Artificial Intelligence", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gjd4jgZ1lHNuBBnQK5f7tGnhbrjuQkXvspvHTDj=s64", "userId": "15902609544232068748"}} colab={"base_uri": "https://localhost:8080/", "height": 34}
# !ls Beijing-Pollution-DataSet
# + id="9NuzKJEnLNyx" colab_type="code" colab={}
import torch
import torchvision
import torch.nn as nn
from torchvision import transforms
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from torch.utils.data import random_split
from math import sqrt
from numpy import concatenate
from matplotlib import pyplot
from pandas import read_csv
from pandas import DataFrame
from pandas import concat
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import LabelEncoder
from sklearn.metrics import mean_squared_error
from numpy import array
from numpy import hstack
# + [markdown] id="Q_0pmTGAHWmf" colab_type="text"
# # **Data Pre Processing**
# + id="GjcwsJBc9eIG" colab_type="code" outputId="41f58226-44ad-47e5-d402-d0f2e166661f" executionInfo={"status": "ok", "timestamp": 1591691171804, "user_tz": -270, "elapsed": 11549, "user": {"displayName": "Micro Artificial Intelligence", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gjd4jgZ1lHNuBBnQK5f7tGnhbrjuQkXvspvHTDj=s64", "userId": "15902609544232068748"}} colab={"base_uri": "https://localhost:8080/", "height": 51}
DATA_DIR = "Beijing-Pollution-DataSet/"
from pandas import read_csv
from datetime import datetime
from random import randint
def select_week(sequences, n_samples=250):
    """Build weekly samples: the same hour on the first six days of a week is the input,
    and the pollution value at that hour on the seventh day is the target."""
    X, y = list(), list()
    rand_hour = randint(0, 24)  # randomly chosen hour of the day, fixed for all samples
    for i in range(0, n_samples):
        start_ix = rand_hour + 168 * i  # 168 hours = 1 week
        idxs = []
        for j in range(0, 7):
            if j <= 5:
                idx = start_ix + (j * 24)  # same hour on days 0..5 of the week
                idxs.append(idx)
            if j == 6:  # day 6 is the target day
                idy = start_ix + (j * 24)
        seq_x = sequences[idxs, :]   # shape: (6 days, n_features)
        seq_y = sequences[idy, 0]    # pollution value one day after the last input day
        y.append(seq_y)
        X.append(seq_x)
    return X, y
# split a multivariate sequence into samples
def split_sequences(sequences, n_steps, n_samples=12000, start_from=0):
X, y = list(), list()
for i in range(start_from, (start_from + n_samples)):
# find the end of this pattern
end_ix = i + n_steps
# check if we are beyond the dataset
# if end_ix > len(sequences):
# break
# gather input and output parts of the pattern
seq_x = sequences[i:end_ix, :]
seq_y = sequences[end_ix, 0]
y.append(seq_y)
X.append(seq_x)
return array(X), array(y)
# load dataset
DATA_DIR = "Beijing-Pollution-DataSet/"
data = np.load(DATA_DIR + 'polution_dataSet.npy')
scaled_data = data
x, y = select_week(data, n_samples=260)
print("X shape => ", np.array(x).shape)
print("y shape => ", np.array(y).shape)
x = np.array(x)
y = np.array(y)
dataset = data
train_X, train_y = x[0:210], y[0:210] #split_sequences(dataset, n_timesteps, n_samples=15000, start_from=0)
valid_X, valid_y = x[210:260], y[210:260] #split_sequences(dataset, n_timesteps, n_samples=3000, start_from=15000)
# + id="sIihkChW80JC" colab_type="code" outputId="261b7a7e-42eb-4ab8-c182-e6d93a1c1a64" executionInfo={"status": "ok", "timestamp": 1591691171806, "user_tz": -270, "elapsed": 11535, "user": {"displayName": "Micro Artificial Intelligence", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gjd4jgZ1lHNuBBnQK5f7tGnhbrjuQkXvspvHTDj=s64", "userId": "15902609544232068748"}} colab={"base_uri": "https://localhost:8080/", "height": 68}
test_loader_X = torch.utils.data.DataLoader(dataset=(train_X), batch_size=20, shuffle=False)
# train_X = torch.tensor(train_X, dtype=torch.float32)
# train_y = torch.tensor(train_y, dtype=torch.float32)
print("Train X Shape :=> ", train_X.shape)
print("Train Y Shape :=> ", train_y.shape)
print("####################################")
# print("Test X Shape :=> ", test_X.shape)
# print("Test Y Shape :=> ", test_y.shape)
# + id="pDpTe3gIYsW4" colab_type="code" colab={}
class LSTM(torch.nn.Module):
def __init__(self, n_features=8, n_output=1, seq_length=11, n_hidden_layers=233, n_layers=1):
super(LSTM, self).__init__()
self.n_features = n_features
self.seq_len = seq_length
self.n_hidden = n_hidden_layers # number of hidden states
self.n_layers = n_layers # number of LSTM layers (stacked)
self.n_output = n_output
self.l_lstm = torch.nn.LSTM(input_size = n_features,
hidden_size = self.n_hidden,
num_layers = self.n_layers,
batch_first = True)
# according to pytorch docs LSTM output is
# (batch_size, seq_len, num_directions * hidden_size)
# when considering batch_first = True
self.l_linear = torch.nn.Linear(self.n_hidden * self.seq_len, self.n_output)
def forward(self, x):
hidden_state = torch.zeros(self.n_layers, x.size(0), self.n_hidden).requires_grad_()
cell_state = torch.zeros(self.n_layers, x.size(0), self.n_hidden).requires_grad_()
self.hidden = (hidden_state.detach(), cell_state.detach())
batch_size, seq_len, _ = x.size()
lstm_out, self.hidden = self.l_lstm(x, self.hidden)
# lstm_out(with batch_first = True) is
# (batch_size,seq_len,num_directions * hidden_size)
# for following linear layer we want to keep batch_size dimension and merge rest
# .contiguous() -> solves tensor compatibility error
x = lstm_out.contiguous().view(batch_size, -1)
# print("X shape :=> ", x.shape)
# out = self.l_linear(lstm_out[:, -1, :])
# print("Out Shape :=> ", lstm_out[:, -1, :].shape)
out = self.l_linear(x)
return out
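# +
# Quick shape check (hedged example, not part of the original notebook): run a dummy batch through
# the network to confirm that a (batch, seq_len, n_features) input produces a (batch, n_output)
# prediction, matching the shape comments in forward() above.
_tmp_model = LSTM(n_features=8, n_output=1, seq_length=6, n_hidden_layers=16, n_layers=1)
_dummy = torch.zeros(4, 6, 8)       # batch of 4, 6 time steps, 8 features
print(_tmp_model(_dummy).shape)     # expected: torch.Size([4, 1])
# -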
# + id="PCxQTgWnavOJ" colab_type="code" colab={}
torch.manual_seed(13)
model = LSTM(n_features=8, n_output=1, seq_length=6, n_hidden_layers=233, n_layers=1)
criterion = nn.L1Loss()
optimizer = torch.optim.Adam(model.parameters(), lr=0.0003)
# + id="oWbmF4vLbJ5N" colab_type="code" outputId="cca564cf-7639-47a8-9141-6581c3b5e127" executionInfo={"status": "ok", "timestamp": 1591691171810, "user_tz": -270, "elapsed": 11507, "user": {"displayName": "Micro Artificial Intelligence", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gjd4jgZ1lHNuBBnQK5f7tGnhbrjuQkXvspvHTDj=s64", "userId": "15902609544232068748"}} colab={"base_uri": "https://localhost:8080/", "height": 119}
model = model #.to(device)
criterion = criterion #.to(device)
for p in model.parameters():
print(p.numel())
# + id="5R__1vOLzB-i" colab_type="code" outputId="71115d0b-8b51-4443-e2a0-a0721f32e6a5" executionInfo={"status": "ok", "timestamp": 1591691237976, "user_tz": -270, "elapsed": 77656, "user": {"displayName": "Micro Artificial Intelligence", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gjd4jgZ1lHNuBBnQK5f7tGnhbrjuQkXvspvHTDj=s64", "userId": "15902609544232068748"}} colab={"base_uri": "https://localhost:8080/", "height": 1000}
import time
start_time = time.time()
# train_X, train_y
epochs = 200
model.train()
batch_size = 5
running_loss_history = []
val_running_loss_history = []
for epoch in range(epochs):
running_loss = 0.0
val_running_loss = 0.0
model.train()
for b in range(0, len(train_X), batch_size):
inpt = train_X[b:b+batch_size, :, :]
target = train_y[b:b+batch_size]
# print("Input Shape :=> ", inpt.shape)
x_batch = torch.tensor(inpt, dtype=torch.float32)
y_batch = torch.tensor(target, dtype=torch.float32)
output = model(x_batch)
loss = criterion(output.view(-1), y_batch)
running_loss += loss.item()
loss.backward()
optimizer.step()
optimizer.zero_grad()
else:
        with torch.no_grad(): # temporarily disables gradient tracking for the validation pass
model.eval()
for b in range(0, len(valid_X), batch_size):
inpt = valid_X[b:b+batch_size, :, :]
target = valid_y[b:b+batch_size]
x_batch_test = torch.tensor(inpt, dtype=torch.float32)
y_batch_test = torch.tensor(target, dtype=torch.float32)
# model.init_hidden(x_batch_test.size(0))
output_test = model(x_batch_test)
loss_test = criterion(output_test.view(-1), y_batch_test)
val_running_loss += loss_test.item()
val_epoch_loss = val_running_loss / len(valid_X)
val_running_loss_history.append(val_epoch_loss)
epoch_loss = running_loss / len(valid_X)
running_loss_history.append(epoch_loss)
print('step : ' , epoch , ' Train loss : ' , epoch_loss, ', Valid Loss : => ', val_epoch_loss)
print("***->>>-----------------------------------------------<<<-***")
total_time = time.time() - start_time
print("===========================================================")
print("*********************************************************")
print("The total Training Time is Equal with ==> : {0} Sec.".format(total_time))
print("*********************************************************")
print("===========================================================")
# + id="H4jiSUbWu1fQ" colab_type="code" outputId="92565e94-1916-42d5-961b-4368a8e30db8" executionInfo={"status": "ok", "timestamp": 1591691238541, "user_tz": -270, "elapsed": 78207, "user": {"displayName": "Micro Artificial Intelligence", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gjd4jgZ1lHNuBBnQK5f7tGnhbrjuQkXvspvHTDj=s64", "userId": "15902609544232068748"}} colab={"base_uri": "https://localhost:8080/", "height": 462}
f, ax = plt.subplots(1, 1, figsize=(10, 7))
plt.title("Valid & Test Loss", fontsize=18)
plt.xlabel("Epoch")
plt.ylabel("Loss")
plt.plot(running_loss_history, label='train')
plt.plot(val_running_loss_history, label='test')
# pyplot.plot(history.history['val_loss'], label='test')
plt.legend()
plt.show()
# + id="uweASFbTzUrP" colab_type="code" colab={}
test_x, test_y = x[200:], y[200:]
model.eval()
test_x = torch.tensor(test_x, dtype=torch.float32)
test_y = torch.tensor(test_y, dtype=torch.float32)
res = model(test_x)
loss_test = criterion(res.view(-1), test_y)
future = 100
window_size = 11
# + id="8RWHd-TLJkVp" colab_type="code" outputId="bb3ad0d8-a20b-40e0-fa4c-558ef247f9cb" executionInfo={"status": "ok", "timestamp": 1591691239457, "user_tz": -270, "elapsed": 79106, "user": {"displayName": "Micro Artificial Intelligence", "photoUrl": "<KEY>", "userId": "15902609544232068748"}} colab={"base_uri": "https://localhost:8080/", "height": 447}
fig = plt.figure(figsize=(20, 7))
plt.title("Beijing Polution Prediction - LSTM", fontsize=18)
plt.ylabel('Polution')
plt.xlabel('Num data')
plt.grid(True)
plt.autoscale(axis='x', tight=True)
fig.autofmt_xdate()
# plt.plot(data[15000:15100, 0])
plt.plot(test_y, label="Real")
# plt.plot(preds[12:])
print(res.shape)
plt.plot(res.detach().numpy(), label="Prediction")
plt.legend()
plt.show()
# + id="gWZk1LYA7dS_" colab_type="code" outputId="d11d2bdd-f6c8-4f30-c500-5cd67308b9b0" executionInfo={"status": "ok", "timestamp": 1591691239458, "user_tz": -270, "elapsed": 79094, "user": {"displayName": "Micro Artificial Intelligence", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gjd4jgZ1lHNuBBnQK5f7tGnhbrjuQkXvspvHTDj=s64", "userId": "15902609544232068748"}} colab={"base_uri": "https://localhost:8080/", "height": 136}
test_x, test_y = x[50:], y[50:]
model.eval()
test_running_loss = 0
with torch.no_grad(): # temporarily disables gradient tracking for evaluation
model.eval()
for b in range(0, len(test_x), batch_size):
inpt = test_x[b:b+batch_size, :, :]
target = test_y[b:b+batch_size]
x_batch_test = torch.tensor(inpt, dtype=torch.float32)
y_batch_test = torch.tensor(target, dtype=torch.float32)
# model.init_hidden(x_batch_test.size(0))
output_test = model(x_batch_test)
loss_test = criterion(output_test.view(-1), y_batch_test)
test_running_loss += loss_test.item()
test_epoch_loss = test_running_loss / len(test_x)
print("##########################################################")
print(">>>>---------------------------------------------------<<<<")
print(">>>>----------***************************--------------<<<<")
print("**** Test Loss :==>>> ", test_epoch_loss)
print(">>>>----------***************************--------------<<<<")
print(">>>>---------------------------------------------------<<<<")
print("##########################################################")
# + [markdown] id="2wUlb4EkirD2" colab_type="text"
# # **Predict Only 12 & 24 Times!**
# + id="EdVj6XlI5Guj" colab_type="code" colab={}
# split a multivariate sequence into samples
def split_sequences12(sequences, n_steps, n_samples=12000, start_from=0):
X, y = list(), list()
j = 0
for i in range(start_from, (start_from + n_samples)):
# find the end of this pattern
end_ix = j*12 + n_steps + start_from
        # check if we are beyond the dataset
        if end_ix >= len(sequences):
            break
# gather input and output parts of the pattern
j = j + 1
seq_x = sequences[end_ix-11:end_ix, :]
seq_y = sequences[end_ix, 0]
y.append(seq_y)
X.append(seq_x)
print("End :=> ", end_ix)
return array(X), array(y)
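# +
# A quick sanity check of the splitter on tiny synthetic data (hypothetical values, only to
# illustrate the 12-step stride and the 11-snapshot window per sample). Assumes numpy's
# `array` is already imported, as used inside the function above.
demo = array([[float(i), float(-i)] for i in range(40)])
Xd, yd = split_sequences12(demo, n_steps=12, n_samples=2, start_from=0)
print(Xd.shape, yd.shape)   # expected: (2, 11, 2) (2,)
# -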
| Q1 PartD/Q1 PartD - Weekly/MiniProj_LSTM_Adam_MAE_Q1_PartD_Pytorch_Weekly.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import pandas as pd
from scipy.optimize import curve_fit
import mdtraj as md
import sys
def calc_NHVecs(traj_file, top_file, start_snap=0, end_snap=-1):
"""
Uses mdtraj to load the trajectory and get the atomic indices and coordinates to calculate the correlation functions.
    For each trajectory, load it using mdtraj, get the atomic indices for the N-H atoms and calculate the vector between the two.
Append the vector to the NHVecs list for all the trajectories.
NHVecs should return a list of shape: (# Trajectories, # Snapshots, # Residues w/N-H Vectors, 3)
"""
traj = md.load(traj_file, top=top_file)
top = traj.topology
##AtomSelection Indices
Nit = top.select('name N and not resname PRO') ## PRO residue do not have N-H vectors
Hyd = top.select('name H and not resname PRO')
NH_Pair = [[i,j] for i,j in zip(Nit,Hyd)]
NH_Pair_Name = [[top.atom(i),top.atom(j)] for i,j in NH_Pair]
NH_Res = ["{}-{}{}".format(str(i).split('-')[0],str(i).split('-')[1], str(j).split('-')[1]) for i,j in NH_Pair_Name]
##Generate the N-H vectors in Laboratory Frame
NHVecs_tmp = np.take(traj.xyz, Hyd, axis=1) - np.take(traj.xyz, Nit, axis=1)
sh = list(NHVecs_tmp.shape)
sh[2] = 1
NHVecs_tmp = NHVecs_tmp / np.linalg.norm(NHVecs_tmp, axis=2).reshape(sh)
return NHVecs_tmp[start_snap:end_snap]
def split_NHVecs(nhvecs, dt, tau):
"""
This function will split the trajectory in chunks defined by tau.
nhvecs = array of N-H bond vectors,
dt = timestep of the simulation
tau = length of chunks
"""
nFiles = len(nhvecs) ## number of trajectories
nFramesPerChunk = int(tau/dt) ###tau/timestep
used_frames = np.zeros(nFiles,dtype=int)
remainingFrames = np.zeros(nFiles,dtype=int)
for i in range(nFiles):
nFrames = nhvecs[i].shape[0]
used_frames[i] = int(nFrames/nFramesPerChunk)*nFramesPerChunk
remainingFrames[i] = nFrames % nFramesPerChunk
nFramesTot=int(used_frames.sum())
    out = np.zeros((nFramesTot, nhvecs[0].shape[1], nhvecs[0].shape[2]), dtype=nhvecs[0].dtype)
start = 0
for i in range(nFiles):
end = int(start+used_frames[i])
endv = int(used_frames[i])
out[start:end,...] = nhvecs[i][0:endv,...]
start = end
sh = out.shape
vecs = out.reshape((int(nFramesTot/nFramesPerChunk), nFramesPerChunk, sh[-2], sh[-1]))
return vecs
def calc_Ct(nhvecs):
"""
Calculates the correlation function of the N-H bond vectors found in nhvecs.
Direct space calculation. This could be changed to Fourier space calculation for increased speed.
LICENSE INFO:
MIT License
Copyright (c) 2017 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
sh = nhvecs.shape
nReplicates=sh[0] ; nDeltas=int(sh[1]/2) ; nResidues=sh[2]
Ct = np.zeros( (nDeltas, nResidues), dtype=nhvecs.dtype )
dCt = np.zeros( (nDeltas, nResidues), dtype=nhvecs.dtype )
for delta in range(1,1+nDeltas):
nVals=sh[1]-delta
# = = Create < vi.v'i > with dimensions (nRep, nFr, nRes, 3) -> (nRep, nFr, nRes) -> ( nRep, nRes ), then average across replicates with SEM.
tmp = -0.5 + 1.5 * np.square( np.einsum( 'ijkl,ijkl->ijk', nhvecs[:,:-delta,...] , nhvecs[:,delta:,...] ) )
tmp = np.einsum( 'ijk->ik', tmp ) / nVals
Ct[delta-1] = np.mean( tmp, axis=0 )
dCt[delta-1] = np.std( tmp, axis=0 ) / ( np.sqrt(nReplicates) - 1.0 )
return Ct, dCt
def _bound_check(func, params):
"""
Checks if the fit returns a sum of the amplitudes greater than 1.
MIT License
Copyright (c) 2017 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
if len(params) == 1:
return False
elif len(params) %2 == 0 :
s = sum(params[0::2])
return (s>1)
else:
s = params[0]+sum(params[1::2])
return (s>1)
def calc_chi(y1, y2, dy=[]):
"""
Calculates the chi^2 difference between the predicted model and the actual data.
LICENSE INFO:
MIT License
Copyright (c) 2017 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
    if len(dy) != 0:
return np.sum( (y1-y2)**2.0/dy )/len(y1)
else:
return np.sum( (y1-y2)**2.0 )/len(y1)
# +
## Functions 1,3,5,7,9 are the functions that the sum of coefficients are equal to 1. They have one less parameter.
## Functions 2,4,6,8,10 are the functions where the sum of coefficients are not restricted.
def func_exp_decay1(t, tau_a):
return np.exp(-t/tau_a)
def func_exp_decay2(t, A, tau_a):
return A*np.exp(-t/tau_a)
def func_exp_decay3(t, A, tau_a, tau_b):
return A*np.exp(-t/tau_a) + (1-A)*np.exp(-t/tau_b)
def func_exp_decay4(t, A, tau_a, B, tau_b ):
return A*np.exp(-t/tau_a) + B*np.exp(-t/tau_b)
def func_exp_decay5(t, A, tau_a, B, tau_b, tau_g ):
return A*np.exp(-t/tau_a) + B*np.exp(-t/tau_b) + (1-A-B)*np.exp(-t/tau_g)
def func_exp_decay6(t, A, tau_a, B, tau_b, G, tau_g ):
return A*np.exp(-t/tau_a) + B*np.exp(-t/tau_b) + G*np.exp(-t/tau_g)
def func_exp_decay7(t, A, tau_a, B, tau_b, G, tau_g, tau_d):
return A*np.exp(-t/tau_a) + B*np.exp(-t/tau_b) + G*np.exp(-t/tau_g) + (1-A-B-G)*np.exp(-t/tau_d)
def func_exp_decay8(t, A, tau_a, B, tau_b, G, tau_g, D, tau_d):
return A*np.exp(-t/tau_a) + B*np.exp(-t/tau_b) + G*np.exp(-t/tau_g) + D*np.exp(-t/tau_d)
def func_exp_decay9(t, A, tau_a, B, tau_b, G, tau_g, D, tau_d, tau_e):
return A*np.exp(-t/tau_a) + B*np.exp(-t/tau_b) + G*np.exp(-t/tau_g) + D*np.exp(-t/tau_d) + (1-A-B-G-D)*np.exp(-t/tau_e)
def func_exp_decay10(t, A, tau_a, B, tau_b, G, tau_g, D, tau_d, E, tau_e):
return A*np.exp(-t/tau_a) + B*np.exp(-t/tau_b) + G*np.exp(-t/tau_g) + D*np.exp(-t/tau_d) + E*np.exp(-t/tau_e)
# -
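# +
# A quick numerical check of the normalization described above (hypothetical amplitudes):
# the constrained forms return exactly 1 at t=0, while the unconstrained forms return the
# sum of their amplitudes.
print(func_exp_decay3(0.0, 0.3, 1.0, 5.0))       # 0.3 + (1 - 0.3) = 1.0
print(func_exp_decay4(0.0, 0.3, 1.0, 0.5, 5.0))  # 0.3 + 0.5 = 0.8
# -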
def _return_parameter_names(num_pars):
"""
Function that returns the names of the parameters for writing to the dataframe after the fit.
num_pars is the number of parameters in the fit. 1,3,5,7,9 are the num_params that constrain the fit.
while the even numbers are the parameters for the functions that don't constrain the fits.
LICENSE INFO:
MIT License
Copyright (c) 2017 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
if num_pars==1:
return ['C_a', 'tau_a']
elif num_pars==2:
return ['C_a', 'tau_a']
elif num_pars==3:
return ['C_a', 'tau_a', 'tau_b']
elif num_pars==4:
return ['C_a', 'tau_a', 'C_b', 'tau_b']
elif num_pars==5:
return ['C_a', 'tau_a', 'C_b', 'tau_b', 'tau_g']
elif num_pars==6:
return ['C_a', 'tau_a', 'C_b', 'tau_b', 'C_g', 'tau_g']
elif num_pars==7:
return ['C_a', 'tau_a', 'C_b', 'tau_b', 'C_g', 'tau_g', 'tau_d']
elif num_pars==8:
return ['C_a', 'tau_a', 'C_b', 'tau_b', 'C_g', 'tau_g', 'C_d', 'tau_d']
elif num_pars==9:
return ['C_a', 'tau_a', 'C_b', 'tau_b', 'C_g', 'tau_g', 'C_d', 'tau_d', 'tau_e']
elif num_pars==10:
return [ 'C_a', 'tau_a', 'C_b', 'tau_b', 'C_g', 'tau_g', 'C_d', 'tau_d', 'C_e', 'tau_e']
return []
def do_Expstyle_fit2(num_pars, x, y, dy=[], tau_mem=50.):
"""
Performs the exponential fit on the function defined by num_pars using scipy optimize curve fit.
Provides initial guesses for the amplitudes and the correlation times.
Takes the number of parameters, x values, y values, error in the y (dy), and tau_mem.
Tau_mem to help scale the initial guesses
Can also be set to np.inf if you want no bounds.
Returns, the Chi-squared value of the fit to the model along with the parameter values (popt),
the parameter error (popv) and the model itself.
"""
b1_guess = y[0]/num_pars/2
t1_guess = [tau_mem/1280.0, tau_mem/640.0, tau_mem/64.0, tau_mem/8.0]
if num_pars==1:
func=func_exp_decay1
guess=(t1_guess[2])
bound=(0.,np.inf)
elif num_pars==2:
func=func_exp_decay2
guess=(b1_guess, t1_guess[2])
bound=([0.0, x[0]],[1., np.inf])
elif num_pars==3:
func=func_exp_decay3
guess=(b1_guess, t1_guess[3], t1_guess[2])
bound=([0.0,x[0],x[0]],[1., np.inf, np.inf])
elif num_pars==4:
func=func_exp_decay4
guess=(b1_guess, t1_guess[3], b1_guess, t1_guess[2])
bound=([0.0, x[0], 0.0, x[0]],[1., np.inf, 1., np.inf])
elif num_pars==5:
func=func_exp_decay5
guess=(b1_guess, t1_guess[3], b1_guess, t1_guess[2], t1_guess[1])
bound=([0.0, x[0], 0.0, x[0],x[0]],[1., np.inf, 1., np.inf, np.inf])
elif num_pars==6:
func=func_exp_decay6
guess=(b1_guess, t1_guess[3], b1_guess, t1_guess[2], b1_guess, t1_guess[1])
bound=([0.0, x[0], 0.0, x[0], 0.0, x[0]],[1., np.inf, 1., np.inf, 1., np.inf])
elif num_pars==7:
func=func_exp_decay7
guess=(b1_guess, t1_guess[2], b1_guess, t1_guess[1], b1_guess, t1_guess[0],
t1_guess[3])
bound=([0.0, x[0], 0.0, x[0], 0.0, x[0], x[0]],[1., np.inf, 1., np.inf, 1., np.inf, np.inf])
elif num_pars==8:
func=func_exp_decay8
guess=(b1_guess, t1_guess[3], b1_guess, t1_guess[2], b1_guess, t1_guess[1],
b1_guess, t1_guess[0])
bound=([0.0, x[0], 0.0, x[0], 0.0, x[0], 0.0, x[0]],[1., np.inf, 1., np.inf, 1., np.inf, 1., np.inf])
    if len(dy) != 0:
popt, popv = curve_fit(func, x, y, p0=guess, sigma=dy, bounds=bound, method='trf', loss='soft_l1')
else:
popt, popv = curve_fit(func, x, y, p0=guess, bounds=bound, loss='soft_l1')
ymodel=[ func(x[i], *popt) for i in range(len(x)) ]
#print ymodel
bExceed=_bound_check(func, popt)
if bExceed:
        print("= = = WARNING, curve fitting in do_LSstyle_fit returns a sum>1.//", file=sys.stderr)
return 9999.99, popt, np.sqrt(np.diag(popv)), ymodel
else:
return calc_chi(y, ymodel, dy), popt, popv, ymodel
def findbest_Expstyle_fits2(x, y, taum=150.0, dy=[], bPrint=True, par_list=[2,3,5,7], threshold=1.0):
"""
    Function tries to find the best set of parameters to describe the correlation function for each residue.
Takes the x,y values for the fit and the errors, dy. par_list is the number of parameters to check,
threshold is the cutoff for the chi2. This is the old way of checking, but can be re-implemented.
Runs the fit for a given parameter by calling do_Expstyle_fit3. The initial fit is chosen, but
subsequent fits are chosen with a strict criteria based on the ratio of the number of parameters from
the current best fit and the latest fit.
Returns the chi^2, names of the parameters, parameters, errors, model, and covariance matrix of the best fit.
"""
chi_min=np.inf
# Search forwards
print('Starting New Fit')
for npars in par_list:
print(npars)
names = _return_parameter_names(npars)
try:
chi, params, covarMat, ymodel = do_Expstyle_fit2(npars, x, y, dy, taum)
except:
print(" ...fit returns an error! Continuing.")
break
bBadFit=False
errors = np.sqrt(np.diag(covarMat))
step_check = 0
while step_check < npars:
## Check the error to make sure there is no overfitting
chkerr = errors[step_check]/params[step_check]
if (chkerr>0.10):
print( " --- fit shows overfitting with %d parameters." % npars)
print( " --- Occurred with parameter %s: %g +- %g " % (names[step_check], params[step_check],
errors[step_check]))
bBadFit=True
break
step_check += 1
## Chi^2 model fitting check.
## SclChk can be increased to make it easier to fit higher order models, or lower for a stronger criteria
## First model check is always set to 1.0 so its accepted
SclChk = 0.5
chi_check = chi/chi_min
if npars == par_list[0]:
threshold = 1.0
else:
threshold = (1-npar_min/npars)*SclChk
print("--- The chi_check for {} parameters is {}".format(npars, chi_check))
print("--- The threshold for this check is {}".format(threshold))
if (not bBadFit) and (chi/chi_min < threshold):
chi_min=chi ; par_min=params ; err_min=errors ; npar_min=npars ; ymod_min=ymodel; covar_min = covarMat;
else:
break;
tau_min = par_min[1::2]
sort_tau = np.argsort(tau_min)[::-1]
nsort_params = np.array([[2*tau_ind, 2*tau_ind+1] for tau_ind in sort_tau]).flatten()
err_min = err_min[nsort_params]
par_min = par_min[nsort_params]
sort_covarMat = covar_min[:,nsort_params][nsort_params]
names = _return_parameter_names(npar_min)
if bPrint:
print( "= = Found %d parameters to be the minimum necessary to describe curve: chi(%d) = %g vs. chi(%d) = %g)" % (npar_min, npar_min, chi_min, npars, chi))
print( "Parameter %d %s: %g +- %g " % (npar_min, len(names), len(par_min), len(err_min)))
for i in range(npar_min):
print( "Parameter %d %s: %g +- %g " % (i, names[i], par_min[i], err_min[i]))
print('\n')
return chi_min, names, par_min, err_min, ymod_min, sort_covarMat
def fitstoDF(resnames, chi_list, pars_list, errs_list, names_list):
## Set Up columns indices and names for the data frame
"""
Function that takes the residue names, chi^2, parameters, errors and names of the fits and returns a data frame
of the parameters.
"""
mparnames = _return_parameter_names(8) ## Always return the longest possible number of
mtau_names = np.array(mparnames)[1::2]
mc_names = np.array(mparnames)[::2]
colnames = np.array(['Resname','NumExp'])
tau_errnames = np.array([[c,"{}_err".format(c)] for c in mtau_names]).flatten()
mc_errnames = np.array([[c, "{}_err".format(c)] for c in mc_names]).flatten()
colnames = np.hstack([colnames,mc_errnames])
colnames = np.hstack([colnames,tau_errnames])
colnames = np.hstack([colnames,np.array(['Chi_Fit'])])
FitDF = pd.DataFrame(index=np.arange(len(pars_list)), columns=colnames).fillna(0.0)
FitDF['Resname'] = resnames
FitDF['Chi_Fit'] = chi_list
for i in range(len(pars_list)):
npar = len(pars_list[i])
if (npar%2)==1:
ccut = npar-2
tau_f, terr = pars_list[i][1:ccut+1:2], errs_list[i][1:ccut+1:2]
tau_f = np.hstack([tau_f, pars_list[i][-1]])
terr = np.hstack([terr, errs_list[i][-1]])
sort_tau = np.argsort(tau_f)
coeff, cerr= pars_list[i][0:ccut:2], errs_list[i][0:ccut:2]
Clast = 1; Clasterr = 0.0;
for n,m in zip(coeff, cerr):
Clast -= n
Clasterr += m
coeff = np.hstack([coeff, np.array(Clast)])
cerr = np.hstack([cerr, np.array(Clasterr)])
tne = np.array([[c,"{}_err".format(c)] for c in mparnames[1:npar+1:2]]).flatten()
cne = np.array([[c, "{}_err".format(c)] for c in mparnames[0:npar:2]]).flatten()
else:
tau_f, terr = pars_list[i][1::2], errs_list[i][1::2]
coeff, cerr= pars_list[i][0::2], errs_list[i][0::2]
sort_tau = np.argsort(tau_f)[::-1]
tne = np.array([[c,"{}_err".format(c)] for c in names_list[i][1::2]]).flatten()
cne = np.array([[c, "{}_err".format(c)] for c in names_list[i][0::2]]).flatten()
NumExp=np.array(len(tau_f))
tau_err = np.array([[t,e] for t,e in zip(tau_f[sort_tau],terr[sort_tau])]).flatten()
c_err = np.array([[c,e] for c,e in zip(coeff[sort_tau], cerr[sort_tau])]).flatten()
namesarr = np.hstack([np.array('NumExp'),cne,tne])
valarr = np.hstack([NumExp,c_err,tau_err])
FitDF.loc[i,namesarr] = valarr
FitDF['AUC_a'] = FitDF.C_a*FitDF.tau_a; FitDF['AUC_b'] = FitDF.C_b*FitDF.tau_b;
FitDF['AUC_g'] = FitDF.C_g*FitDF.tau_g; FitDF['AUC_d'] = FitDF.C_d*FitDF.tau_d;
FitDF['AUC_Total'] = FitDF[['AUC_a','AUC_b','AUC_g','AUC_d']].sum(axis=1)
return FitDF
def fitCorrF(CorrDF, dCorrDF, tau_mem, pars_l, fixfit=False, threshold=1.0):
"""
Input Variables:
CorrDF: Dataframe containing the correlation functions. Columns are the NH-bond vectors, rows are timesteps.
dCorrDF: Error in the correlation function at time t
tau_mem: Cut-Off time to remove noise at the tail of the correlation function
pars_l : parameters list.
fixfit : Boolean to decide if you want to use a specific exponential function
Main function to fit the correlation function.
Loops over all residues with N-H vectors and calculates the fit, appends the best fit from findbest_Expstyle_fits2.
Passes the set of lists to fitstoDF to return a data frame of the best fits for each residue.
Takes the correlation function CorrDF and errors in the correlation function, maximum tau mem to cut correlation
function off from, the list of parameters you want to fit too. If you don't want to test the fit and use
a fixed parameter set, set fixfit to True and pass a list of length 1 into pars_l.
"""
NH_Res = CorrDF.columns
chi_list=[] ; names_list=[] ; pars_list=[] ; errs_list=[] ; ymodel_list=[]; covarMat_list = [];
for i in CorrDF.columns:
tstop = np.where(CorrDF.index.values==tau_mem)[0][0]
x = CorrDF.index.values[:tstop]
y = CorrDF[i].values[:tstop]
dy = dCorrDF[i].values[:tstop]
## If there is no error provided i.e. no std. dev. over correlation functions is provided then set dy to empty set
if np.all(np.isnan(dy)):
dy = []
## if not fixfit then find find the best expstyle fit. Otherwise force the fit to nparams
if (not fixfit)&(len(pars_l)>1):
print("Finding the best fit for residue {}".format(i))
chi, names, pars, errs, ymodel, covarMat = findbest_Expstyle_fits2(x, y, tau_mem, dy,
par_list=pars_l, threshold=threshold)
elif (fixfit)&(len(pars_l)==1):
print("Performing a fixed fit for {} exponentials".format(int(pars_l[0]/2)))
chi, pars, covarMat, ymodel = do_Expstyle_fit2(pars_l[0], x, y, dy, tau_mem)
names = _return_parameter_names(len(pars))
errs = np.sqrt(np.diag(covarMat))
else:
print("The list of parameters is empty. Breaking out.")
break;
chi_list.append(chi)
names_list.append(names)
pars_list.append(pars)
errs_list.append(errs)
ymodel_list.append(ymodel)
FitDF = fitstoDF(NH_Res, chi_list, pars_list, errs_list, names_list)
return FitDF
def J_direct_transform(om, consts, taus):
"""
Calculation of the spectral density from the parameters of the fit by direct fourier transform
"""
## Calculation for the direct spectral density
ndecay=len(consts) ; noms=1;###lnden(om)
Jmat = np.zeros( (ndecay, noms ) )
for i in range(ndecay):
Jmat[i] = consts[i]*(taus[i]*1e-9)/(
1 + np.power((taus[i]*1e-9)*(om),2.))
return Jmat.sum(axis=0)
def calc_NMR_Relax(J, fdd, fcsa, gammaH, gammaN):
"""
Function to calculate the R1, R2 and NOE from the spectral densities and the physical parameters for the
dipole-dipole and csa contributions, fdd and fcsa.
"""
R1 = fdd * (J['Diff'] + 3*J['15N'] + 6*J['Sum']) + fcsa * J['15N']
R2 = (0.5 * fdd * (4*J['0'] + J['Diff'] + 3*J['15N'] + 6*J['1H'] + 6*J['Sum'])
+ (1./6.) * fcsa*(4*J['0'] + 3*J['15N']) )
NOE = 1 + ((fdd*gammaH)/(gammaN*R1))*(6*J['Sum'] - J['Diff'])
return R1, R2, NOE
# # Begin Implementation of Code:
# ## Definition of global file locations
# 1. Notebook can be run in the local directory, in which case, skip over the first cell
# 2. File locations of trajectories to be loaded using mdtraj for calculation of N-H bond vectors. These should be changed by the user.
#
# +
## Global Variables for the calculation of the NH Vecs and the correlation functions
FileLoc = "" ## Main Directory Location
RUN = ["Run{}".format(i) for i in range(1,5)]
JOBS = ['PROD1','PROD2','PROD3']
## For use if replicate trajectories are stored as follows
TRAJLIST_LOC = ["{}/Analysis/{}".format(J,R) for J in JOBS for R in RUN]
FTOPN = "Q15.gro" ## Name of topology for the trajectory
FMDN = "Q15.noH20.xtc" ## Name of the trajectory, should be centered and stripped of solute
# -
# ## Definition of physical constants and parameters
# 1. Several parameters should be changed if necessary
# a. B0 --> Set to experimental magnetic field you want to compare against
# b. dSigmaN --> -170e-6 is a well-established value, but can be changed
# 2. Units are in s in the parameters, but the timesteps should be in ns. Converted in J_direct_transform.
# +
## Parameters and Physical Constants for calculation of Relaxation Rates
H_gyro = 2*np.pi*42.57748*1e6 ## Gyromagnetic Ratio: Hydrogen ([rad]/[s][T])
N_gyro = -2*np.pi*4.317267*1e6 ## Gyromagnetic Ratio: Nitrogen ([rad]/[s][T])
B0 = 18.8 ## Field Strength = 18.8 [Teslas]
## Need 5 Frequencies: ## J[0], J[wH], J[wN], J[wH-wN], J[wH+wN]
Larmor1H = H_gyro*B0 ## Larmor Frequency: Hydrogen ([rad]/[s])
Larmor15N = N_gyro*B0 ## Larmor Frequency: Nitrogen ([rad]/[s])
omDiff = Larmor1H - Larmor15N ## Diff in Larmor Frequencies of Spin IS
omSum = Larmor1H + Larmor15N ## Sum of Larmor Frequencies of Spin IS
mu_0 = 4*np.pi*1e-7 ; ## Permeability of Free Space: ([H]/[m])
hbar = 1.0545718e-34 ; ## Reduced Plank's constant: [J] * [s] = [kg] * [m^2] * [s^-1]
R_NH = 1.02e-10 ## N-H bond distance in meters (1.02 Angstrom)
dSigmaN = -170e-6 ## CSA of the S-spin atom
FDD = (1./10.)*np.power((mu_0*hbar*H_gyro*N_gyro)/(4*np.pi*np.power(R_NH,3)),2)
#FCSA = 498637299.69233465
FCSA = (2.0/15.0)*(Larmor15N**2)*(dSigmaN**2) ## CSA factor
# -
# ## Load trajectories and calculate the NH-Vecs in the laboratory frame
# ### Skip to calculation of correlation functions if already performed
## Change directory to examples to test code
# %cd EXAMPLES
## Calculate the NHVecs; Can be adapted to loop over multiple trajectories using TRAJLIST_LOC
NHVecs = []
start=0; end=-1; ##
NHV = calc_NHVecs(FMDN, FTOPN, start, end)
NHVecs.append(NHV)
# +
dt = 10 ## timestep of simulations: (ps)
tau_split = np.array(NHVecs).shape[1]*dt ## Chunk length (ps) used for the correlation function; here the full trajectory length.
## Split the vecs based off the tau_split you want and the time step.
vecs_split = split_NHVecs(NHVecs, dt, tau_split)
# -
## Calculate the correlation functions and the standard deviation in the correlation function.
## Save the correlation functions in a dataframe and then to a csv file for later use.
Ct, dCt = calc_Ct(vecs_split)
## Convert to dataframe with index set as timesteps in ns
CtOutFname = 'NH_Ct.csv'
dCtOutFname = 'NH_dCt.csv'
CtDF = pd.DataFrame(Ct, index = np.arange(1, Ct.shape[0]+1)*dt/1000)
dCtDF = pd.DataFrame(dCt, index = np.arange(1, dCt.shape[0]+1)*dt/1000)
CtDF.to_csv(CtOutFname)
dCtDF.to_csv(dCtOutFname)
# ## Begin fitting of the correlation functions
# 1. Load the correlation functions from before
# 2. Calculate the correlation functions
# a. For a single exponential model, fixfit=True
# b. Find the best exponential model, fixfit=False (default)
# 3. Pass the fitted parameters for each residue to calculate the spectral density
# 4. Calculate the NMR relaxation parameters.
## Load the correlation functions from the saved csv files
CtInName = 'NH_Ct.csv'
dCtInName = 'NH_dCt.csv'
CtDF = pd.read_csv(CtInName, index_col=0)
dCtDF = pd.read_csv(dCtInName, index_col=0)
tau_mem=2.5 ## Cut off to remove noise from the tail of the correlation function in the fit (ns)
fixfit = True ## True: use the single fixed model given in parameters_list; False: search for the best model
parameters_list = [4] ## must contain exactly one entry (number of fit parameters) when fixfit = True
thresh = 1.0 ## chi^2 ratio threshold used when searching over models (ignored for a fixed fit)
FitDF = fitCorrF(CtDF, dCtDF, tau_mem, parameters_list, fixfit, thresh)
# +
## Calculate spectral density from the FitDF by calling the J_direct_transform function for each of the 5 frequencies.
## Loop over the rows of the FitDF dataframe from fitCorrF function and calcuate the spectral densities.
## Save the spectral densities to a dictionary and append to a list.
Jarr = []
for i,fit in FitDF.iterrows():
c = fit[['C_a','C_b','C_g','C_d']].values
t = fit[['tau_a','tau_b','tau_g','tau_d']].values
Jdict = {'0':0, '1H':0,'15N':0,'Sum':0,'Diff':0}
J0 = J_direct_transform(0, c, t)
JH = J_direct_transform(Larmor1H, c, t)
JN = J_direct_transform(Larmor15N, c, t)
JSum = J_direct_transform(omSum, c, t)
JDiff = J_direct_transform(omDiff, c, t)
Jdict['1H'] = JH ; Jdict['15N'] = JN; Jdict['0'] = J0;
Jdict['Sum'] = JSum; Jdict['Diff'] = JDiff;
Jarr.append(Jdict)
# +
## Calculate NMR relaxation parameters for each residue by calling calc_NMR_relax
## Save the T1, T2 and NOE parameters to a dataframe
NMRRelaxDF = pd.DataFrame(np.zeros((len(Jarr),3)),index=range(1,len(Jarr)+1), columns=['T1','T2','NOE'])
for index in range(1,len(Jarr)+1):
r1, r2, noe = calc_NMR_Relax(Jarr[index-1], FDD, FCSA, H_gyro, N_gyro)
NMRRelaxDF.loc[index,'T1'] = 1/r1;
NMRRelaxDF.loc[index,'T2'] = 1/r2;
NMRRelaxDF.loc[index,'NOE'] = noe;
NMRRelaxDF['Resname'] = FitDF['Resname'].values
NMRRelaxDF['RESNUM'] = NMRRelaxDF['Resname'].str.extract('([0-9]+)',expand=False).astype('int')+1
# -
## Merge the NMR relaxation dataframes with the FitDF dataframe
FitRelaxDF = FitDF.merge(NMRRelaxDF, how='left', left_on='Resname',right_on='Resname').set_index(NMRRelaxDF.index)
## Save FitRelaxDF to a csv file
FitRelaxDF.to_csv('NMRRelaxtionDF.csv')
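## A quick look at the merged per-residue results (a sketch; selects a few of the columns built above)
FitRelaxDF[['Resname', 'NumExp', 'T1', 'T2', 'NOE']].head()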
| CorrFunction_NMRRelaxation.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # Bode Plots
import control
import numpy as np
import matplotlib.pyplot as plt
plt.rcParams['figure.figsize'] = (5, 5)
s = control.tf([1, 0], [0, 1])
# ## Derivative Terms
#
# $ G(s) = s$
G_D = s
control.bode(G_D, dB=True);
G_D
# ## Integral Terms
#
# $ G(s) = \dfrac{1}{s}$
G_I = 1/s
control.bode(G_I, dB=True);
G_I
# ## First Order Zeros
#
# $ G(s) = \dfrac{s + \omega}{\omega}$
G_FZ = (s+1)/1
control.bode(G_FZ, dB=True);
G_FZ
# ## First Order Poles
#
# $ G(s) = \dfrac{\omega}{s + \omega}$
G_FP = 1/(s+1)
control.bode(G_FP, dB=True);
G_FP
# ## Second Order Poles: Case 1, Real Poles
#
# $G(s) = \dfrac{\omega_n}{s^2 + 2 \zeta \omega_n s + \omega_n^2}$
#
# $\zeta \geq 1$
#
# Factor into two first order poles.
# +
zeta = 10
wn = 1
G_SR = 1/(s**2 + 2*zeta*wn*s + wn**2)
control.bode(G_SR, dB=True);
print('roots', np.roots([1, 2*zeta*wn, wn**2]))
G_SR
# -
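# +
# A quick check of the factorization mentioned above (a sketch reusing the `zeta`, `wn` and
# `G_SR` from the previous cell): the two real roots give two first order poles whose product
# reproduces the same Bode plot as the original second order transfer function.
p1, p2 = np.roots([1, 2*zeta*wn, wn**2]).real   # both roots are real because zeta >= 1
G_factored = 1/((s - p1)*(s - p2))
control.bode([G_SR, G_factored], dB=True);
# -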
# ## Second Order Poles: Case 2, Imaginary Poles
#
# $G(s) = \dfrac{\omega_n}{s^2 + 2 \zeta \omega_n s + \omega_n^2}$
#
# $\zeta < 1$
# +
zeta = 0.1
wn = 1
G_SR = 1/(s**2 + 2*zeta*wn*s + wn**2)
G_damp = [1/(s**2 + 2*zeta*wn*s + wn**2) for zeta in np.arange(0.01, 0.9, 0.2)]
control.bode(G_damp, dB=True, omega=np.logspace(-2, 2, 1000));
G_SR
# -
# ## Unstable Poles and Zeros
#
# * Same magnitude
# * Opposite phase slope
#
# $G(s) = \dfrac{1}{s + 1}$
#
# vs.
#
# $G(s) = \dfrac{1}{s - 1}$
control.bode([1/(s+1), 1/(s-1)], dB=True);
ax = plt.gca()
# $G(s) = s + 1$
#
# vs.
#
# $G(s) = s - 1$
control.bode([(s+1), (s-1)], dB=True);
# ## Leading Negative
#
# * Add 180 deg phase.
#
# $G(s) = -\dfrac{1}{s + 1}$
control.bode([1/(s+1), -1/(s+1)], dB=True);
# ## **Bode Plot Rules Summary**
#
# 1. Find $|G(0)|$, where your magnitude plot starts and $|G(\infty)|$, where it ends.
# 1. Find $\angle G(0)$, where your phase plot starts and $\angle G(\infty)$, where it ends.
# 1. Make a table of the slope and phase contribution of each factor. If a second order pole or zero has real roots, treat it as two first order systems. If it has complex roots, find the natural frequency and the damping ratio: the damping ratio determines the size of the resonant peak, and the natural frequency is the corner frequency.
# 1. Draw asymptotes using $|G(0)|$ to start the magnitude plot and $\angle G(0)$ to start the phase plot.
# 1. Interpolate between asymptotes for magnitude and phase.
#
# Zeros:
#
# | factor | corner freq. (rad/s) | phase (deg) | slope (dB/dec) |
# |---|---|---|---|
# |$s$|0 | +90 | +20|
# |$(s+\omega)$|$\omega$| +90 | +20|
# |$(s-\omega)$|$\omega$| -90 | +20|
# |$(s^2 + 2\zeta \omega_n s + \omega_n^2)$|$\omega_n$| +180 | +40|
#
# Poles:
#
# | factor | corner freq. (rad/s) | phase (deg) | slope (dB/dec) |
# |---|---|---|---|
# |$1/s$| 0 | -90 |-20|
# |$1/(s+\omega)$|$\omega$| -90 | -20|
# |$1/(s-\omega)$|$\omega$| +90 | -20|
# |$1/(s^2 + 2\zeta \omega_n s + \omega_n^2)$|$\omega_n$| -180 | -40|
#
# ## Combining Factors
#
# $G(s) = \dfrac{-1}{(s-1)(s-2)}$
#
# $|G(0)| = |-1/2| = 1/2 \approx -6 dB$
#
# $\angle G(0) = \angle -1/2 = -180 deg$
#
# | factor | corner freq. (rad/s) | phase (deg) | slope (dB/dec) |
# |---|---|---|---|
# |$1/(s-1)$|1|+90|-20|
# |$1/(s-2)$|2|+90|-20|
#
# Notice phase contribution of the pole is now positive, since it is an unstable pole.
#
import control
s = control.tf([1, 0], [0, 1])
control.bode(-1/((s-1)*(s-2)), dB=True);
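# +
# A numeric check of the hand calculation above (a sketch): the DC gain gives the starting
# magnitude (~ -6 dB), and a negative real gain corresponds to a 180 deg (equivalently -180 deg) phase.
import numpy as np
G = -1/((s-1)*(s-2))
k0 = control.dcgain(G)                      # G(0) = -0.5
print('|G(0)| =', abs(k0), '->', 20*np.log10(abs(k0)), 'dB')
print('angle G(0) =', np.degrees(np.angle(k0)), 'deg')
# -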
| 09.Bode.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import nltk
paragraph = """I have three visions for India. In 3000 years of our history, people from all over
the world have come and invaded us, captured our lands, conquered our minds.
From Alexander onwards, the Greeks, the Turks, the Moguls, the Portuguese, the British,
the French, the Dutch, all of them came and looted us, took over what was ours.
Yet we have not done this to any other nation. We have not conquered anyone.
We have not grabbed their land, their culture,
their history and tried to enforce our way of life on them.
Why? Because we respect the freedom of others.That is why my
first vision is that of freedom. I believe that India got its first vision of
this in 1857, when we started the War of Independence. It is this freedom that
we must protect and nurture and build on. If we are not free, no one will respect us.
My second vision for India’s development. For fifty years we have been a developing nation.
It is time we see ourselves as a developed nation. We are among the top 5 nations of the world
in terms of GDP. We have a 10 percent growth rate in most areas. Our poverty levels are falling.
Our achievements are being globally recognised today. Yet we lack the self-confidence to
see ourselves as a developed nation, self-reliant and self-assured. Isn’t this incorrect?
I have a third vision. India must stand up to the world. Because I believe that unless India
stands up to the world, no one will respect us. Only strength respects strength. We must be
strong not only as a military power but also as an economic power. Both must go hand-in-hand.
My good fortune was to have worked with three great minds. Dr. <NAME> of the Dept. of
space, Professor <NAME>, who succeeded him and Dr. <NAME>, father of nuclear material.
I was lucky to have worked with all three of them closely and consider this the great opportunity of my life.
I see four milestones in my career"""
# -
# Cleaning the texts
import re
from nltk.corpus import stopwords
from nltk.stem.porter import PorterStemmer
from nltk.stem import WordNetLemmatizer
ps = PorterStemmer()
wordnet=WordNetLemmatizer()
sentences = nltk.sent_tokenize(paragraph)
corpus = []
for i in range(len(sentences)):
review = re.sub('[^a-zA-Z]', ' ', sentences[i])
review = review.lower()
review = review.split()
review = [wordnet.lemmatize(word) for word in review if not word in set(stopwords.words('english'))]
review = ' '.join(review)
corpus.append(review)
corpus
# Creating the TF-IDF model
from sklearn.feature_extraction.text import TfidfVectorizer
cv = TfidfVectorizer()
X = cv.fit_transform(corpus).toarray()
X.shape
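# +
# Inspecting the learned vocabulary (a sketch): map the TF-IDF matrix columns back to terms.
# Note: `get_feature_names_out` assumes scikit-learn >= 1.0; older releases use `get_feature_names()`.
import pandas as pd
feature_names = cv.get_feature_names_out()
tfidf_df = pd.DataFrame(X, columns=feature_names)
tfidf_df.head()
# -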
| TF-IDF/Natural Language Processing TF-IDF.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import sklearn as sk
import matplotlib
import matplotlib.pyplot as plt
import pandas as pd
import sys
import re
import numpy as np
# retrieve data
insta = pd.read_csv('../../data/merged_file.csv', delimiter=';')
insta.head(20)
# Select a subset of columns (note the double brackets for a list of column names)
small_set = insta[['auteur', 'bericht tekst', 'hashtags']]
| machine learning/Text analyse/Ties.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# +
import blackjackML as bjml
n_games = 1000
n_decks = 6
# create and add players
p1 = bjml.Player(name='<NAME>', strategy=bjml.OptimalStrategy())
players = [p1]
table = bjml.Table(players, n_decks)
for game_id in range(n_games):
#empty hands
table.reset()
#betting
table.play_a_game(game_id)
# -
| Notebooks/Blackjack with Package-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
#Agenda
#Why Index is important
#Index Array
#Negative Indexes
#Range of Indexes
#Warning: You can never change a Series/ DataFrame index once assigned
#Workaround- if you want to change index name
# -
import numpy as np
import pandas as pd
from pandas import Series, DataFrame
s1= Series([10,20,30,40], index= ['a','b','c','d'])
print(s1)
index_obj= s1.index
print(index_obj)
index_obj[0]
# +
#Why Index is important
# -
#Negative Indexes
index_obj[-2:]
index_obj[:-2]
#Range of Indexes
index_obj[2:4]
# +
#Warning: You can never change a Series/ DataFrame index once assigned
print(index_obj)
index_obj[0] = "AA"
# -
#Workaround- if you want to change index name
print(s1.rename(index= {"a": "AA"}))
print(s1)
s1= s1.rename(index= {"a": "AA"})
print(s1)
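# +
#Alternative workaround- a single entry cannot be mutated, but the entire index can be replaced in one assignment
#(the new labels below are arbitrary example values)
s1.index = ['w', 'x', 'y', 'z']
print(s1)
# -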
| Week 1/Section 3/Indexes in Pandas- 1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ### PyFVCOM plotting tools examples
#
# Here, we demonstrate plotting in three different dimensions: horizontal space, vertical space and time.
#
# First we load some model output into an object which can be passed to a number of plotting objects. These objects have methods for plotting different aspects of the data.
#
# For the horizontal plots, we plot the sea surface elevation across the model domain at a given time.
# For the vertical plots, we take a transect through the water column temperature data and plot it.
# For the time plots, we plot both a simple surface elevation time series and a time-varying water column temperature plot.
#
# %matplotlib inline
# Load an FVCOM model output and plot a surface.
from PyFVCOM.read import FileReader
from PyFVCOM.plot import Plotter, Time, Depth
from PyFVCOM.tide import make_water_column
from cmocean import cm
import matplotlib.pyplot as plt
import numpy as np
# Create an object which holds the model outputs. We're only loading
# surface elevation and temperature for the first 200 time steps.
fvcom = FileReader('sample.nc', dims={'time': range(200)}, variables=['zeta', 'temp'])
# Make a plot of the surface elevation.
plot = Plotter(fvcom,
figsize=(20, 20),
res='i',
tick_inc=(4, 2),
cb_label='{} ({})'.format(fvcom.atts.zeta.long_name,
fvcom.atts.zeta.units),
cmap=cm.balance)
plot.plot_field(fvcom.data.zeta[5, :])
plot.axes.set_title(fvcom.time.datetime[5].strftime('%Y-%m-%d %H:%M:%S'))
# Plot a temperature transect between two locations.
positions = np.array(((-5, 50), (-4.5, 49.5)))
indices, distance = fvcom.horizontal_transect_nodes(positions)
plot = Depth(fvcom, figsize=(20, 9),
cb_label='Temperature ({})'.format(fvcom.ds.variables['temp'].units),
cmap=cm.thermal)
# fill_seabed makes the part of the plot below the seabed grey.
plot.plot_slice(distance / 1000, # to kilometres from metres
fvcom.grid.siglay_z[:, indices],
fvcom.data.temp[4, :, indices],
fill_seabed=True)
plot.axes.set_xlim(right=(distance / 1000).max()) # set the x-axis to the data range
plot.axes.set_xlabel('Distance (km)')
plot.axes.set_ylabel('Depth (m)')
# Save the figure.
plot.figure.savefig('temperature_profile.png')
# Do a time series at a specific location.
gauge = (-5, 55) # a sample (lon, lat) position
index = fvcom.closest_node(gauge)
time = Time(fvcom, figsize=(20, 9), title='{} at {}, {}'.format(fvcom.atts.zeta.long_name,
*gauge))
time.plot_line(fvcom.data.zeta[:, index], color='r')
time.axes.set_ylabel('{} ({})'.format(fvcom.atts.zeta.long_name,
fvcom.atts.zeta.units))
# Plot a depth-varying time profile through a water column
fvcom = FileReader('sample.nc', variables=['temp', 'zeta'], dims={'time': range(400), 'node': [5000]})
time = Time(fvcom, figsize=(20, 9), cb_label='{} ({})'.format(fvcom.atts.temp.long_name,
fvcom.atts.temp.units))
z = make_water_column(fvcom.data.zeta, fvcom.grid.h, fvcom.grid.siglay)
# fill_seabed makes the part of the plot below the seabed grey.
# We need to squeeze the data array since we've only extracted a single
# position.
time.plot_surface(z, np.squeeze(fvcom.data.temp), fill_seabed=True)
time.axes.set_ylabel('Depth (m)')
| examples/pyfvcom_plot_example.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 34089, "status": "ok", "timestamp": 1608604711163, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhlwwwMJr16CgjmRFWk833xZEy2OfpaFZDIqM84=s64", "userId": "00311744807213723199"}, "user_tz": -330} id="UpFsnmrgiwO2" outputId="f8a3a302-de39-404c-ba6c-06bef9e66745"
# Data hosted at below link (downloading it)
# !wget https://www.dropbox.com/s/tlxserrdhe240lu/archive.zip
# Unzipping the data
# !unzip -q "archive.zip"
# + _cell_guid="b1076dfc-b9ad-4769-8c92-a6c4dae69d19" _uuid="8f2839f25d086af736a60e9eeb907d3b93b6e0e5" executionInfo={"elapsed": 2949, "status": "ok", "timestamp": 1608604751191, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhlwwwMJr16CgjmRFWk833xZEy2OfpaFZDIqM84=s64", "userId": "00311744807213723199"}, "user_tz": -330} id="qL8tATsLisy7"
# Imports required for this project
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
from pathlib import Path
tf.random.set_seed(4)
# + _cell_guid="79c7e3d0-c299-4dcb-8224-4455121ee9b0" _uuid="d629ff2d2480ee46fbb7e2d37f6b5fab8052498a" executionInfo={"elapsed": 3464, "status": "ok", "timestamp": 1608604751900, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhlwwwMJr16CgjmRFWk833xZEy2OfpaFZDIqM84=s64", "userId": "00311744807213723199"}, "user_tz": -330} id="a143XKgwisy-"
# Creating the Pathlib PATH objects
train_path = Path("chest_xray/train/")
validation_path = Path("chest_xray/test")
test_path = Path("chest_xray/val")
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 3256, "status": "ok", "timestamp": 1608604751901, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhlwwwMJr16CgjmRFWk833xZEy2OfpaFZDIqM84=s64", "userId": "00311744807213723199"}, "user_tz": -330} id="tw6gvicrisy-" outputId="3c61b706-50c7-462b-969b-51ea9705a0ef"
# Collecting all the Paths Inside "Normal" and "Pneumonia" folders of the above paths
train_image_paths = train_path.glob("*/*")
val_image_paths = validation_path.glob("*/*")
# Output is a Generator object
print(train_image_paths)
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 3077, "status": "ok", "timestamp": 1608604751903, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhlwwwMJr16CgjmRFWk833xZEy2OfpaFZDIqM84=s64", "userId": "00311744807213723199"}, "user_tz": -330} id="d7G0gpW4iszB" outputId="86305c8b-69a9-4cc3-edc4-78d2d57447ae"
# Convert Generator Object to List of elements
train_image_paths = list(train_image_paths)
val_image_paths = list(val_image_paths)
# Now the outputs are "PosixPath" objects
print(train_image_paths[:3])
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 2887, "status": "ok", "timestamp": 1608604751905, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhlwwwMJr16CgjmRFWk833xZEy2OfpaFZDIqM84=s64", "userId": "00311744807213723199"}, "user_tz": -330} id="K86iKTUGiszB" outputId="54cf7e59-9fee-40d5-9226-f818bffe71be"
# Convert Posix paths to normal strings
train_image_paths = list(map(lambda x : str(x) , train_image_paths))
val_image_paths = list(map(lambda x : str(x) , val_image_paths))
print(train_image_paths[:3])
# + executionInfo={"elapsed": 2712, "status": "ok", "timestamp": 1608604751906, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhlwwwMJr16CgjmRFWk833xZEy2OfpaFZDIqM84=s64", "userId": "00311744807213723199"}, "user_tz": -330} id="9Y2gah5YiszC"
# Collect Length for Training and Validation Datasets
train_dataset_length = len(train_image_paths)
val_dataset_length = len(val_image_paths)
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 2550, "status": "ok", "timestamp": 1608604751907, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhlwwwMJr16CgjmRFWk833xZEy2OfpaFZDIqM84=s64", "userId": "00311744807213723199"}, "user_tz": -330} id="ARroUXTeiszC" outputId="978315be-bc57-471c-dc1c-2b8a0ddeae0f"
# Every Image has Label in its path , so lets slice it
LABELS = {'NORMAL' : 0 , 'PNEUMONIA' : 1}
INV_LABELS = {0 : 'NORMAL', 1 : 'PNEUMONIA'}
def get_label(path : str) -> int:
return LABELS[path.split("/")[-2]]
train_labels = list(map(lambda x : get_label(x) , train_image_paths))
val_labels = list(map(lambda x : get_label(x) , val_image_paths))
print(train_labels[:3])
# + executionInfo={"elapsed": 2374, "status": "ok", "timestamp": 1608604751909, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhlwwwMJr16CgjmRFWk833xZEy2OfpaFZDIqM84=s64", "userId": "00311744807213723199"}, "user_tz": -330} id="d7PY4Rc-iszC"
# Now we have all training, validation image paths and their respective labels
BATCH_SIZE = 32
# Function used for Transformation
def load_and_transform(image , label , train = True):
image = tf.io.read_file(image)
image = tf.io.decode_jpeg(image , channels = 3)
image = tf.image.resize(image , [224 , 224] , method="nearest")
if train:
image = tf.image.random_flip_left_right(image)
return image , label
# Function used to Create a Tensorflow Data Object
def get_dataset(paths , labels , train = True):
image_paths = tf.convert_to_tensor(paths)
labels = tf.convert_to_tensor(labels)
image_dataset = tf.data.Dataset.from_tensor_slices(image_paths)
label_dataset = tf.data.Dataset.from_tensor_slices(labels)
dataset = tf.data.Dataset.zip((image_dataset , label_dataset)).shuffle(1000)
dataset = dataset.map(lambda image , label : load_and_transform(image , label , train))
dataset = dataset.repeat()
dataset = dataset.shuffle(2048)
dataset = dataset.batch(BATCH_SIZE)
return dataset
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 12265, "status": "ok", "timestamp": 1608604761906, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhlwwwMJr16CgjmRFWk833xZEy2OfpaFZDIqM84=s64", "userId": "00311744807213723199"}, "user_tz": -330} id="xD-a4p8IiszC" outputId="7ef50779-4df0-4031-923e-96f83f45d941"
# Creating Train Dataset object and Verifying it
# %time train_dataset = get_dataset(train_image_paths , train_labels)
image , label = next(iter(train_dataset))
print(image.shape)
print(label.shape)
# + colab={"base_uri": "https://localhost:8080/", "height": 302} executionInfo={"elapsed": 12086, "status": "ok", "timestamp": 1608604761909, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhlwwwMJr16CgjmRFWk833xZEy2OfpaFZDIqM84=s64", "userId": "00311744807213723199"}, "user_tz": -330} id="pdhLKuCYiszD" outputId="2b69abf7-05be-4b51-f242-950b01c63421"
# View a sample Train Image
print(INV_LABELS[label[0].numpy()])
plt.imshow(image[0].numpy().reshape(224 , 224 , 3))
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 23926, "status": "ok", "timestamp": 1608604773909, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhlwwwMJr16CgjmRFWk833xZEy2OfpaFZDIqM84=s64", "userId": "00311744807213723199"}, "user_tz": -330} id="BuVPqSKLiszE" outputId="6e890d9a-43a6-449f-f903-4a500b013cf7"
# %time val_dataset = get_dataset(val_image_paths , val_labels , train = False)
image , label = next(iter(val_dataset))
print(image.shape)
print(label.shape)
# + colab={"base_uri": "https://localhost:8080/", "height": 302} executionInfo={"elapsed": 23722, "status": "ok", "timestamp": 1608604773912, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhlwwwMJr16CgjmRFWk833xZEy2OfpaFZDIqM84=s64", "userId": "00311744807213723199"}, "user_tz": -330} id="yHADl0QjiszE" outputId="7d427c9d-e5b3-4167-bec8-e3773555459c"
# View a sample Validation Image
print(INV_LABELS[label[0].numpy()])
plt.imshow(image[0].numpy().reshape(224 , 224 , 3))
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 25607, "status": "ok", "timestamp": 1608604775984, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhlwwwMJr16CgjmRFWk833xZEy2OfpaFZDIqM84=s64", "userId": "00311744807213723199"}, "user_tz": -330} id="hs4l2C_yiszE" outputId="7a207e64-e370-4a00-de53-3090b1dda415"
# Building ResNet50 model
from tensorflow.keras.applications import ResNet50V2
backbone = ResNet50V2(
input_shape=(224, 224, 3),
include_top=False
)
model = tf.keras.Sequential([
backbone,
tf.keras.layers.GlobalAveragePooling2D(),
tf.keras.layers.Dense(1, activation='sigmoid')
])
model.summary()
# + id="N9uS2NV1r1E7"
# Compiling your model by providing the Optimizer , Loss and Metrics
model.compile(
optimizer=tf.keras.optimizers.Adam(learning_rate=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-07),
loss = 'binary_crossentropy',
metrics=['accuracy' , tf.keras.metrics.Precision(name='precision'),tf.keras.metrics.Recall(name='recall')]
)
# + id="Q5ChquDKiszF"
# Defining our callbacks
checkpoint = tf.keras.callbacks.ModelCheckpoint("best_weights.h5",verbose=1,save_best_only=True,save_weights_only = True)
early_stop = tf.keras.callbacks.EarlyStopping(patience=4)
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 856090, "status": "ok", "timestamp": 1608113168199, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhlwwwMJr16CgjmRFWk833xZEy2OfpaFZDIqM84=s64", "userId": "00311744807213723199"}, "user_tz": -330} id="Lzd2_xyRiszF" outputId="efbfed7c-2a3f-43df-ac45-090ed4be5a40"
# Train the model
history = model.fit(
train_dataset,
steps_per_epoch=train_dataset_length//BATCH_SIZE,
epochs=8,
callbacks=[checkpoint , early_stop],
validation_data=val_dataset,
validation_steps = val_dataset_length//BATCH_SIZE,
)
# + colab={"base_uri": "https://localhost:8080/", "height": 241} executionInfo={"elapsed": 856656, "status": "ok", "timestamp": 1608113168780, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhlwwwMJr16CgjmRFWk833xZEy2OfpaFZDIqM84=s64", "userId": "00311744807213723199"}, "user_tz": -330} id="tKHlpd9AiszF" outputId="acd02189-56ec-4cff-dd93-3a75f41ef2fc"
# Interpreting the Metrics
fig, ax = plt.subplots(1, 4, figsize=(20, 3))
ax = ax.ravel()
for i, met in enumerate(['precision', 'recall', 'accuracy', 'loss']):
ax[i].plot(history.history[met])
ax[i].plot(history.history['val_' + met])
ax[i].set_title('Model {}'.format(met))
ax[i].set_xlabel('epochs')
ax[i].set_ylabel(met)
ax[i].legend(['train', 'val'])
# + id="dYxpkMFBSKez"
# Saving the best Model
# Load the best weights
model.load_weights("best_weights.h5")
# Save the whole model (weigths + architecture)
model.save("model.h5")
# + id="UHS1TRugSkN5"
# Loading the whole model
loaded_model = tf.keras.models.load_model("model.h5")
# + [markdown] id="gyj6g2FFiszF"
# Create a Testing Dataset
# + id="904ryp1fiszF"
# Create a Dataset Object for 'Testing' Set just the way we did for Training and Validation
test_image_paths = list(test_path.glob("*/*"))
test_image_paths = list(map(lambda x : str(x) , test_image_paths))
test_labels = list(map(lambda x : get_label(x) , test_image_paths))
test_image_paths = tf.convert_to_tensor(test_image_paths)
test_labels = tf.convert_to_tensor(test_labels)
def decode_image(image , label):
image = tf.io.read_file(image)
image = tf.io.decode_jpeg(image , channels = 3)
image = tf.image.resize(image , [224 , 224] , method="nearest")
return image , label
test_dataset = (
tf.data.Dataset
.from_tensor_slices((test_image_paths, test_labels))
.map(decode_image)
.batch(BATCH_SIZE)
)
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 860813, "status": "ok", "timestamp": 1608113172970, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhlwwwMJr16CgjmRFWk833xZEy2OfpaFZDIqM84=s64", "userId": "00311744807213723199"}, "user_tz": -330} id="IUMb4GpKiszF" outputId="0ebd5b71-e3e4-43fa-a467-5e0a42593cb4"
# Verify Test Dataset Object
image , label = next(iter(test_dataset))
print(image.shape)
print(label.shape)
# + colab={"base_uri": "https://localhost:8080/", "height": 302} executionInfo={"elapsed": 861893, "status": "ok", "timestamp": 1608113174064, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhlwwwMJr16CgjmRFWk833xZEy2OfpaFZDIqM84=s64", "userId": "00311744807213723199"}, "user_tz": -330} id="7ydJ_X42iszG" outputId="e4225e28-c0cc-413e-b4cc-776bd1f875be"
# View a sample Validation Image
print(INV_LABELS[label[0].numpy()])
plt.imshow(image[0].numpy().reshape(224 , 224 , 3))
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 862588, "status": "ok", "timestamp": 1608113174771, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhlwwwMJr16CgjmRFWk833xZEy2OfpaFZDIqM84=s64", "userId": "00311744807213723199"}, "user_tz": -330} id="kyKJ8663iszG" outputId="fb7a484f-0689-4a71-8b5b-83422ae691ee"
# Evaluating the loaded model
loss, acc, prec, rec = loaded_model.evaluate(test_dataset)
print(" Testing Acc : " , acc)
print(" Testing Precision " , prec)
print(" Testing Recall " , rec)
| Day-76/chestxray-cnn.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# Libraries needed for the tutorial
import pandas as pd
import requests
import io
# Downloading the csv file from your GitHub account
url = "https://raw.githubusercontent.com/docju/stackoverflow/master/constits.csv" # Make sure the url is the raw version of the file on GitHub
download = requests.get(url).content
df = pd.read_csv(io.StringIO(download.decode('utf-8')))
# Printing out the column names of the dataframe
df.columns
# -
| Untitled (2).ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import numpy as np
import scipy as scp
import random as rd
from sklearn.decomposition import PCA
from sklearn import preprocessing
from sklearn import datasets
from sklearn.covariance import LedoitWolf
import seaborn as sns
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
# ## Data of OAD Cleaned
#Initialize dataframe from excel spreadsheet
oad = pd.read_excel("oxy_avail_dataset.xls")
oad.head()
#Drop rows that describe the index and set the index
oad_cleaned = oad.loc[:, :'225C 16'].set_index('OTUId')
oad_cleaned.head()
#Make list of indecies and column headers
samples = list(oad_cleaned.index.values)
sites = list(oad_cleaned)
# ## Data of OAD Normalized (with samples as the rows and sites as the columns)
#Normalize cleaned data
# StandardScaler: centers each column (site) to zero mean and scales it to unit variance
# Note: keep the row labels as the samples/OTUs rather than principal-component labels
scaler = preprocessing.StandardScaler()
scaler_df = scaler.fit_transform(oad_cleaned)
oad_norm = pd.DataFrame(scaler_df, index=samples, columns=sites)
#oad_norm.columns = columns
oad_norm
#Compute pairwise covariance of columns (sites)
covar = oad_norm.cov()
covar.head()
#Initialize ledoit-wolf object
lw = LedoitWolf()
#LW Shrinkage of Covariance Matrix
#Does it use the covariance matrix or regular input matrix?
lwdf_norm = pd.DataFrame(lw.fit(covar).get_precision())
lwdf_norm
# +
#Taking the inverse of the precision matrix recovers the shrunk covariance matrix
#(the partial correlation matrix is instead obtained by normalizing the precision matrix; see the sketch below)
parcor_norm = pd.DataFrame(np.linalg.inv(lwdf_norm))
parcor_norm
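# -
# A hedged sketch of the conventional partial-correlation computation: normalize the
# precision matrix P via rho_ij = -P_ij / sqrt(P_ii * P_jj) rather than inverting it.
# Assumes lwdf_norm above holds the shrunk precision matrix over the sites.
# +
P = lwdf_norm.values
d = np.sqrt(np.diag(P))
parcor_vals = -P / np.outer(d, d)
np.fill_diagonal(parcor_vals, 1.0)
parcor_alt = pd.DataFrame(parcor_vals, index=sites, columns=sites)
parcor_alt
# -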
| par_cor_oad_norm.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %matplotlib inline
#
# # Save a 2D static flatmap as PNG
#
#
# Plot a 2D static flatmap and save it as PNG file.
#
# **Some words on the `recache` parameter before we begin:**
#
# Setting the `recache=True` parameter recaches the flatmap cache located in
# <filestore>/<subject>/cache. By default intermediate steps for a flatmap are
# cached after the first generation to speed up the process for the future. If
# any of the intermediate steps changes, the flatmap generation may fail.
# `recache=True` regenerates these intermediate steps from scratch.
# This can be helpful if you think there is no reason for
# `quickflat.make_figure` to fail but it fails anyway. Try it, it's magic!
#
# The default background is set to be a transparent image. If you want to change
# that use the parameter `bgcolor`.
#
# +
import cortex
import matplotlib.pyplot as plt
import numpy as np
np.random.seed(1234)
# Create a random pycortex Volume
volume = cortex.Volume.random(subject='S1', xfmname='fullhead')
# Plot a flatmap with the data projected onto the surface
_ = cortex.quickflat.make_figure(volume)
plt.show()
# Save this flatmap
filename = "./my_flatmap.png"
_ = cortex.quickflat.make_png(filename, volume, recache=False)
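# If you prefer a non-transparent background, the same call accepts the `bgcolor`
# parameter mentioned above (a small sketch; 'white' is just an example value)
_ = cortex.quickflat.make_png("./my_flatmap_white_bg.png", volume, recache=False,
                              bgcolor='white')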
| example-notebooks/quickflat/plot_make_png.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## 10 Comparison and Fancy Indexing in ``Numpy``
# ### Fancy Indexing
# +
import numpy as np
x = np.arange(16)
x
# -
x[3]
x[3:9]
x[3:9:2]
[x[3], x[5], x[7]]
ind = [3, 5, 8]
x[ind]
# Passing an array of indices into a vector returns the values at those indices
ind = np.array([[0, 2], [1, 3]])
x[ind]
# The result is a 2D array arranged according to those indices
# #### Fancy Indexing on 2D arrays
X = x.reshape(4, -1)
X
row = np.array([0, 1, 2])
col = np.array([1, 2, 3])
X[row, col]
X[0, col]
X[:2, col]
col = [True, False, True, True]
X[0, col]
# ### Comparisons on ``numpy.array``
x
x < 3
x > 3
x <= 3
x >= 3
x == 3
x != 3
2 * x == 24 - 4 * x
X < 6
# ### Using the results of ``numpy.array`` comparisons
x
a = x[x <= 4]
a
np.count_nonzero(a)
np.sum(x <= 3)
np.sum(X % 2 == 0, axis=0)
np.sum(X % 2 == 0, axis=1)
np.any(x == 0)
# Is there any element equal to 0?
np.any(x < 0)
np.all(x > 0)
np.all(x >= 0)
np.all(X > 0, axis=1)
np.sum((x > 3) & (x < 10))
# Use the bitwise operators & and | here; Python has no && operator, so the
# following line would raise a SyntaxError if uncommented
# np.sum((x > 3) && (x < 10))
np.sum((x % 2 == 0) | (x > 10))
np.sum(~(x == 0))
# The ~ operator performs element-wise negation
# ### 比较结果和Fancy Indexing
x < 5
x[x < 5]
x[x % 2 == 0]
X
X[:,-1]
X[X[:,-1] % 3 == 0, :]
# Show all rows whose last column is divisible by 3
| 03-Jupyter-Notebook-Numpy-and-Matplotlib/10-Comparison-and-Fancy-Indexing/10-Comparison-and-Fancy-Indexing.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: ablang-test
# language: python
# name: ablang-test
# ---
# # **AbLang Examples**
#
# AbLang is a BERT inspired language model trained on antibody sequences. The following is a set of possible use cases of AbLang.
import ablang
# + tags=[]
heavy_ablang = ablang.pretrained("heavy")
heavy_ablang.freeze()
# -
# -----
# ## **Res-codings**
#
# The res-codings are the 768 values for each residue, describing both a residue's individual properties (e.g. size, hydrophobicity, etc.) and properties in relation to the rest of the sequence (e.g. secondary structure, position, etc.).
#
# To calculate the res-codings, you can use the mode "rescoding" as seen below.
# +
seqs = [
'EVQLVESGPGLVQPGKSLRLSCVASGFTFSGYGMHWVRQAPGKGLEWIALIIYDESNKYYADSVKGRFTISRDNSKNTLYLQMSSLRAEDTAVFYCAKVKFYDPTAPNDYWGQGTLVTVSS',
'QVQLVQSGAEVKKPGASVKVSCKASGYTFTSYGISWVRQAPGQGLEWMGWISAYNGNTNYAQKLQGRVTMTTDTSTSTAYMELRSLRSDDTAVYYCARVLGWGSMDVWGQGTTVTVSS'
]
rescodings = heavy_ablang(seqs, mode='rescoding')
print(rescodings)
print("The shape of the output of a single sequence:", rescodings[0].shape)
# -
# ----
# An additional feature, is the ability to align the rescodings. This can be done by setting the parameter align to "True".
#
# **NB:** You need to install anarci and pandas for this feature.
# +
seqs = [
'EVQLVESGPGLVQPGKSLRLSCVASGFTFSGYGMHWVRQAPGKGLEWIALIIYDESNKYYADSVKGRFTISRDNSKNTLYLQMSSLRAEDTAVFYCAKVKFYDPTAPNDYWGQGTLVTVSS',
'QVQLVQSGAEVKKPGASVKVSCKASGYTFTSYGISWVRQAPGQGLEWMGWISAYNGNTNYAQKLQGRVTMTTDTSTSTAYMELRSLRSDDTAVYYCARVLGWGSMDVWGQGTTVTVSS'
]
rescodings = heavy_ablang(seqs, mode='rescoding', align=True)
print("The shape of the output:", rescodings[0].aligned_embeds.shape)
print(rescodings[0].aligned_embeds)
print(rescodings[0].number_alignment)
# -
# ---------
# ## **Seq-codings**
#
# Seq-codings are a set of 768 values for each sequence, derived from averaging across the res-codings. Seq-codings allow one to avoid sequence alignments, as every antibody sequence, regardless of its length, will be represented with 768 values.
# +
seqs = [
'EVQLVESGPGLVQPGKSLRLSCVASGFTFSGYGMHWVRQAPGKGLEWIALIIYDESNKYYADSVKGRFTISRDNSKNTLYLQMSSLRAEDTAVFYCAKVKFYDPTAPNDYWGQGTLVTVSS',
'QVQLVQSGAEVKKPGASVKVSCKASGYTFTSYGISWVRQAPGQGLEWMGWISAYNGNTNYAQKLQGRVTMTTDTSTSTAYMELRSLRSDDTAVYYCARVLGWGSMDVWGQGTTVTVSS'
]
seqcodings = heavy_ablang(seqs, mode='seqcoding')
print("The shape of the output:", seqcodings.shape)
print(seqcodings)
# -
# -----
# ## **Residue likelihood**
#
# Res- and seq-codings are both derived from the representations created by AbRep. Another interesting representation is the likelihoods created by AbHead. These values are the likelihoods of each amino acid at each position in the sequence. They can be used to explore which amino acids are most likely to be mutated into, and thereby explore the mutational space.
#
# **NB:** Currently, the likelihoods include the start and end tokens and padding.
# +
seqs = [
'EVQLVESGPGLVQPGKSLRLSCVASGFTFSGYGMHWVRQAPGKGLEWIALIIYDESNKYYADSVKGRFTISRDNSKNTLYLQMSSLRAEDTAVFYCAKVKFYDPTAPNDYWGQGTLVTVSS',
'QVQLVQSGAEVKKPGASVKVSCKASGYTFTSYGISWVRQAPGQGLEWMGWISAYNGNTNYAQKLQGRVTMTTDTSTSTAYMELRSLRSDDTAVYYCARVLGWGSMDVWGQGTTVTVSS'
]
likelihoods = heavy_ablang(seqs, mode='likelihood')
print("The shape of the output:", likelihoods.shape)
print(likelihoods)
# -
# -----
# ## **Antibody sequence restoration**
#
# In some cases, an antibody sequence is missing some residues. This can result from sequencing errors or limitations of current sequencing methods. To solve this, AbLang has the "restore" mode, as seen below, which picks the amino acid with the highest likelihood for each residue marked with an asterisk (*).
# +
seqs = [
'EV*LVESGPGLVQPGKSLRLSCVASGFTFSGYGMHWVRQAPGKGLEWIALIIYDESNKYYADSVKGRFTISRDNSKNTLYLQMSSLRAEDTAVFYCAKVKFYDPTAPNDYWGQGTLVTVSS',
'*************PGKSLRLSCVASGFTFSGYGMHWVRQAPGKGLEWIALIIYDESNK*YADSVKGRFTISRDNSKNTLYLQMSSLRAEDTAVFYCAKVKFYDPTAPNDYWGQGTL*****',
]
heavy_ablang(seqs, mode='restore')
| examples/example-ablang-usecases.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## 6.2: Overview of the API
# +
# Listing 6.2.1 Drawing the plot with Matplotlib
# %matplotlib inline
import matplotlib.pyplot as plt
import seaborn as sns
tips = sns.load_dataset("tips")
fig, (ax1, ax2) = plt.subplots(nrows=1, ncols=2)
ax1.scatter(
tips.loc[tips["time"] == "Lunch", "total_bill"],
tips.loc[tips["time"] == "Lunch", "tip"],
)
ax1.set_title("time = Lunch")
ax1.set_xlabel("total_bill")
ax1.set_ylabel("tip")
ax2.scatter(
tips.loc[tips["time"] == "Dinner", "total_bill"],
tips.loc[tips["time"] == "Dinner", "tip"],
)
ax2.set_title("time = Dinner")
ax2.set_xlabel("total_bill")
ax2.set_ylabel("tip")
# +
# 6.2.2 Drawing the plot with axes-level functions
facet_grid = sns.FacetGrid(tips, col="time")
facet_grid.map(sns.scatterplot, "total_bill", "tip")
# +
# Listing 6.2.3 Drawing the plot with a figure-level function
sns.relplot(data=tips, x="total_bill", y="tip", col="time")
| notebooks/6-02.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
df = pd.read_csv('bank-full.csv', sep=';')
print(df)
# +
for col_name in df.columns:
if type(df[col_name][0]) == str:
unique = list(set(df[col_name]))
for (replacee, replacer) in zip(unique, range(len(unique))):
df[col_name] = df[col_name].replace(replacee, replacer)
print(df)
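# +
# An equivalent, hedged alternative to the replace loop above: pandas' factorize performs
# the same kind of string-to-integer encoding in one call. It is shown on a fresh copy of
# the raw file so it does not alter the df used in the rest of the notebook.
df_alt = pd.read_csv('bank-full.csv', sep=';')
for col_name in df_alt.columns:
    if df_alt[col_name].dtype == object:
        df_alt[col_name] = pd.factorize(df_alt[col_name])[0]
print(df_alt.head())
# -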
# +
import pandas as pd
from sklearn import preprocessing
x = df.values #returns a numpy array
min_max_scaler = preprocessing.MinMaxScaler()
x_scaled = min_max_scaler.fit_transform(x)
df = pd.DataFrame(x_scaled)
print(df)
# -
from sklearn.utils import shuffle
df = shuffle(df)
trainingSet = df[:35000]
testingSet = df[35001:]
XtrainingSet = trainingSet.loc[:, :15]
XtestingSet = testingSet.loc[:, :15]
ytrainingSet = trainingSet.loc[:, 16]
ytestingSet = testingSet.loc[:, 16]
print(XtrainingSet.loc[:,13:14])
import matplotlib.pyplot as plt
plt.scatter(XtrainingSet.loc[:, 7], XtrainingSet.loc[:, 8], c=df[:35000][16], cmap=plt.cm.coolwarm)
plt.xlabel('Feature 7')
plt.ylabel('Feature 8')
plt.title('Dataset plotted for Features 7 and 8')
plt.savefig('Plotdata1211.png')
plt.show()
print(list(set(df[16])))
import matplotlib.pyplot as plt
f = plt.figure(figsize=(10,8))
plt.matshow(XtrainingSet.corr(), fignum=f.number)
plt.xticks(range(XtrainingSet.shape[1]), XtrainingSet.columns, fontsize=14, rotation=45)
plt.yticks(range(XtrainingSet.shape[1]), XtrainingSet.columns, fontsize=14)
cb = plt.colorbar()
cb.ax.tick_params(labelsize=14)
plt.title('Correlation Matrix', fontsize=16);
plt.show()
from sklearn.linear_model import LogisticRegression
import time
logistic = LogisticRegression(solver = 'lbfgs', max_iter=300)
start_time = time.time()
logistic.fit(XtrainingSet, ytrainingSet)
print("--- %s seconds ---" % (time.time() - start_time))
ypredicted_log = logistic.predict(XtestingSet)
ypredictedprob_log = logistic.predict_proba(XtestingSet)
logisticScore = logistic.score(XtestingSet, ytestingSet)
print("Predicted Probabilities for Logistic Model:\n", ypredictedprob_log)
print("\n Logistic Coefficients:\n" , logistic.coef_)
print("\n Logistic Score:\n" , logisticScore)
from sklearn import svm
svmModel = svm.SVC(kernel = 'rbf', C = 1.0, probability=True)
start_time = time.time()
svmModel.fit(XtrainingSet, ytrainingSet)
print("--- %s seconds ---" % (time.time() - start_time))
ypredicted_svm = svmModel.predict(XtestingSet)
ypredictedprob_svm = svmModel.predict_proba(XtestingSet)
svmScore = svmModel.score(XtestingSet, ytestingSet)
print("Predicted Probabilities for SVM Model:\n", ypredictedprob_svm)
print("\n SVM Score:\n" , svmScore)
# +
import numpy as np
from sklearn import svm
h=0.02
x_min, x_max = XtrainingSet.loc[:, 13].min() - 1, XtrainingSet.loc[:, 13].max() + 1
y_min, y_max = XtrainingSet.loc[:, 14].min() - 1, XtrainingSet.loc[:, 14].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
# title for the plots
C = 1.0 # SVM regularization parameter
# SVC with linear kernel
svc = svm.SVC(kernel='linear', C=C).fit(XtrainingSet.loc[:,13:14], ytrainingSet)
# LinearSVC (linear kernel)
lin_svc = svm.LinearSVC(C=C).fit(XtrainingSet.loc[:,13:14], ytrainingSet)
# SVC with RBF kernel
rbf_svc = svm.SVC(kernel='rbf', gamma=0.7, C=C).fit(XtrainingSet.loc[:,13:14], ytrainingSet)
# SVC with polynomial (degree 3) kernel
poly_svc = svm.SVC(kernel='poly', degree=3, C=C).fit(XtrainingSet.loc[:,13:14], ytrainingSet)
titles = ['SVC with linear kernel',
'LinearSVC (linear kernel)',
'SVC with RBF kernel',
'SVC with polynomial (degree 3) kernel']
for i, clf in enumerate((svc, lin_svc, rbf_svc, poly_svc)):
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
plt.subplot(2, 2, i + 1)
plt.subplots_adjust(wspace=0.4, hspace=0.4)
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.contourf(xx, yy, Z, cmap=plt.cm.coolwarm, alpha=0.8)
# Plot also the training points
plt.scatter(XtrainingSet.loc[:,13], XtrainingSet.loc[:,14], c=ytrainingSet, cmap=plt.cm.coolwarm)
    plt.xlabel('Feature 13')
    plt.ylabel('Feature 14')
plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
plt.xticks(())
plt.yticks(())
plt.title(titles[i])
plt.show()
# -
from sklearn.neural_network import MLPClassifier
mlpModel = MLPClassifier(solver = 'lbfgs', max_iter=300, alpha=1e-5, verbose=10, tol=1e-4, random_state=1,
learning_rate_init=.1)
start_time = time.time()
mlpModel.fit(XtrainingSet, ytrainingSet)
print("--- %s seconds ---" % (time.time() - start_time))
ypredicted_mlp = mlpModel.predict(XtestingSet)
ypredictedprob_mlp = mlpModel.predict_proba(XtestingSet)
mlpScore = mlpModel.score(XtestingSet, ytestingSet)
print("MLP Score", mlpScore)
print("weights between input and first hidden layer:")
print(mlpModel.coefs_[0])
print("\nweights between first hidden and second hidden layer:")
print(mlpModel.coefs_[1])
# +
from sklearn.neighbors import KNeighborsClassifier
for i in range(1, 100):  # n_neighbors must be at least 1
    knnModel = KNeighborsClassifier(n_neighbors=i)
    start_time = time.time()
    knnModel.fit(XtrainingSet, ytrainingSet)
    print("--- %s seconds ---" % (time.time() - start_time))
ypredicted_knn = knnModel.predict(XtestingSet)
ypredictedprob_knn = knnModel.predict_proba(XtestingSet)
knnScore = knnModel.score(XtestingSet, ytestingSet)
print(knnScore)
# -
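# +
# A small sketch (not in the original notebook) that records the accuracy for each k so
# the best-performing number of neighbours can be read off afterwards; it reuses the
# train/test splits defined above.
knn_scores = []
for k in range(1, 31):
    model = KNeighborsClassifier(n_neighbors=k)
    model.fit(XtrainingSet, ytrainingSet)
    knn_scores.append(model.score(XtestingSet, ytestingSet))
best_k = knn_scores.index(max(knn_scores)) + 1
print("Best k:", best_k, "with accuracy:", max(knn_scores))
# -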
import numpy as np
# %matplotlib inline
objects = ('LogisticR', 'SVM', 'MLP', 'KNN')
accuracyArray = np.array([logisticScore, svmScore, mlpScore, knnScore])*100
y_pos = np.arange(len(objects))
plt.bar(y_pos, accuracyArray, align='center', alpha=0.5)
plt.xticks(y_pos, objects)
plt.ylabel('Accuracy in %')
plt.title('Comparison Among Different Models')
plt.savefig('accuracy.png')
plt.show()
normalizedAccuracyArray = ((accuracyArray-min(accuracyArray))/(max(accuracyArray) - min(accuracyArray)))*100
plt.bar(y_pos, normalizedAccuracyArray, align='center', alpha=0.5)
plt.xticks(y_pos, objects)
plt.ylabel(' Normalized Accuracy in %')
plt.title('Comparison Among Different Models')
plt.savefig('normalizedAccuracy.png')
plt.show()
# predict probabilities
from sklearn.metrics import roc_curve
from sklearn.metrics import roc_auc_score
ns_probs = [1 for _ in range(len(ytestingSet))]
lr_probs = ypredictedprob_knn  # probabilities from the KNN model
# keep probabilities for the positive outcome only
lr_probs = lr_probs[:,1]
# calculate scores
ns_auc = roc_auc_score(ytestingSet, ns_probs)
lr_auc = roc_auc_score(ytestingSet, lr_probs)
# summarize scores
print('No Skill: ROC AUC=%.3f' % (ns_auc))
print('KNN: ROC AUC=%.3f' % (lr_auc))
# calculate roc curves
ns_fpr, ns_tpr, _ = roc_curve(ytestingSet, ns_probs)
lr_fpr, lr_tpr, _ = roc_curve(ytestingSet, lr_probs)
# plot the roc curve for the model
plt.plot(ns_fpr, ns_tpr, linestyle='--', label='No Skill')
plt.plot(lr_fpr, lr_tpr, marker='.', label='KNN')
# axis labels
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
# show the legend
plt.legend()
# show the plot
plt.savefig('ROC_svm_wrong.png')
plt.show()
print(ytestingSet)
from sklearn.metrics import precision_recall_curve
from sklearn.metrics import f1_score
from sklearn.metrics import auc
# calculate precision and recall for each threshold
ns_precision, ns_recall, _ = precision_recall_curve(ytestingSet, ns_probs)
lr_precision, lr_recall, _ = precision_recall_curve(ytestingSet, lr_probs)
# calculate scores
ns_f1, ns_auc = f1_score(ytestingSet, ns_probs), auc(ns_recall, ns_precision)
lr_f1, lr_auc = f1_score(ytestingSet, ypredicted_knn), auc(lr_recall, lr_precision)
# summarize scores
print('No Skill: f1=%.3f auc=%.3f' % (ns_f1, ns_auc))
print('KNN: f1=%.3f auc=%.3f' % (lr_f1, lr_auc))
# plot the precision-recall curves
plt.plot(ns_recall, ns_precision, linestyle='--', label='No Skill')
plt.plot(lr_recall, lr_precision, marker='.', label='KNN')
# axis labels
plt.xlabel('Recall')
plt.ylabel('Precision')
# show the legend
plt.legend()
# show the plot
plt.savefig('PR_knn.png')
plt.show()
| NewNotebook.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
#Defining a string with "" and ''
example_string_1 = "Hello World 1"
example_string_2 = 'Hello World 2'
#Printing Strings
print(example_string_1)
print(example_string_1, example_string_2)
########### STRING OPERATIONS #############
#Joining Strings
example_string_joined = example_string_1 + " " + example_string_2 # Join two strings with a single space
print("example_string_joined : ", example_string_joined)
example_string_with_spaces = " ".join(example_string_2) #Puts a space between each character of the string
print("example_string_with_spaces : ", example_string_with_spaces)
#REPLACE OPERATIONS IN STRINGS
print(example_string_1.replace("World", "Mars")) #Operations need to be assigned to variable in order to be permanent
print(example_string_1)
print(example_string_1.replace("l", "x")) #All "l" characters will be replaced
print(example_string_1.replace("l", "x", 2)) #Only two "l" characters from start will be replaced
# +
#SEARCHING STRINGS
#There are .startswith() and .endswith() functions to check strings start and end
print("Is example_string_1 ends with 'World 1' :", example_string_1.endswith("World 1"))
#Returns True because string ends with given string
print("Is example_string_1 ends with 'World' :", example_string_1.endswith("World"))
#Returns False because string does not ends with given string
#.find() works from left, if you need from right .rfind() is your function
print("Index of 'World' in example_string_1 :", example_string_1.find("World"))
#Returns the index of the first occurrence (W's index; remember indexes start from 0)
print("Index of 'Banana' in example_string_1 :", example_string_1.find("Banana"))
#Returns -1 because there is no Banana in our string
# +
#COUNTING CHARACTERS IN A STRING
example_string_3 = "There are some monkeys, lions, birds, and pythons in the jungle"
print("The length of the whole string is", len(example_string_3))
print("There are", example_string_3.count(","), "commas are in the string.") #Counts "," s in string
# +
#CASE OF STRINGS (UPPER and LOWER)
lowercase_string = "python science"
uppercase_string = "YOUTUBE"
mixedcase_string = "Hi There!"
print("Is 'lowercase_string' lowercase? :", lowercase_string.islower())
print("Is 'uppercase_string' uppercase? :", uppercase_string.isupper())
print("Is 'mixedcase_string.lower()' lowercase? :", mixedcase_string.lower() , mixedcase_string.lower().islower())
print("Is 'mixedcase_string.upper()' uppercase? :", mixedcase_string.upper() , mixedcase_string.upper().isupper())
# -
#REMOVING SPACES
#from begining, end, and middle of strings
str_with_space_at_end = "I know how to change case in strings "
str_with_space_at_begin = " and check if string is upper or lower case"
str_with_space_at_both = " python is so cool "
#The method .rstrip() Removes all spaces from END of string
print("How 'rstrip' works:\r\n"
,"'" + str_with_space_at_end.rstrip() + "'\r\n"
,"'" + str_with_space_at_begin.rstrip() + "'\r\n"
,"'" + str_with_space_at_both.rstrip() + "'"
)
#The method .lstrip() Removes all spaces from BEGINING of string
print("How 'lstrip' works:\r\n"
,"'" + str_with_space_at_end.lstrip() + "'\r\n"
,"'" + str_with_space_at_begin.lstrip() + "'\r\n"
,"'" + str_with_space_at_both.lstrip() + "'"
)
#The method .strip() Removes all spaces from END and BEGINING of string
print("How 'strip' works:\r\n"
,"'" + str_with_space_at_end.strip() + "'\r\n"
,"'" + str_with_space_at_begin.strip() + "'\r\n"
,"'" + str_with_space_at_both.strip() + "'"
)
#If you need to remove all spaces in a string just use your_string.replace(" ", "")
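#A quick demonstration of that tip, reusing the string defined above
print("'" + str_with_space_at_both.replace(" ", "") + "'")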
#CONSTRUCT A LIST FROM A STRING (SPLIT)
example_string_4 = "One;Two;Three;Four"
str_array = example_string_4.split(";")
print("Type:", type(str_array), " | Array:", str_array)
# +
#LISTING ALL AVAILABLE METHODS (FUNCTIONS) FOR STRINGS
dummy_str = "Hello"
print("############### STRING METHOD LIST #############")
print("\r\n".join(dir(dummy_str)))
print("################################################")
print("DOCS FOR RPARTITION:",dummy_str.rpartition.__doc__)
# -
| Python For Beginners/Python Beginner Tutorials - 2 - Variables & Data Types - String Data Type.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # XML example and exercise
# ****
# + study examples of accessing nodes in XML tree structure
# + work on exercise to be completed and submitted
# ****
# + reference: https://docs.python.org/2.7/library/xml.etree.elementtree.html
# + data source: http://www.dbis.informatik.uni-goettingen.de/Mondial
# ****
from xml.etree import ElementTree as ET
# ## XML example
#
# + for details about tree traversal and iterators, see https://docs.python.org/2.7/library/xml.etree.elementtree.html
document_tree = ET.parse( './data/mondial_database_less.xml' )
# print names of all countries
for child in document_tree.getroot():
print(child.find('name').text)
# print names of all countries and their cities
for element in document_tree.iterfind('country'):
print ('* ' + element.find('name').text + ':',)
capitals_string = ''
    for subelement in element.iter('city'):  # getiterator() was removed in Python 3.9
capitals_string += subelement.find('name').text + ', '
print (capitals_string[:-2])
# ****
# ## XML exercise
#
# Using data in 'data/mondial_database.xml', the examples above, and referring to https://docs.python.org/2.7/library/xml.etree.elementtree.html, find
#
# 1. 10 countries with the lowest infant mortality rates
# 2. 10 cities with the largest population
# 3. 10 ethnic groups with the largest overall populations (sum of best/latest estimates over all countries)
# 4. name and country of a) longest river, b) largest lake and c) airport at highest elevation
from xml.etree import ElementTree as ET
document = ET.parse( './data/mondial_database.xml' )
root=document.getroot()
root.tag
# To find the Top 10 countries with lowest infant mortality rates:
# +
import pandas as pd
df=pd.DataFrame(columns=["names","infant_mortality"])
for country in document.findall('country'):
names=country.find('name').text
for child in country:
if child.tag=='infant_mortality':
infant_mortality=float(child.text)
df.loc[len(df)] = [names,infant_mortality]
#print(mortality)
#df.loc[len(df)] = [names,mortality]
#df = df.append(pd.Series(names, index=['country_names']), ignore_index=True)
#df
#len(names)
#names=names[0:len(mortality)]
#df=pd.DataFrame({'country_names':names,'infant_mortality':mortality})
df.sort_values(by ='infant_mortality').head(10)
# -
# To find the Top 10 cities with largest population:
# +
df2 = pd.DataFrame(columns = ["name","population"])
for country in document.findall('country'):
for city in country.iter('city'):
name=city.find('name').text
pop=city.findall('population')
        #Some cities have no population element, so handle missing values with try/except below
try:
population1 = int(pop[-1].text)
except:
population1 = int(0)
df2.loc[len(df2)] = [name,population1]
df2.sort_values(by= 'population', ascending= False).head(10)
# -
# To find the Top 10 ethnic groups with largest overall population:
# +
# import pandas as pd
df3 = pd.DataFrame(columns = ["name","population","ethnicgroup","percentage","total_estimation"])
for country in document.findall('country'):
for city in country.iter('city'):
name=city.find('name').text
pop=city.findall('population')
        #Some cities have no population element, so handle missing values with try/except below
try:
population1 = int(pop[-1].text)
except:
population1 = int(0)
#df2.loc[len(df2)] = [name,population1]
for ethnicgroup in country.findall('ethnicgroup'):
eg = ethnicgroup.text
percent = float(ethnicgroup.attrib['percentage'])
percent = (percent/100)
total_estimate=percent*population1
df3.loc[len(df3)] = [name, population1, eg, percent,total_estimate]
#df3.sort_values(by='total_estimation', ascending= False).head(10)
df3.groupby('ethnicgroup').sum().sort_values(by='total_estimation', ascending=False).head(10)
# -
# Airport with the highest elevation:
# +
elevation_high=int(0)
for air in document.findall('airport'):
name1=air.find('name').text
country1=air.get('country')
for child in air:
if child.tag=='elevation':
try:
air_elevation=float(child.text)
except:
air_elevation= int(0)
#print(air_elevation)
if air_elevation>elevation_high:
elevation_high=air_elevation
airport_name=name1
country_name=country1
print(elevation_high)
print(airport_name)
print(country_name)
#print("The airport name is"airport_name + ' '+country_name)
# -
# Longest River
# +
river_length=int(0)
for riv in document.findall('river'):
name2=riv.find('name').text
country2=riv.get('country')
for child in riv:
if child.tag=='length':
try:
river_length_in=float(child.text)
except:
river_length_in= int(0)
#print(air_elevation)
if river_length_in>river_length:
river_length=river_length_in
river_name=name2
country_river=country2
print(river_length)
print(river_name)
print(country_river)
# -
# Largest Lake:
# +
lake_area=int(0)
for lake in document.findall('lake'):
name3=lake.find('name').text
country3=lake.get('country')
for child in lake:
if child.tag=='area':
try:
lake_area_in=float(child.text)
except:
lake_area_in= int(0)
#print(air_elevation)
if lake_area_in>lake_area:
lake_area=lake_area_in
lake_name=name3
country_name=country3
print(lake_area)
print(lake_name)
print(country_name)
| XML_Mondial.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # EDA Dynamic - All Countries
# +
# %matplotlib inline
import pandas as pd
import numpy as np
from datetime import datetime
import matplotlib as mpl
import matplotlib.pyplot as plt
import seaborn as sns
import plotly.graph_objects as go
import plotly
plotly.__version__
#import webbrowser
#print(webbrowser._browsers)
import dash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output
# +
dataPath_Raw = ("../data/raw/COVID-19/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_confirmed_global.csv")
dataPath_Processed = ("../data/processed/COVID_Flat_Table_Complete.csv")
pd.set_option("display.max_rows", 500)
# -
pd_proc = pd.read_csv(dataPath_Processed)
pd_proc.head()
pd_raw = pd.read_csv(dataPath_Raw)
country_list = pd_raw['Country/Region'].unique()
country_list;
d = dict(enumerate(country_list.flatten(), 1))
# +
label = 'label'
value = 'value'
country = list(country_list)
dlist = [{label: c, value: c} for c in country]
#print(dlist)
# +
df_plot = pd.read_csv(dataPath_Processed, sep=",")
df_plot.head()
# -
fig = go.Figure()
app = dash.Dash()
app.layout = html.Div([
html.Label('Multi-Select Country'),
dcc.Dropdown(
id = "country_drop_down",
options = dlist,
value=['US', 'Germany'],
multi=True
),
dcc.Graph(figure=fig, id="main_window_slope")
])
# +
@app.callback(
Output('main_window_slope', 'figure'),
[Input('country_drop_down', 'value')]
)
def update_figure(country_list):
traces = []
for i in country_list:
traces.append(dict(x=df_plot.date,
y = df_plot[i],
mode = 'markers+lines',
opacity = 0.9,
line_width = 0.4, marker_size = 3,
name = i
)
)
return {
'data': traces,
'layout': dict (
width = 1280,
height = 720,
xaxis_title = "Time",
yaxis_title = "Confirmed infected people [source: <NAME> csse, log-scale]",
xaxis = {
'tickangle' : 0,
'nticks': 20,
'tickfont': dict(size= 17, color = '#7f7f7f')
},
yaxis = {
'type':'log',
'range':[0, 7]
}
)
}
# -
app.run_server(debug=True, use_reloader=False)
| notebooks/4.3_EDA_Dynamic_AllCountries.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Quick-Start Guide
# **MLRun** is an end-to-end [open-source](https://github.com/mlrun/mlrun) MLOps solution to manage and automate your
# analytics and machine learning lifecycle, from data ingestion, through model development and full pipeline/model deployment, to model monitoring.
# Its primary goal is to ease the development of machine learning pipelines at scale and help organizations build a
# robust process for moving from the research phase to fully operational production deployments.
#
# MLRun automates the process of moving code to production by implementing a **serverless** approach, where different tasks or services are executed over elastic **serverless functions** (read more about [MLRun functions](./runtimes/functions.md)). In this quick-start guide we use existing (marketplace) functions; see the [**tutorial**](./tutorial/index.md) for a more detailed example of how to create and use functions.
# **Table of Contents**
# * [Working with MLRun](#working-with-mlrun)
# * [Train a Model](#train-a-model)
# * [Test the Model](#test-the-model)
# * [Serve the Model](#serve-the-model)
# ## Working with MLRun
# <a name="working-with-mlrun"></a>
# If you need to install MLRun, refer to the [Installation Guide](install.md).
# >**Note**: If you are using the [Iguazio MLOps Platform](https://www.iguazio.com/), MLRun already comes
# >preinstalled and integrated in your system.
#
# If you are not viewing this quick-start guide from a Jupyter Lab instance, open it on your cluster, create a
# new notebook, and copy the sections below to the notebook to run them.
# ### Set Environment
# Before you begin, initialize MLRun by calling `set_environment` and provide it with the project name. All the work will be saved and tracked under that project.
# +
import mlrun
project = mlrun.new_project('quick-start', user_project=True)
# -
# ## Train a Model
# <a name="train-a-model"></a>
# MLRun introduces the concept of [functions](./runtimes/functions.md). You can run your own code in functions, or use
# functions from the [function marketplace](https://www.mlrun.org/marketplace/). Functions can run locally or over elastic **"serverless"** engines (as containers over [kubernetes](https://kubernetes.io/)).
#
# In the example below, you'll use the [`sklearn_classifier`](https://github.com/mlrun/functions/tree/master/sklearn_classifier)
# from MLRun [function marketplace](https://www.mlrun.org/marketplace/) to train a model and use a sample dataset
# (CSV file) as the input. You can read more on how to [**use data items**](./store/datastore.md) from different data sources
# or from the [**Feature Store**](./feature-store/feature-store.md).
# **Note: When training a model in an air-gapped site** expand the cell below ..
# + [markdown] jupyter={"source_hidden": true} tags=["hide-cell"]
# > If you are working in MLRun:
# > 1. Download your data file and save it locally.
# > 2. Run:</br>
# > `import os`</br>
# >    `os.environ["env_name"] = "1"`
# > 2. Use the same command for the sample data set path, for example: <br>`source_url = mlrun.get_sample_path("data/iris/iris_dataset.csv")`
# >
# > If your system is integrated with an MLOps Platform:
# > 1. Download your data file and save it locally.
# > 2. In the UI, click the settings icon (<img src="./_static/images/icon-igz-settings.png" alt="Settings"/>) in the top-right of the header in any page to open the **Settings** dialog.
# > 2. Click **Environment variables | Create a new environment variable**, and set an environmental variable: SAMPLE_DATA_SOURCE_URL_PREFIX = the relative path to locally-stored data. For example: <br>`/v3io/bigdata/path/...`
# > 2. Use the same command for the sample data set path, for example: <br>`source_url = mlrun.get_sample_path("data/iris/iris_dataset.csv")`
# +
# import the training function from the marketplace (hub://)
train = mlrun.import_function('hub://sklearn_classifier')
# Get a sample dataset path (points to MLRun data samples repository)
source_url = mlrun.get_sample_path("data/iris/iris_dataset.csv")
# run the function and specify input dataset path and some parameters (algorithm and label column name)
train_run = train.run(name='train',
inputs={'dataset': source_url},
params={'model_pkg_class': 'sklearn.linear_model.LogisticRegression',
'label_column': 'label'})
# -
# The run output above contains a link to the MLRun UI. Click it to inspect the various aspects of the jobs you run:
#
# <img src="./_static/images/mlrun-quick-start/train-info.png" alt="ui-info" width="800"/>
# As well as their artifacts:
#
# <img src="./_static/images/mlrun-quick-start/train-artifacts.png" alt="ui-artifacts" width="800"/>
# When running the function in a Jupyter notebook, the output cell for your function execution contains a table with
# run information — including the state of the execution, all inputs and parameters, and the execution results and artifacts.
#
# 
# ## Test the Model
# <a name="test-the-model"></a>
# Now that you have a trained model, you can test it: run a task that uses the [test_classifier](https://github.com/mlrun/functions/tree/master/test_classifier)
# function from the function marketplace to run the selected trained model against the test dataset. The test dataset
# was returned from the training task (`train_run`) in the previous step.
test = mlrun.import_function('hub://test_classifier')
# You can then run the function as part of your project, just as any other function that you have written yourself.
# To view the function documentation, call the `doc` method:
test.doc()
# Configure parameters for the test function (`params`), and provide the selected trained model from the train task as an input artifact (`inputs`):
test_run = test.run(name="test",
params={"label_column": "label"},
inputs={"models_path": train_run.outputs['model'],
"test_set": train_run.outputs['test_set']})
# ## Serve the Model
# <a name="serve-the-model"></a>
# MLRun serving can take MLRun models or standard model files and produce managed, real-time, serverless functions using
# the [Nuclio real-time serverless framework](https://www.iguazio.com/open-source/nuclio/).
# Nuclio is built around data, I/O, and compute-intensive workloads, and is focused on performance and flexibility.
# Nuclio is also deeply integrated into the MLRun framework.
# See [MLRun Serving documentation](./serving/serving-graph.md) to learn more about the rich serving capabilities
# MLRun has to offer.
#
#
# To deploy your model using the [v2_model_server function](https://github.com/mlrun/functions/tree/master/v2_model_server),
# run the following code:
serve = mlrun.import_function('hub://v2_model_server')
model_name='iris'
serve.add_model(model_name, model_path=train_run.outputs['model'])
addr = serve.deploy()
# The `invoke` method enables you to test the function programmatically.
# +
import json
inputs = [[5.1, 3.5, 1.4, 0.2],
[7.7, 3.8, 6.7, 2.2]]
my_data = json.dumps({'inputs': inputs})
serve.invoke(f'v2/models/{model_name}/infer', my_data)
# + [markdown] pycharm={"name": "#%% md\n"}
# Open the Nuclio UI to view the function and test it.
# -
# 
# <br>
#
# For a more detailed walk-through, refer to the [**getting-started tutorial**](tutorial/index.md).
| docs/quick-start.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.8.5 64-bit
# language: python
# name: python385jvsc74a57bd075a498e876064b852a066a747cb28d21c70864f5dbc042b9766d1ceb6eda917c
# ---
from pyspark.sql import SparkSession
spark = SparkSession \
.builder \
.appName("Python Spark create RDD example") \
.config("spark.some.config.option", "some-value") \
.getOrCreate()
import pandas as pd
dp = pd.read_json("http://api.luftdaten.info/static/v1/data.json")
dp
ds = spark.read.json('data/data.json')
ds.show()
ds[['id','timestamp']].show(4)
ds.columns
ds.dtypes
drop_name = ['location','sensor']
ds.drop(*drop_name).show()
##JOIN
from pyspark.conf import SparkConf
from pyspark.context import SparkContext
from pyspark.sql import SparkSession
import pandas as pd
spark = SparkSession \
.builder \
.appName("Python Spark create RDD example") \
.config("spark.some.config.option", "some-value") \
.getOrCreate()
# sc= SparkContext('local','example')
# hc = HiveContext(sc)
# +
leftp = pd.DataFrame({'A': ['A0', 'A1', 'A2', 'A3'],
'B': ['B0', 'B1', 'B2', 'B3'],
'C': ['C0', 'C1', 'C2', 'C3'],
'D': ['D0', 'D1', 'D2', 'D3']},
index=[0, 1, 2, 3])
rightp = pd.DataFrame({'A': ['A0', 'A1', 'A6','A7'],
'F': ['B4', 'B5', 'B6','B7'],
'G': ['C4', 'C5', 'C6','C7'],
'H': ['D4', 'D5', 'D6','D7']},
index=[4, 5, 6, 7])
lefts = spark.createDataFrame(leftp)
rights = spark.createDataFrame(rightp)
# -
leftp
rightp
lefts.show()
rights.show()
fr=lefts.join(rights,on='A',how='left')
fr.show()
ds.show()
my_list = [('a', 2, 3),
('b', 5, 6),
('c', 8, 9),
('a', 2, 3),
('b', 5, 6),
('c', 8, 9)]
col_name = ['col1', 'col2', 'col3']
#
dp = pd.DataFrame(my_list,columns=col_name)
ds = spark.createDataFrame(my_list,schema=col_name)
dp
ds.show()
dp['concat'] = dp.apply(lambda x:'%s%s'%(x['col1'],x['col2']),axis=1)
ds.show()
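# The Spark counterpart of the pandas concatenation above, shown as a sketch using
# pyspark.sql.functions.concat; col2 is cast to string explicitly since it is numeric.
from pyspark.sql.functions import concat, col
ds.withColumn('concat', concat(col('col1'), col('col2').cast('string'))).show()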
# +
data = [('James','','Smith','1991-04-01','M',3000),
('Michael','Rose','','2000-05-19','M',4000),
('Robert','','Williams','1978-09-05','M',4000),
('Maria','Anne','Jones','1967-12-01','F',4000),
('Jen','Mary','Brown','1980-02-17','F',-1)
]
columns = ["firstname","middlename","lastname","dob","gender","salary"]
from pyspark.sql import SparkSession
spark = SparkSession.builder.appName('SparkByExamples.com').getOrCreate()
df = spark.createDataFrame(data=data, schema = columns)
# -
df.show()
from pyspark.sql.functions import col
df.withColumn("salary", col("salary").cast("Integer")).show()
| Untitled.ipynb |