# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Use the Azure Machine Learning data collector to log various metrics
from azureml.logging import get_azureml_logger
logger = get_azureml_logger()
# +
# Use Azure Machine Learning history magic to control history collection
# History is off by default, options are "on", "off", or "show"
# # %azureml history on
# +
# The purpose of this notebook is to train a gradient boosting based model to classify the tweets' sentiment as positive or negative.
import numpy as np
import pandas as pd
import os
import io
random_seed=123
np.random.seed(random_seed)
import tensorflow as tf
import keras
from keras import backend as K
from keras.models import Model
from keras.layers import Input, merge
from keras.layers.core import Lambda
from keras import optimizers
from keras import regularizers
from keras.models import load_model
from keras.callbacks import ModelCheckpoint
from keras.utils.np_utils import to_categorical
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.utils.np_utils import to_categorical
from keras.models import Sequential
from keras.layers import Input, Dense, Flatten, Embedding , Activation
from nltk.tokenize import TweetTokenizer
import re
import num2words
from timeit import default_timer as timer
from sklearn import metrics
from sklearn.model_selection import train_test_split
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.model_selection import KFold
from sklearn.externals import joblib
# +
# Path of the training file
data_dir = r'C:\Users\ds1\Documents\AzureML\data'
# Path of the word vectors
vectors_file = r'../02_modeling/vectors/embeddings_Word2Vec_Basic.tsv'
model_identifier = 'evaluation_word2vec_gbm'
models_dir = 'model'
if not os.path.exists(models_dir):
os.makedirs(models_dir)
vector_size=50
# +
# Read and preprocess the data
pos_emoticons=["(^.^)","(^-^)","(^_^)","(^_~)","(^3^)","(^o^)","(~_^)","*)",":)",":*",":-*",":]",":^)",":}",
":>",":3",":b",":-b",":c)",":D",":-D",":O",":-O",":o)",":p",":-p",":P",":-P",":Þ",":-Þ",":X",
":-X",";)",";-)",";]",";D","^)","^.~","_)m"," ~.^","<=8","<3","<333","=)","=///=","=]","=^_^=",
"=<_<=","=>.<="," =>.>="," =3","=D","=p","0-0","0w0","8D","8O","B)","C:","d'-'","d(>w<)b",":-)",
"d^_^b","qB-)","X3","xD","XD","XP","ʘ‿ʘ","❤","💜","💚","💕","💙","💛","💓","💝","💖","💞",
"💘","💗","😗","😘","😙","😚","😻","😀","😁","😃","☺","😄","😆","😇","😉","😊","😋","😍",
"😎","😏","😛","😜","😝","😮","😸","😹","😺","😻","😼","👍"]
neg_emoticons=["--!--","(,_,)","(-.-)","(._.)","(;.;)9","(>.<)","(>_<)","(>_>)","(¬_¬)","(X_X)",":&",":(",":'(",
":-(",":-/",":-@[1]",":[",":\\",":{",":<",":-9",":c",":S",";(",";*(",";_;","^>_>^","^o)","_|_",
"`_´","</3","<=3","=/","=\\",">:(",">:-(","💔","☹️","😌","😒","😓","😔","😕","😖","😞","😟",
"😠","😡","😢","😣","😤","😥","😦","😧","😨","😩","😪","😫","😬","😭","😯","😰","😱","😲",
"😳","😴","😷","😾","😿","🙀","💀","👎"]
# Emails
emailsRegex=re.compile(r'[\w\.-]+@[\w\.-]+')
# Mentions
userMentionsRegex=re.compile(r'(?<=^|(?<=[^a-zA-Z0-9-_\.]))@([A-Za-z]+[A-Za-z0-9]+)')
#Urls
urlsRegex=re.compile(r'(f|ht)(tp)(s?)(://)(.*)[.|/][^ ]+') # May not handle all cases, e.g. t.co links without an http prefix
#Numerics
numsRegex=re.compile(r"\b\d+\b")
punctuationNotEmoticonsRegex=re.compile(r'(?<=\w)[^\s\w](?![^\s\w])')
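# A quick illustration of the regexes above (the sample strings are made up, not from the dataset):
# single punctuation attached to a word gets tagged, while runs of punctuation (emoticon-like) are left alone.
print(numsRegex.sub(' NUM ', 'won 3 of 10 games'))        # -> won  NUM  of  NUM  games
print(punctuationNotEmoticonsRegex.sub(' PUN ', 'nice!')) # -> nice PUN
print(punctuationNotEmoticonsRegex.sub(' PUN ', 'wow!!')) # -> wow!! (unchanged)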
emoticonsDict = {} # define desired replacements here
for i,each in enumerate(pos_emoticons):
emoticonsDict[each]=' POS_EMOTICON_'+num2words.num2words(i).upper()+' '
for i,each in enumerate(neg_emoticons):
emoticonsDict[each]=' NEG_EMOTICON_'+num2words.num2words(i).upper()+' '
# use these three lines to do the replacement
rep = dict((re.escape(k), v) for k, v in emoticonsDict.items())
emoticonsPattern = re.compile("|".join(rep.keys()))
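# A quick, illustrative check of the replacement machinery above (the sample tweet is made up,
# not taken from the dataset): ":)" is the ninth entry of pos_emoticons, so it maps to
# POS_EMOTICON_EIGHT; note the extra spaces added around the replacement token.
print(emoticonsPattern.sub(lambda m: rep[re.escape(m.group(0))], "great game :)"))
# prints: great game  POS_EMOTICON_EIGHT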
# +
def read_data(filename):
"""Read the raw tweet data from a file. Replace Emails etc with special tokens"""
with open(filename, 'r') as f:
all_lines=f.readlines()
padded_lines=[]
for line in all_lines:
line = emoticonsPattern.sub(lambda m: rep[re.escape(m.group(0))], line.lower().strip())
line = userMentionsRegex.sub(' USER ', line )
line = emailsRegex.sub(' EMAIL ', line )
line=urlsRegex.sub(' URL ', line)
line=numsRegex.sub(' NUM ',line)
line=punctuationNotEmoticonsRegex.sub(' PUN ',line)
line=re.sub(r'(.)\1{2,}', r'\1\1',line)
words_tokens=[token for token in TweetTokenizer().tokenize(line)]
line= ' '.join(token for token in words_tokens )
padded_lines.append(line)
return padded_lines
def read_labels(filename):
""" read the tweet labels from the file"""
arr= np.genfromtxt(filename, delimiter='\n')
arr[arr==4]=1 # Encode the positive category as 1
return arr
# Convert Word Vectors to Sentence Vectors
def load_word_embedding(vectors_file):
""" Load the word vectors"""
vectors= np.genfromtxt(vectors_file, delimiter='\t', comments='#--#',dtype=None,
names=['Word']+['EV{}'.format(i) for i in range(1,51)])
# The comment character is set to '#--#' because some tokens contain '#' and we do not need comment handling here
vectors_dc={}
for x in vectors:
vectors_dc[x['Word'].decode('utf-8','ignore')]=[float(x[each]) for each in ['EV{}'.format(i) for i in range(1,51)]]
return vectors_dc
def get_sentence_embedding(text_data, vectors_dc):
sentence_vectors=[]
for sen in text_data:
tokens=sen.split(' ')
current_vector=np.array([vectors_dc[tokens[0]] if tokens[0] in vectors_dc else vectors_dc['<UNK>']])
for word in tokens[1:]:
if word in vectors_dc:
current_vector=np.vstack([current_vector,vectors_dc[word]])
else:
current_vector=np.vstack([current_vector,vectors_dc['<UNK>']])
min_max_mean=np.hstack([current_vector.min(axis=0),current_vector.max(axis=0),current_vector.mean(axis=0)])
sentence_vectors.append(min_max_mean)
return sentence_vectors
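# A minimal sanity check of get_sentence_embedding with a toy vocabulary (the vectors below are
# made up, not the trained embeddings): each sentence vector is the element-wise min, max and mean
# of its word vectors stacked together, i.e. 3 * vector_size values (3 * 50 = 150 for the real
# embeddings); unseen words fall back to '<UNK>'.
toy_vectors = {'good': [1.0, 2.0], 'day': [3.0, 0.0], '<UNK>': [0.0, 0.0]}
print(get_sentence_embedding(['good day', 'unseen words'], toy_vectors))
# -> [array([1., 0., 3., 2., 2., 1.]), array([0., 0., 0., 0., 0., 0.])]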
# Model Training
def heldout_score(clf, X_test, y_test,n_estimators =20):
"""compute deviance scores on ``X_test`` and ``y_test``. """
score = np.zeros((n_estimators,), dtype=np.float64)
for i, y_pred in enumerate(clf.staged_decision_function(X_test)):
score[i] = clf.loss_(y_test, y_pred)
return score
def cv_estimate(n_splits, X_train, y_train, n_estimators=20):
    """Cross-validate a gradient boosting classifier and keep the model from the best fold."""
    best_score, best_model = None, None
    cv = KFold(n_splits=n_splits)
    val_scores = np.zeros((n_estimators,), dtype=np.float64)
    i = 0
    for train, test in cv.split(X_train, y_train):
        # Fit a fresh classifier per fold so that best_model is not overwritten by later fits
        cv_clf = GradientBoostingClassifier(n_estimators=n_estimators, min_samples_leaf=3, verbose=1, loss='deviance')
        cv_clf.fit(X_train[train], y_train[train])
        current_score = heldout_score(cv_clf, X_train[test], y_train[test], n_estimators)
        val_scores += current_score
        print('Fold {} Score {}'.format(i+1, np.mean(current_score)))
        if best_score is None or np.mean(current_score) < best_score:
            best_score = np.mean(current_score)
            best_model = cv_clf
        i += 1
    val_scores /= n_splits
    return val_scores, best_model
# +
print ('Step 1: Loading training data')
train_texts=read_data(data_dir+'/training_text.csv')
train_labels=read_labels(data_dir+'/training_label.csv')
print ('Step 2: Load word vectors')
vectors_dc=load_word_embedding(vectors_file)
len(vectors_dc)
print ('Step 3: Convert Word vectors to sentence vectors')
train_sentence_vectors=get_sentence_embedding(train_texts,vectors_dc)
print (len(train_sentence_vectors), len(train_labels), len(train_texts))
print ("Encoding data")
train_x, valid_x, train_y, valid_y=train_test_split(train_sentence_vectors, train_labels, test_size=0.2, random_state=random_seed)
train_x=np.array(train_x).astype('float32')
valid_x=np.array(valid_x).astype('float32')
train_y=np.array(train_y)
valid_y=np.array(valid_y)
print ('Step 4: Gradient Boosting Model using sklearn')
n_splits=3
cv_score,best_model = cv_estimate(n_splits,train_x, train_y,20)
print ('Step 5: Save the model')
model_identifier = 'evaluation_word2vec_gbm'
joblib.dump(best_model, models_dir+'/'+model_identifier)
| code/02_modeling/02_ModelCreation/03_A_ModelCreation_Word2Vec_GBM.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from scipy import stats
# %matplotlib inline
import random
import urllib.request
import requests
from bs4 import BeautifulSoup
import warnings
warnings.filterwarnings('ignore')
# -
url = 'https://raw.githubusercontent.com/fifa-19-player-stats/data/master/fifa_raw_data.csv'
# +
df_raw = pd.read_csv(url)
df_raw.info()
# -
basic_data = df_raw[['ID', 'Photo']]
basic_data.head()
# +
df_raw.set_index('Unnamed: 0', inplace=True)
df_raw.head()
# +
# for col in df_raw.columns:
# print(col, df_raw.duplicated(subset=col, keep='first').sum())
# df_raw['Name'].duplicated()
# for i in range(len(df_raw)):
# if df_raw['Name'].duplicated():
# print (df_raw[i])
dupes = pd.concat(g for _, g in df_raw.groupby("Name") if len(g) > 1)
dupes
# -
df_raw['Name'].nunique()
df_raw['Club Logo'][0]
for col in df_raw.columns:
print(col, df_raw[col][0])
df_raw.columns
df_raw.dtypes
df_raw = pd.read_csv("https://raw.githubusercontent.com/aaptecode/buildweek_aaptecode/master/data.csv")
df_raw.head()
# +
col1 = ['ID', 'Name', 'Age', 'Photo', 'Nationality', 'Flag', 'Overall',
'Potential', 'Club', 'Club Logo', 'Value', 'Wage', 'Special',
'Preferred Foot', 'International Reputation', 'Weak Foot',
'Skill Moves', 'Work Rate', 'Body Type', 'Real Face', 'Position',
'Jersey Number', 'Joined', 'Loaned From', 'Contract Valid Until',
'Height', 'Weight','Release Clause']
col2 = ['LS', 'ST', 'RS', 'LW', 'LF', 'CF', 'RF', 'RW',
'LAM', 'CAM', 'RAM', 'LM', 'LCM', 'CM', 'RCM', 'RM', 'LWB', 'LDM',
'CDM', 'RDM', 'RWB', 'LB', 'LCB', 'CB', 'RCB', 'RB']
col3 = ['Name','Crossing',
'Finishing', 'HeadingAccuracy', 'ShortPassing', 'Volleys', 'Dribbling',
'Curve', 'FKAccuracy', 'LongPassing', 'BallControl', 'Acceleration',
'SprintSpeed', 'Agility', 'Reactions', 'Balance', 'ShotPower',
'Jumping', 'Stamina', 'Strength', 'LongShots', 'Aggression',
'Interceptions', 'Positioning', 'Vision', 'Penalties', 'Composure',
'Marking', 'StandingTackle', 'SlidingTackle', 'GKDiving', 'GKHandling',
'GKKicking', 'GKPositioning', 'GKReflexes']
df1 = df_raw[col1]
df2 = df_raw[col2]
df3 = df_raw[col3]
df1.shape, df2.shape, df3.shape
# -
df1.head()
df1.isnull().sum()
club_col = ['Club', 'Club Logo']
logo_df = df_raw[club_col].copy()
logo_df.head()
logo_df[logo_df.Club.isnull()]
df2.info()
df2.isnull().sum()
df3.info()
df3.isnull().sum()
df3.columns
df3 = df3.fillna(0) # Filled null values with zeros. There were only 48 null values
df3.isnull().sum()
df3.head()
df3.describe()
df3.groupby(['Name', 'Penalties'])['Penalties'].mean()
df3.groupby(['Name', 'Dribbling'])['Dribbling'].mean()
# +
#Grouping by Nationality
#Creating a dictionary called continents to map each country to its continent
continents = {
'Africa' : ['Algeria','Angola','Benin','Botswana','Burkina','Burundi','Cameroon','Cape Verde','Central African Republic','Chad','Comoros','Congo','DR Congo','Djibouti','Egypt','Equatorial Guinea','Eritrea','Ethiopia','Gabon','Gambia','Ghana','Guinea','Guinea Bissau','Ivory Coast','Kenya','Lesotho','Liberia','Libya','Madagascar','Malawi','Mali','Mauritania','Mauritius','Morocco','Mozambique','Namibia','Niger','Nigeria','Rwanda','Sao Tome and Principe','Senegal','Seychelles','Sierra Leone','Somalia','South Africa','South Sudan','Sudan','Swaziland','Tanzania','Togo','Tunisia','Uganda','Zambia','Zimbabwe','Burkina Faso'],
'Antarctica' : ['Fiji','Kiribati','Marshall Islands','Micronesia','Nauru','Palau','Papua New Guinea','Samoa','Solomon Islands','Tonga','Tuvalu','Vanuatu'],
'Asia' : ['Afghanistan','Bahrain','Bangladesh','Bhutan','Brunei','Burma (Myanmar)','Cambodia','China','China PR','East Timor','India','Indonesia','Iran','Iraq','Israel','Japan','Jordan','Kazakhstan','North Korea','South Korea','Korea Republic','Korea DPR','Kuwait','Kyrgyzstan','Laos','Lebanon','Malaysia','Maldives','Mongolia','Nepal','Oman','Pakistan','Palestine','Philippines','Qatar','Russian Federation','Saudi Arabia','Singapore','Sri Lanka','Syria','Tajikistan','Thailand','Turkey','Turkmenistan','United Arab Emirates','Uzbekistan','Vietnam','Yemen','Russia'],
'Australia Oceania' : ['Australia','New Caledonia', 'New Zealand'],
'Europe' : ['Albania','Andorra','Armenia','Austria','Azerbaijan','Belarus','Belgium','Bosnia Herzegovina','Bulgaria','Croatia','Cyprus','Czech Republic','Denmark','Estonia','Finland','France','FYR Macedonia','Georgia','Germany','Greece','Hungary','Iceland','Ireland','Italy','Kosovo','Latvia','Liechtenstein','Lithuania','Luxembourg','Macedonia','Malta','Moldova','Monaco','Montenegro','Netherlands','Northern Ireland','Norway','Poland','Portugal','Romania','San Marino','Scotland','Serbia','Slovakia','Slovenia','Spain','Sweden','Switzerland','Ukraine','England','Vatican City','Republic of Ireland','Wales'],
'North America' : ['Antigua and Barbuda','Bahamas','Barbados','Belize','Canada','Costa Rica','Cuba','Dominica','Dominican Republic','El Salvador','Grenada','Guatemala','Haiti','Honduras','Jamaica','Mexico','Nicaragua','Panama','Saint Kitts and Nevis','Saint Lucia','Saint Vincent and the Grenadines','Trinidad and Tobago','United States'],
'South America' : ['Argentina','Bolivia','Brazil','Chile','Colombia','Curacao','Ecuador','Guyana','Paraguay','Peru','Suriname','Trinidad & Tobago','Uruguay','Venezuela']
}
#Creating a function that assigns continent to each country
def find_continent(x, continents_list):
# Iterate over the continents and check whether the country appears in that continent's list
for key in continents_list:
if x in continents_list[key]:
return key
return np.NaN
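#Quick spot check of find_continent (uses the continents dictionary defined above;
#'Atlantis' is a made-up name to show the unknown-country case)
print(find_continent('Brazil', continents))   # -> South America
print(find_continent('Atlantis', continents)) # -> nan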
#Applying the function and creating the continent column
df1['Continent'] = df1['Nationality'].apply(lambda x: find_continent(x, continents))
#Selecting the top 1000 players from the dataset based on the Overall
top_1000 = df1.sort_values("Overall", ascending=False).reset_index().head(1000)[["Name", "Nationality", "Continent", "Overall", "Club", "Jersey Number"]]
top_1000
# -
for i in df3:
df3[i] = list(df3[i])
fig, ax = plt.subplots()
fig.set_size_inches(14,14)
ax = sns.heatmap(df3.corr(),cmap = "Purples").set_title('Correlation matrix')
top10 = list(df1['Nationality'].value_counts().head(10).index)
top10
d = df1.query('Nationality in @top10')
box_overall = sns.boxplot(data = d, y = 'Nationality', x = 'Overall',palette = 'Reds')
# +
player = pd.read_csv('https://raw.githubusercontent.com/fifa-19-player-stats/data/master/fifa_raw_data.csv',
index_col = 0)
def str2number(amount):
if amount[-1] == 'M':
return float(amount[1:-1])*1000000
elif amount[-1] == 'K':
return float(amount[1:-1])*1000
else:
return float(amount[1:])
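# Quick check of str2number on the value formats that appear in the Value/Wage columns
# (the example strings below are illustrative):
print(str2number('€110.5M'))  # -> 110500000.0
print(str2number('€405K'))    # -> 405000.0
print(str2number('€0'))       # -> 0.0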
player['ValueNum'] = player['Value'].apply(lambda x: str2number(x))
player['WageNum'] = player['Wage'].apply(lambda x: str2number(x))
clubs = ['FC Barcelona', 'Real Madrid CF', 'Paris Saint-Germain', 'FC Bayern Munich', 'Manchester United',
'Chelsea', 'Juventus', 'Arsenal', 'Manchester City', 'Milan']
c = player.query('Club in @clubs')
wage1 = sns.boxplot(data = c, y = 'Club', x = 'WageNum')
# -
wage2 = sns.violinplot(data = c, y = 'Club', x = 'WageNum')
# +
abbreviations = ('GK', 'LS', 'ST', 'RS', 'LW', 'LF', 'CF', 'RF', 'RW', 'LAM', 'CAM', 'RAM',
'LM', 'LCM', 'CM', 'RCM', 'RM', 'LWB', 'LDM', 'CDM', 'RDM', 'RWB', 'LB',
'LCB', 'CB', 'RCB', 'RB')
positions = ('Goal Keeper', 'Left Side Player', 'Striker', 'Right Side Player', 'Left Winger',
'Left Forward', 'Center Forward', 'Right Forward', 'Right Winger', 'Left Attacking Midfielder',
'Center Attacking Midfielder', 'Right Attacking Midfielder', 'Left Midfield', 'Left Center Midfield',
'Center Midfield', 'Right Center Midfielder', 'Right Midfielder', 'Left Wing Back', 'Left Defensive Midfielder',
'Center Defensive Midfielder', 'Right Defensive Midfielder', 'Right Wing Back',
'Left Full Back', 'Left Center Back', 'Center Back', 'Right Center Back', 'Right Full Back')
df_positions = pd.DataFrame({'Abbreviation': abbreviations, 'Position': positions})
df_positions
# -
df_raw['Name'] + ' ' + df_raw['LS']
| code/FIFA19.ipynb |
// ---
// jupyter:
// jupytext:
// text_representation:
// extension: .fs
// format_name: light
// format_version: '1.5'
// jupytext_version: 1.14.4
// kernelspec:
// display_name: F#
// language: fsharp
// name: ifsharp
// ---
// ## Sylvester.Arithmetic
//
// ### Introduction
// This library implements type-level natural-number arithmetic and constraints in F#, which allow operations like bounds checking to be performed at compile time by the F# type checker, provided the values involved are known at compile time.
// +
#load "../include/Math.fsx"
open Sylvester
open Sylvester.Arithmetic
open Sylvester.Arithmetic.N10
///Create typed representations of some natural numbers
let a,b,c = new N<400>(), new N<231111>(), new N<6577700>()
a + b + c
// -
// These values have types derived from *Sylvester.Arithmetic.N10*. The type of ``a`` is *N10<0,0,0,0,0,0,0,4,0,0>* and the type of ``c`` is *N10<0,0,0,6,5,7,7,7,0,0>*.
c.GetType()
// The types of the results of arithmetic operations depend on the values of the operands.
// +
let d = (a + b + c) * four
d
// -
d.GetType()
// This enables type-level constraints to be written which are checked at compile time.
N<4>.i +== four ///Compare two instances of N<4>
///Type level comparison operators return types dependent on the result of the comparison
N<4>.i +== thousand
/// The check function performs type-level checks.
check(d +< ten) ///This check causes a type error at compile time
a - (two * a) /// A negative number results in a type representing an underflow at compile-time
// +
/// Define a function myop using type-level arithmetic constraints
let myop a b c d =
check(a +> b) /// Use the +> and +< operators for type-level comparisons
check (b +== zero) /// +== Type level equality operator
check (c +== (a + one))
a + b + c + d
/// Call the function with values that don't satisfy the constraints
/// This will cause a type error and the code won't compile
myop four N<1000>.i three two
// +
let myop a b c d =
check(a +> b)
check (b +== zero)
check (c +== (a + one))
a + b + c + d
/// This call to myop satisfies the arithmetic constraints so the code compiles
myop five zero six N<10>.i
// -
// Mixing natural-number and primitive types results in the natural numbers being lowered to their primitive representation.
seven + eight
seven + eight + 1
// In IDEs with IntelliSense like Visual Studio you can get immediate feedback on type checks
// as you enter code:
// 
// ### How it works
// Natural numbers are represented by fixed-point decimal types. The basic concept is described here: https://wiki.haskell.org/The_Monad.Reader/Issue5/Number_Param_Types#Fixed-precision_decimal_types
//
// and an initial implementation for F# was created by MingTang here: https://github.com/Ming-Tang/TypeLevel/blob/master/TypeLevel/Base10.fs
//
// The implementation in Sylvester.Arithmetic uses a 10-digit fixed-point representation as well as adding the two arithmetic comparison operators +< and +>. It also utilizes the F# type provider mechanism to make creating types out of compile-time numeric values as seamless as possible.
| examples/math/Sylvester.Arithmetic.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + jupyter={"source_hidden": true}
from gs_quant.markets.securities import AssetIdentifier, SecurityMaster
from gs_quant.timeseries.measures import forward_vol, VolReference, implied_volatility
from gs_quant.timeseries.algebra import *
from gs_quant.timeseries.analysis import *
from gs_quant.data import DataContext
from gs_quant.instrument import EqOption
from gs_quant.markets.portfolio import Portfolio
from gs_quant.risk import EqDelta, EqGamma, EqVega, DollarPrice, Price
from gs_quant.markets import PricingContext
from gs_quant.backtests.triggers import *
from gs_quant.backtests.actions import *
from gs_quant.backtests.strategy import Strategy
from gs_quant.backtests.equity_vol_engine import EquityVolEngine
from gs_quant.session import GsSession, Environment
from gs_quant.target.backtests import FlowVolBacktestMeasure
from dateutil.relativedelta import relativedelta
from datetime import date, datetime
import matplotlib.pyplot as plt
import pandas as pd
import warnings
warnings.filterwarnings('ignore')
pd.options.display.float_format = '{:,.2f}'.format
# + jupyter={"source_hidden": true}
# external users should substitute their client id and secret; please skip this step if using internal jupyterhub
GsSession.use(Environment.PROD, client_id=None, client_secret=None, scopes=('run_analytics',))
# -
# In this notebook, we will take a look at historical and future implied trends in equity implied volatility, formulate a trade idea, analyze a trading strategy backtest, and calculate spot price and greeks. This is an example of GS Quant functionality and not a recommendation of a trade in the instruments discussed; please follow up with your salesperson to discuss specific implementations.
#
# * [1 - Analyze Vol Spread](#1---Analyze-Vol-Spread)
# * [2 - Trade Idea & Backtest](#2---Trade-Idea-&-Backtest)
# * [3 - Compare Performance](#3---Compare-Performance)
# * [4 - Price & Greeks](#4---Price-&-Greeks)
# ## 1 - Analyze Vol Spread
#
# Let's start by looking at historical forward vol for SPX and SX5E.
def forward_vol_spread(asset1, asset2, tenor, forward_date):
'''
Calculate the forward starting vol spread between two assets
'''
asset1_sec = SecurityMaster.get_asset(asset1, AssetIdentifier.BLOOMBERG_ID)
asset2_sec = SecurityMaster.get_asset(asset2, AssetIdentifier.BLOOMBERG_ID)
implied = lambda asset: implied_volatility(asset, tenor, VolReference.FORWARD, 100)
forward = lambda asset: forward_vol(asset, tenor, forward_date, VolReference.FORWARD, 100)
asset1_fvol = implied(asset1_sec) if forward_date=='0m' else forward(asset1_sec)
asset2_fvol = implied(asset2_sec) if forward_date=='0m' else forward(asset2_sec)
return subtract(asset1_fvol, asset2_fvol)
# +
start_date, end_date = date(2014, 1, 1), date(2020, 11, 2)
DataContext.current = DataContext(start=start_date, end=end_date)
sx5e_spx_fvs = forward_vol_spread('SX5E', 'SPX', '2y', '1y')
current_roll_data = {}
for rMonth in [0, 6, 9, 12]:
d = end_date + relativedelta(months=rMonth)
months_to_start = 12 - rMonth
curve = forward_vol_spread('SX5E', 'SPX', '2y', f'{months_to_start}m')
if curve.size:
current_roll_data[d] = last_value(curve)
current_roll = pd.Series(current_roll_data)
# + jupyter={"source_hidden": true}
sx5e_spx_fvs.plot(figsize=(12, 6), legend=True, label='SX5E vs SPX', title='Vol Spread')
current_roll.plot(figsize=(12, 6), legend=True, label='Current Spread Roll-Up')
plt.show()
# -
# ## 2 - Trade Idea & Backtest
#
# The analysis shows a projected narrowing of vol spread between SX5E and SPX. Let's create an option trading strategy that takes advantage of this and backtest it to see how it could have performed.
# +
# Create instruments
sx5e_straddle = (EqOption('.STOXX50E', '3y', 'ATMF', 'Call'), EqOption('.STOXX50E', '3y', 'ATMF','Put'))
spx_straddle = (EqOption('.SPX', '3y', 'ATMF', 'Call', multiplier=1), EqOption('.SPX', '3y', 'ATMF', 'Put', multiplier=1))
# Define triggers for trade and hedge actions.
# Trade and roll 1 sx5e_straddle every 1m
trade_sx5e_action = EnterPositionQuantityScaledAction(priceables=Portfolio(sx5e_straddle), trade_duration='1m', trade_quantity=1, trade_quantity_type=BacktestTradingQuantityType.quantity)
trade_sx5e_trigger = PeriodicTrigger(trigger_requirements=PeriodicTriggerRequirements(start_date=start_date, end_date=end_date, frequency='1m'),
actions=trade_sx5e_action)
# Hedge sx5e_straddle delta every business day
hedge_sx5e_action = HedgeAction(EqDelta, priceables=Portfolio(sx5e_straddle), trade_duration='B')
hedge_sx5e_trigger = PeriodicTrigger(trigger_requirements=PeriodicTriggerRequirements(start_date=start_date, end_date=end_date, frequency='B'),
actions=hedge_sx5e_action)
# Trade and roll 1 spx_straddle every 1m
trade_spx_action = EnterPositionQuantityScaledAction(priceables=Portfolio(spx_straddle), trade_duration='1m', trade_quantity=1, trade_quantity_type=BacktestTradingQuantityType.quantity)
trade_spx_trigger = PeriodicTrigger(trigger_requirements=PeriodicTriggerRequirements(start_date=start_date, end_date=end_date, frequency='1m'),
actions=trade_spx_action)
# Hedge spx_straddle delta every business day
hedge_spx_action = HedgeAction(EqDelta, priceables=Portfolio(spx_straddle), trade_duration='B')
hedge_spx_trigger = PeriodicTrigger(trigger_requirements=PeriodicTriggerRequirements(start_date=start_date, end_date=end_date, frequency='B'),
actions=hedge_spx_action)
EqVolE = EquityVolEngine()
# Define SX5E backtest
triggers_sx5e = [trade_sx5e_trigger, hedge_sx5e_trigger]
strategy_sx5e_result = EqVolE.run_backtest(Strategy(None, triggers_sx5e), start=start_date, end=end_date)
perf_sx5e = strategy_sx5e_result.get_measure_series(FlowVolBacktestMeasure.PNL)
# Define SPX backtest
triggers_spx = [trade_spx_trigger, hedge_spx_trigger]
strategy_spx_result = EqVolE.run_backtest(Strategy(None, triggers_spx), start=start_date, end=end_date)
perf_spx = strategy_spx_result.get_measure_series(FlowVolBacktestMeasure.PNL)
# + jupyter={"source_hidden": true}
perf_sx5e_spx_spread = subtract(perf_sx5e, perf_spx)
perf_sx5e_spx_spread.index = pd.DatetimeIndex(perf_sx5e_spx_spread.index)
perf_sx5e_spx_spread.plot(figsize=(12, 6), title='Strategy SX5E vs SPX 3y Vol Spread')
plt.show()
# -
# ## 3 - Compare Performance
#
# Now let's plot the trading strategy with the actual SX5E vs SPX vol spread to see how the two compare.
# +
fig, ax1 = plt.subplots()
# left axis
ax1.set_xlabel('date')
ax1.set_ylabel('Vol Spread',color='tab:red')
#right axis
ax2 = ax1.twinx()
ax2.set_ylabel('Strategy PNL', color='tab:blue')
sx5e_spx_fvs.plot(figsize=(12, 6), ax=ax1, color='tab:red')
current_roll.plot(figsize=(12, 6), ax=ax1, color='tab:green', legend=True, label='Current Spread Roll-Up')
perf_sx5e_spx_spread.plot(figsize=(12, 6), ax=ax2, color='tab:blue')
fig.tight_layout()
plt.xlim(start_date, end_date + relativedelta(years=1))
plt.show()
# -
# ## 4 - Price & Greeks
#
# Finally, let's calculate current strategy price and greeks.
# +
sx5e_straddle_port, spx_straddle_port = Portfolio(sx5e_straddle), Portfolio(spx_straddle)
# Price and greeks in USD
sx5e_straddle_price = sx5e_straddle_port.calc(DollarPrice).aggregate()
spx_straddle_price = spx_straddle_port.calc(DollarPrice).aggregate()
print(f'Strategy Price$: {sx5e_straddle_price - spx_straddle_price}')
greeks = (EqDelta, EqGamma, EqVega)
sx5e_straddle_greeks = sx5e_straddle_port.calc(greeks).aggregate().to_frame()
spx_straddle_greeks = spx_straddle_port.calc(greeks).aggregate().to_frame()
pd.concat([spx_straddle_greeks,sx5e_straddle_greeks],keys=['SPX', 'SX5E']).transpose()
# -
# ### Disclaimers
#
# Indicative Terms/Pricing Levels: This material may contain indicative terms only, including but not limited to pricing levels. There is no representation that any transaction can or could have been effected at such terms or prices. Proposed terms and conditions are for discussion purposes only. Finalized terms and conditions are subject to further discussion and negotiation.
# www.goldmansachs.com/disclaimer/sales-and-trading-invest-rec-disclosures.html If you are not accessing this material via Marquee ContentStream, a list of the author's investment recommendations disseminated during the preceding 12 months and the proportion of the author's recommendations that are 'buy', 'hold', 'sell' or other over the previous 12 months is available by logging into Marquee ContentStream using the link below. Alternatively, if you do not have access to Marquee ContentStream, please contact your usual GS representative who will be able to provide this information to you.
# Backtesting, Simulated Results, Sensitivity/Scenario Analysis or Spreadsheet Calculator or Model: There may be data presented herein that is solely for illustrative purposes and which may include among other things back testing, simulated results and scenario analyses. The information is based upon certain factors, assumptions and historical information that Goldman Sachs may in its discretion have considered appropriate, however, Goldman Sachs provides no assurance or guarantee that this product will operate or would have operated in the past in a manner consistent with these assumptions. In the event any of the assumptions used do not prove to be true, results are likely to vary materially from the examples shown herein. Additionally, the results may not reflect material economic and market factors, such as liquidity, transaction costs and other expenses which could reduce potential return.
# OTC Derivatives Risk Disclosures:
# Terms of the Transaction: To understand clearly the terms and conditions of any OTC derivative transaction you may enter into, you should carefully review the Master Agreement, including any related schedules, credit support documents, addenda and exhibits. You should not enter into OTC derivative transactions unless you understand the terms of the transaction you are entering into as well as the nature and extent of your risk exposure. You should also be satisfied that the OTC derivative transaction is appropriate for you in light of your circumstances and financial condition. You may be requested to post margin or collateral to support written OTC derivatives at levels consistent with the internal policies of Goldman Sachs.
#
# Liquidity Risk: There is no public market for OTC derivative transactions and, therefore, it may be difficult or impossible to liquidate an existing position on favorable terms. Transfer Restrictions: OTC derivative transactions entered into with one or more affiliates of The Goldman Sachs Group, Inc. (Goldman Sachs) cannot be assigned or otherwise transferred without its prior written consent and, therefore, it may be impossible for you to transfer any OTC derivative transaction to a third party.
#
# Conflict of Interests: Goldman Sachs may from time to time be an active participant on both sides of the market for the underlying securities, commodities, futures, options or any other derivative or instrument identical or related to those mentioned herein (together, "the Product"). Goldman Sachs at any time may have long or short positions in, or buy and sell Products (on a principal basis or otherwise) identical or related to those mentioned herein. Goldman Sachs hedging and trading activities may affect the value of the Products.
#
# Counterparty Credit Risk: Because Goldman Sachs, may be obligated to make substantial payments to you as a condition of an OTC derivative transaction, you must evaluate the credit risk of doing business with Goldman Sachs or its affiliates.
#
# Pricing and Valuation: The price of each OTC derivative transaction is individually negotiated between Goldman Sachs and each counterparty and Goldman Sachs does not represent or warrant that the prices for which it offers OTC derivative transactions are the best prices available, possibly making it difficult for you to establish what is a fair price for a particular OTC derivative transaction; The value or quoted price of the Product at any time, however, will reflect many factors and cannot be predicted. If Goldman Sachs makes a market in the offered Product, the price quoted by Goldman Sachs would reflect any changes in market conditions and other relevant factors, and the quoted price (and the value of the Product that Goldman Sachs will use for account statements or otherwise) could be higher or lower than the original price, and may be higher or lower than the value of the Product as determined by reference to pricing models used by Goldman Sachs. If at any time a third party dealer quotes a price to purchase the Product or otherwise values the Product, that price may be significantly different (higher or lower) than any price quoted by Goldman Sachs. Furthermore, if you sell the Product, you will likely be charged a commission for secondary market transactions, or the price will likely reflect a dealer discount. Goldman Sachs may conduct market making activities in the Product. To the extent Goldman Sachs makes a market, any price quoted for the OTC derivative transactions, Goldman Sachs may differ significantly from (i) their value determined by reference to Goldman Sachs pricing models and (ii) any price quoted by a third party. The market price of the OTC derivative transaction may be influenced by many unpredictable factors, including economic conditions, the creditworthiness of Goldman Sachs, the value of any underlyers, and certain actions taken by Goldman Sachs.
#
# Market Making, Investing and Lending: Goldman Sachs engages in market making, investing and lending businesses for its own account and the accounts of its affiliates in the same or similar instruments underlying OTC derivative transactions (including such trading as Goldman Sachs deems appropriate in its sole discretion to hedge its market risk in any OTC derivative transaction whether between Goldman Sachs and you or with third parties) and such trading may affect the value of an OTC derivative transaction.
#
# Early Termination Payments: The provisions of an OTC Derivative Transaction may allow for early termination and, in such cases, either you or Goldman Sachs may be required to make a potentially significant termination payment depending upon whether the OTC Derivative Transaction is in-the-money to Goldman Sachs or you at the time of termination. Indexes: Goldman Sachs does not warrant, and takes no responsibility for, the structure, method of computation or publication of any currency exchange rates, interest rates, indexes of such rates, or credit, equity or other indexes, unless Goldman Sachs specifically advises you otherwise.
# Risk Disclosure Regarding futures, options, equity swaps, and other derivatives as well as non-investment-grade securities and ADRs: Please ensure that you have read and understood the current options, futures and security futures disclosure document before entering into any such transactions. Current United States listed options, futures and security futures disclosure documents are available from our sales representatives or at http://www.theocc.com/components/docs/riskstoc.pdf, http://www.goldmansachs.com/disclosures/risk-disclosure-for-futures.pdf and https://www.nfa.futures.org/investors/investor-resources/files/security-futures-disclosure.pdf, respectively. Certain transactions - including those involving futures, options, equity swaps, and other derivatives as well as non-investment-grade securities - give rise to substantial risk and are not available to nor suitable for all investors. If you have any questions about whether you are eligible to enter into these transactions with Goldman Sachs, please contact your sales representative. Foreign-currency-denominated securities are subject to fluctuations in exchange rates that could have an adverse effect on the value or price of, or income derived from, the investment. In addition, investors in securities such as ADRs, the values of which are influenced by foreign currencies, effectively assume currency risk.
# Options Risk Disclosures: Options may trade at a value other than that which may be inferred from the current levels of interest rates, dividends (if applicable) and the underlier due to other factors including, but not limited to, expectations of future levels of interest rates, future levels of dividends and the volatility of the underlier at any time prior to maturity. Note: Options involve risk and are not suitable for all investors. Please ensure that you have read and understood the current options disclosure document before entering into any standardized options transactions. United States listed options disclosure documents are available from our sales representatives or at http://theocc.com/publications/risks/riskstoc.pdf. A secondary market may not be available for all options. Transaction costs may be a significant factor in option strategies calling for multiple purchases and sales of options, such as spreads. When purchasing long options an investor may lose their entire investment and when selling uncovered options the risk is potentially unlimited. Supporting documentation for any comparisons, recommendations, statistics, technical data, or other similar information will be supplied upon request.
# This material is for the private information of the recipient only. This material is not sponsored, endorsed, sold or promoted by any sponsor or provider of an index referred herein (each, an "Index Provider"). GS does not have any affiliation with or control over the Index Providers or any control over the computation, composition or dissemination of the indices. While GS will obtain information from publicly available sources it believes reliable, it will not independently verify this information. Accordingly, GS shall have no liability, contingent or otherwise, to the user or to third parties, for the quality, accuracy, timeliness, continued availability or completeness of the data nor for any special, indirect, incidental or consequential damages which may be incurred or experienced because of the use of the data made available herein, even if GS has been advised of the possibility of such damages.
# iTraxx® is a registered trade mark of International Index Company Limited.
# iTraxx® is a trade mark of International Index Company Limited and has been licensed for the use by Goldman Sachs Japan Co., Ltd. International Index Company Limited does not approve, endorse or recommend Goldman Sachs Japan Co., Ltd. or iTraxx® derivatives products.
# iTraxx® derivatives products are derived from a source considered reliable, but neither International Index Company Limited nor any of its employees, suppliers, subcontractors and agents (together iTraxx Associates) guarantees the veracity, completeness or accuracy of iTraxx® derivatives products or other information furnished in connection with iTraxx® derivatives products. No representation, warranty or condition, express or implied, statutory or otherwise, as to condition, satisfactory quality, performance, or fitness for purpose are given or assumed by International Index Company Limited or any of the iTraxx Associates in respect of iTraxx® derivatives products or any data included in such iTraxx® derivatives products or the use by any person or entity of iTraxx® derivatives products or that data and all those representations, warranties and conditions are excluded save to the extent that such exclusion is prohibited by law.
# None of International Index Company Limited nor any of the iTraxx Associates shall have any liability or responsibility to any person or entity for any loss, damages, costs, charges, expenses or other liabilities whether caused by the negligence of International Index Company Limited or any of the iTraxx Associates or otherwise, arising in connection with the use of iTraxx® derivatives products or the iTraxx® indices.
# Standard & Poor's ® and S&P ® are registered trademarks of The McGraw-Hill Companies, Inc. and S&P GSCI™ is a trademark of The McGraw-Hill Companies, Inc. and have been licensed for use by the Issuer. This Product (the "Product") is not sponsored, endorsed, sold or promoted by S&P and S&P makes no representation, warranty or condition regarding the advisability of investing in the Product.
#
| gs_quant/content/events/00_gsquant_meets_markets/01_ideas_for_risk_re_rating/eq_sx5e_spx_vol_spread_trade.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: dstn
# language: python
# name: dstn
# ---
# %matplotlib inline
import matplotlib
#matplotlib.use('Agg')
import numpy as np
from astrometry.util.fits import *
import pylab as plt
import sys
sys.path.insert(0, '/global/homes/d/dstn/.local/lib/python3.6/site-packages')
import astropy
print(astropy.__file__)
sys.version
'''
Pointing offsets are computed in lines 754 to 769.
"tan" is tan(zenith-distance)
"psi" is parallactic angle (same as is displayed in the control room).
"DEG2RAD" is pi/180.
"polyeval" is a function that evaluates a polynomial.
The coefficients are stored as a list of numbers in "xoffcoeff()" and
"yoffcoeff()" The polynomial turns out to be 4th order.
The numerical values for the pointing offset coeffs are here:
https://desi.lbl.gov/trac/browser/code/online/DervishTools/trunk/desi/etc/desi/xoffcoeff.par
https://desi.lbl.gov/trac/browser/code/online/DervishTools/trunk/desi/etc/desi/yoffcoeff.par
The units are arcmin.
''';
'''
set tan [expr tan($zd*$DEG2RAD)]
...
#Focal plane center. I will invent two global storage elements, although
#I don't use them anywhere
global xoffcoeff yoffcoeff scalecoeff rotcoeff
global xoffth yoffth scaleth rotth
if {[info exists xoffth]} {unset xoffth}
if {[info exists yoffth]} {unset yoffth}
if {[info exists scaleth]} {unset scaleth}
if {[info exists rotth]} {unset rotth}
set xoffth0 [polyeval $tan $xoffcoeff()]
set yoffth0 [polyeval $tan $yoffcoeff()]
#These rotate with the parallactic angle
set cs [expr cos($psi*$DEG2RAD)]
set sn [expr sin($psi*$DEG2RAD)]
set xoffth [expr $xoffth0*$cs - $yoffth0*$sn]
set yoffth [expr $yoffth0*$cs + $xoffth0*$sn]
''';
'''
proc polyeval {xlist coefflist} {
set ylist ""
loop i 0 [llength $xlist] {
set comp 0.
set x [lindex $xlist $i]
loop j 0 [llength $coefflist] {
set comp [expr $comp + [lindex $coefflist $j]*pow($x,$j)]
}
lappend ylist $comp
}
return $ylist
}
''';
'''
The angles for the adc are computed in lines 774 to 806.
There are two angles, one for each element of the ADC.
Ultimately, it is another polynomial, this one second order.
The numerical values for the adc coeffs are here:
https://desi.lbl.gov/trac/browser/code/online/DervishTools/trunk/desi/etc/desi/PRISM.par
The coefficients evaluate to radians, which are converted to degrees
before being stored.
set tan [expr tan($zd*$DEG2RAD)]
foreach id $PRISM() {
set nterm [llength $PRISM($id)]
set coefflist ""
set x $tan
if {$nterm > 3} {
set x [expr sqrt($x)]
}
foreach var $PRISM($id) {
lappend coefflist $PRISM($id,$var)
}
lappend adc() $id
#Note that coefflist returns phi in radians. Normally I want degrees.
set adc($id) [expr [polyeval $x $coefflist]/$DEG2RAD]
#Add in parallactic angle
set adc($id) [expr $adc($id) + $psi]
#Range is 0 to 360
if {$adc($id) < 0.} {set adc($id) [expr $adc($id) + 360.]}
if {$adc($id) >= 360.} {set adc($id) [expr $adc($id) - 360.]}
}
#For provenance tracking, I will cache the input zd and psi in the adc
#array
set adc(zd) $zd
set adc(psi) $psi
''';
def cosd(d):
return np.cos(np.deg2rad(d))
def sind(d):
return np.sin(np.deg2rad(d))
def tand(d):
return np.tan(np.deg2rad(d))
xoffcoeff = np.array([2.0757544727934544e-06, -0.007961331238163066, 0.0056061248311369436,
0.0018986691756501632, -0.0013070691347911939])
yoffcoeff = np.array([-3.4502816243324724e-05, 0.47349367638360584, 0.21307640716964973,
0.037039201803243993, -0.055050297665415526])
def polyeval(t, coeffs):
'''
t: 1-d vector
'''
#print(t.shape)
#print(coeffs.shape)
nc = len(coeffs)
return np.sum(t[:,np.newaxis]**np.arange(nc)[np.newaxis,:] * coeffs, axis=1)
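# Sanity check (illustrative): the vectorized polyeval above agrees with numpy's own polynomial
# evaluation, which also takes coefficients in low-to-high order.
t_check = np.linspace(0., 1., 5)
assert np.allclose(polyeval(t_check, xoffcoeff), np.polynomial.polynomial.polyval(t_check, xoffcoeff))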
zd = np.linspace(0,60, 300)
plt.subplot(1,2,1)
plt.plot(zd, polyeval(tand(zd), xoffcoeff))
plt.xlabel('Zenith distance (deg)')
plt.ylabel('Xoffth0 (arcmin)');
plt.subplot(1,2,2)
plt.plot(zd, polyeval(tand(zd), yoffcoeff))
plt.xlabel('Zenith distance (deg)')
plt.ylabel('Yoffth0 (arcmin)');
from astropy.time import Time
from astropy.coordinates import SkyCoord, AltAz
from astropy.coordinates import EarthLocation
from astropy import units as u
from astropy.utils import iers
#iers.conf.auto_download = False
gfaexptime = 10.
specexptime = 1200.
overheadtime = 120.
# +
from math import fmod
from numpy import sin, cos, degrees, radians, arctan2, arcsin, arccos, tan
def kent_wrap_at(angle, wrap_angle):
"""Wrap angle so it falls between wrap_angle - 360 and wrap_angle.
Args:
angle: an angle in degrees
wrap_angle: the max (exclusive) angle the wrapped rep. should take
Returns:
the wrapped angle
"""
if abs(wrap_angle) > 360:
raise NotImplementedError
wrapped = angle % 360
if wrapped < wrap_angle:
wrapped += 360.0
if wrapped >= wrap_angle:
wrapped -= 360.0
return wrapped
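# Illustrative examples of kent_wrap_at (the inputs are arbitrary): results fall in
# [wrap_angle - 360, wrap_angle).
print(kent_wrap_at(370.0, 180))  # -> 10.0
print(kent_wrap_at(190.0, 180))  # -> -170.0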
def kent_sidereal_time(mjd, longitude):
"""Local Sidereal Time at an mjd and longitude.
This calculation ignores precession and nutation, and its precision
is limited to ~10 arcsec.
There is a "drop in" alternative implemented using astropy in
astron_ap.sidereal_time.
Args:
mjd: MJD in days
longitude: longitude in degrees, positive West
Returns:
sidereal time in degrees
"""
# lst0 is LST at longitude = 0
lst0 = 360*fmod(mjd+(mjd-52903.54875)*(366.24125/365.24125-1.0), 1)
st = kent_wrap_at(lst0 - longitude, 360)
return st
def kent_zenith_distance(ha, decl, latitude):
"""Calculate LST, HA, and zenith distance.
Args:
ha: hour angle in decimal degrees
decl: declination in decimal degrees
latitude: in degrees
Returns:
zenith distance in decimal degrees
"""
rha = radians(ha)
rdecl = radians(decl)
rphi = radians(latitude)
rzd = arccos(cos(rha) * cos(rdecl) * cos(rphi) + sin(rdecl) * sin(rphi))
zd = degrees(rzd)
return zd
def kent_parallactic_angle(ha, decl, latitude):
"""Calculate the parallactic angle.
Args:
ha: Hour Angle (decimal degrees)
decl: declination (decimal degrees)
latitude: in decimal degrees
Returns:
the parallactic angle in decimal degrees
"""
rha = radians(ha)
rdecl = radians(decl)
rphi = radians(latitude)
rpsi = -1 * arctan2(sin(rha)*cos(rphi),
cos(rdecl)*sin(rphi) - sin(rdecl)*cos(rphi)*cos(rha))
psi = degrees(rpsi)
return psi
# -
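# Illustrative spot checks of the helpers above (the inputs are made up, not from the dither data):
# an object at ha = 0, dec = 0 seen from the equator sits at the zenith, and a small positive
# hour angle at this site gives a small negative parallactic angle.
print(kent_zenith_distance(0.0, 0.0, 0.0))        # -> 0.0
print(kent_parallactic_angle(6.0, 0.0, 31.9634))  # -> roughly -9.5 degrees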
def compute(dname):
Da = fits_table('~/desi-commish/%sa.fits' % dname)
Db = fits_table('~/desi-commish/%sb.fits' % dname)
D = merge_tables([Da, Db])
#dname = 'dither2'
#site = EarthLocation.of_site('kpno')
site = EarthLocation.from_geodetic(-111.59989 * u.deg, 31.96403 * u.deg, 2097.)
D.kent_zd = np.zeros(len(D))
D.redo_zd = np.zeros(len(D))
D.kent_parallactic = np.zeros(len(D))
for i,d in enumerate(D):
#time = Time(d.mjd, format='mjd', scale='tai')
#off = (d.exptime / 2.) / (3600.*24.)
off = 0.
#time = Time(d.mjd + off, format='mjd', scale='ut1')
time = Time(d.mjd + off, format='mjd', scale='utc')
offset = (overheadtime + gfaexptime + specexptime/2.) / (3600.*24.)
kent_mjd = d.mjd + offset
kent_time = Time(d.mjd + offset, format='mjd', scale='utc')
coords = SkyCoord(d.sky_ra, d.sky_dec, unit='deg')
altaz = coords.transform_to(AltAz(obstime=time, location=site))
zd = 90. - altaz.alt.to_value(unit=u.deg)
print('ZD', zd, 'vs', d.zd)
D.redo_zd[i] = zd
# from PlateMaker/desi.dat
longitude = 111.6003
latitude = 31.9634
lst = kent_sidereal_time(kent_mjd, longitude)
ha = kent_wrap_at(lst - d.sky_ra, 180)
zd = kent_zenith_distance(ha, d.sky_dec, latitude)
D.kent_zd[i] = zd
D.kent_parallactic[i] = -kent_parallactic_angle(ha, d.sky_dec, latitude)
#kent_altaz = coords.transform_to(AltAz(obstime=kent_time, location=site))
#D.kent_zd[i] = 90. - kent_altaz.alt.to_value(unit=u.deg)
#tan = tand(D.zd)
tan = tand(D.kent_zd)
parallactic = D.kent_parallactic
xoffth0 = polyeval(tan, xoffcoeff)
yoffth0 = polyeval(tan, yoffcoeff)
cs = cosd(parallactic)
sn = sind(parallactic)
D.xoffth = xoffth0 * cs - yoffth0 * sn
D.yoffth = yoffth0 * cs + xoffth0 * sn
adc1coeffs = [-0.0008, -0.3981, -0.1897]
adc2coeffs = [0.0361, 0.3586, 0.2010]
adc1 = np.rad2deg(polyeval(tan, adc1coeffs)) + parallactic
adc2 = np.rad2deg(polyeval(tan, adc2coeffs)) + parallactic
adc1 += (adc1 < 0.)*360.
adc2 += (adc2 < 0.)*360.
D.adc1 = adc1
D.adc2 = adc2
return D
# +
D = compute('dither2')
plt.plot(D.zd, D.redo_zd - D.zd, 'o-', label='Redo')
plt.plot(D.zd, D.kent_zd - D.zd, 'o-', label='Kent')
plt.ylabel('Redo ZD - ZD');
plt.show()
plt.plot(D.parallac, D.kent_parallactic - D.parallac)
plt.ylabel('Kent Parallactic - Parallactic');
plt.show()
#plt.plot(D.zd, D.zd, 'k-')
plot(D, 'dither2')
# -
for dname in ['dither2', 'dither3', 'dither4', 'dither5']:
D = compute(dname)
cosdec = np.cos(np.deg2rad(D.sky_dec))
dR = (D.gfa_ra - D.sky_ra)*cosdec*3600.
dD = (D.gfa_dec - D.sky_dec)*3600.
plt.clf()
plt.plot(dR, dD, 'o-', label='Mean GFA - SKY')
plt.plot(D.xoffth * 60, D.yoffth * 60, 'o-', label='Kent offsets')
plt.legend();
plt.title('Dither sequence %s' % dname)
plt.axhline(0, color='k', alpha=0.1)
plt.axvline(0, color='k', alpha=0.1)
plt.axis('equal')
plt.savefig('kent-offsets-%s.png' % dname)
plt.show()
site = EarthLocation.of_site('kpno')
print(site)
site = EarthLocation.from_geodetic(-111.59989 * u.deg, 31.96403 * u.deg, 2097.)
#OBS-LAT = '31.96403' / [deg] Observatory latitude
#OBS-LONG= '-111.59989' / [deg] Observatory east longitude
#OBS-ELEV= 2097. / [m] Observatory elevation
print(site)
# +
#plt.plot(D.zd,xoffth * 60., 'o-', label='Xoffth');
#plt.plot(D.zd,yoffth * 60., 'o-', label='Yoffth');
#plt.xlabel('ZD (deg)')
#plt.ylabel('Offsets (arcsec)');
# -
def plot(D, dname):
plt.figure(figsize=(12,10))
plt.subplot(2,2,1)
plt.plot(D.expnum, D.adc1, 'o-', label="Steve's code");
plt.plot(D.expnum, D.adc1phi, 'x-', label='ADC1PHI from header');
plt.legend()
plt.xlabel('Exposure number')
plt.ylabel('ADC1 angle (deg)');
plt.subplot(2,2,2)
plt.plot(D.expnum, D.zd, 'o-', label='Header')
plt.plot(D.expnum, D.kent_zd, 'o-', label='Steve')
plt.legend()
plt.xlabel('Exposure number')
plt.ylabel('Zenith distance (deg)')
plt.subplot(2,2,3)
plt.plot(D.expnum, D.adc2, 'o-', label="Steve's code");
plt.plot(D.expnum, D.adc2phi, 'x-', label='ADC2PHI from header');
plt.legend()
plt.xlabel('Exposure number')
plt.ylabel('ADC2 angle (deg)');
plt.subplot(2,2,4)
plt.plot(D.expnum, D.parallac, 'o-', label='Header')
plt.plot(D.expnum, D.kent_parallactic, 'o-', label='Steve')
plt.legend()
plt.xlabel('Exposure number')
plt.ylabel('Parallactic angle (deg)')
plt.suptitle('Dither sequence %s' % (dname));
plt.savefig('adc-%s.png' % dname)
for dname in ['dither2', 'dither3', 'dither4', 'dither5']:
D = compute(dname)
print('D expnums', D.expnum.min(), D.expnum.max())
plot(D, dname)
| kent.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="LeMagleaQnv0" colab_type="text"
# # Matplotlib Quickstart Guide
# + [markdown] id="F9VGilB3AWU7" colab_type="text"
# ### What is `matplotlib` and why do we need it?
# + [markdown] id="h8x-k01JAWVA" colab_type="text"
# `matplotlib` is a Python library.
# Through its `pyplot` module it lets us create all kinds of charts.
#
# We mainly want to use the library to visualize:
# - the accuracy of our networks' results
# - the correct labels next to the digits that were actually recognized
# - the loss function
#
# + [markdown] id="8PEFolK-ry4J" colab_type="text"
# ### Importing `matplotlib`
#
# We import the `matplotlib.pyplot` module under the name `plt` (short for plot), so we have less to type.
#
# + id="vybPnOXfuof4" colab_type="code" colab={}
import matplotlib.pyplot as plt
# + [markdown] id="HIdpzU8ni5Ep" colab_type="text"
#
# #### Jupyter Notebook special command: `%matplotlib inline`
# * an IPython "magic command" for `matplotlib`
# * renders charts directly "inline" in the notebook cell
# * only needed for a local installation, not when running in Google Colab
#
#
#
# + id="AfF8Wg5GjNMo" colab_type="code" colab={}
# %matplotlib inline
# + [markdown] id="4HWlvoFcsgjw" colab_type="text"
# ### A simple plot
# + id="wX2p2IoJAWVo" colab_type="code" outputId="1a5f2c94-71f9-42fa-efcc-57d1282d1b2f" executionInfo={"status": "ok", "timestamp": 1580070684715, "user_tz": -60, "elapsed": 980, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "06560882086860632417"}} colab={"base_uri": "https://localhost:8080/", "height": 265}
plt.plot([1,2,4,8,16]) # plots the five given numbers with default spacing on the x-axis
plt.show() # draws the plot
# + [markdown] id="fJgp2yRcNRgW" colab_type="text"
# When drawing a plot, we always specify the x-values (domain) first and the y-values (range) second.
# If values for only one axis are given, they are interpreted as the y-values.
#
# Different ways to specify the lists:
# ```python
# plt.plot([1,2,3], [1,1,1])
# plt.plot(range(1,4), [1] * 3) # equivalent
# ```
# + id="odHodDmRsj5Z" colab_type="code" outputId="42c1185f-8c47-408c-abb8-0671b6b04e03" executionInfo={"status": "ok", "timestamp": 1579107874299, "user_tz": -60, "elapsed": 805, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "12876211986743604823"}} colab={"base_uri": "https://localhost:8080/", "height": 265}
plt.plot([1,2,4,8,16], [1,0,1,0,1]) # for each x-value in the first list, plots the corresponding y-value from the second list
plt.show() # draws the plot
# + [markdown] id="HiyCNfLQAWV6" colab_type="text"
# ### Labels
# We can label both axes with `ylabel()` and `xlabel()` and give the chart a title with `title()`.
#
#
# The data can also be provided as a dictionary and easily converted into lists.
#
# + id="OaxR8R-RtHlo" colab_type="code" outputId="c91c9864-f52b-4576-b652-663041530c68" executionInfo={"status": "ok", "timestamp": 1580072507517, "user_tz": -60, "elapsed": 1173, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "06560882086860632417"}} colab={"base_uri": "https://localhost:8080/", "height": 295}
data = {
1220: 2_400,
1709: 55_196,
1893: 1_682_172,
1920: 3_879_409,
1941: 4_383_882,
1949: 3_328_193,
2018: 3_644_826
} # Source: https://de.wikipedia.org/wiki/Einwohnerentwicklung_von_Berlin
plt.plot(list(data.keys()), list(data.values()))
plt.ylabel("Population")
plt.xlabel("Year")
plt.title("Population of Berlin over time")
plt.show()
# + [markdown] colab_type="text" id="K56WlRHrBJcL"
# ### Changing the appearance
# We can also control how our data is displayed. The color, the line style and the marker shape can all be given in a single format string.
#
# Colors:
# - r: red
# - b: blue
# - g: green
#
# Alternatively, the color can be given as an RGB value:
# - color=(177/255, 6/255, 58/255)
#
# Marker shapes:
# - o: circle
# - s: square
#
# Line styles:
# - -- dashed
# - \- solid
#
#
#
# + colab_type="code" outputId="0b85189f-3826-4f10-aa78-4ac3ef57af25" executionInfo={"status": "ok", "timestamp": 1582190966745, "user_tz": -60, "elapsed": 1276, "user": {"displayName": "Scretch9", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mD0ILkHwEf4Ju_-gJcERDwzFK3AhTzwf3CPvo6_=s64", "userId": "03323261342458155501"}} id="w3X1BK0wBJcM" colab={"base_uri": "https://localhost:8080/", "height": 1000}
p = [1,2,4,8,16]
plt.plot(p, 'r--') # 'r--' for a red dashed line
plt.show()
plt.plot(p, 'bs-') # 'bs-' for blue square markers connected by a line
plt.show()
plt.plot(p, 'g^') # 'g^' for green triangle markers
plt.show()
plt.plot(p, 'o-', color=(177/255, 6/255, 58/255)) # custom color, points connected by lines
plt.show()
# + [markdown] id="du2knGBhAWWW" colab_type="text"
# ### Plotting and labelling functions
# * With `label='Foo'` we can give a curve a name.
# * With `plt.legend()` that name is shown in a legend.
# + id="wyy1SfBuAWWY" colab_type="code" outputId="18ca3556-78b9-4f27-9592-9a867b18e4a4" executionInfo={"status": "ok", "timestamp": 1580078365969, "user_tz": -60, "elapsed": 942, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "06560882086860632417"}} colab={"base_uri": "https://localhost:8080/", "height": 265}
import numpy as np
# Initialize a NumPy array as seen before (20 values evenly spaced between 0 and 2)
p = np.linspace(0, 2, 20)
plt.plot(p, p, label='Linear') # plots the linear function
plt.plot(p, p**2, label='Quadratic') # plots the quadratic function
plt.plot(p, p**3, label='Cubic') # plots the cubic function
func = lambda p: p // 0.3 # define our own function using lambda syntax
plt.plot(p, func(p), label='Sawtooth') # plots our own floor-division (staircase) function
plt.legend() # show the legend
plt.show()
# + [markdown] id="IrhGWCNk3C7_" colab_type="text"
# ### Plotting categorical data with different chart types
# + [markdown] id="j7lD00NNXilr" colab_type="text"
# * With `plt.subplots()` and `figsize=()` we can display several plots together in one figure
# * `plt.subplots(1,2)` means we create a figure with one row of two subplots.
# * `figsize=(15,3)` creates a figure of size 15 x 3
# * `sharey=True` makes the subplots share the y-axis labeling
#
# * `bar()` creates a **bar chart**
# * `scatter()` creates a **scatter plot**
# + id="2xa_K18AtSLp" colab_type="code" outputId="e5fa99db-690e-4daf-b0bc-9f4fe1e2778f" executionInfo={"status": "ok", "timestamp": 1582191048299, "user_tz": -60, "elapsed": 951, "user": {"displayName": "Scretch9", "photoUrl": "https://lh3.googleusercontent.com/a-/AAuE7mD0IL<KEY>", "userId": "03323261342458155501"}} colab={"base_uri": "https://localhost:8080/", "height": 376}
data = {
"London": 8_908_081 ,
"Istanbul": 15_067_724,
"Madrid": 3_266_126,
"<NAME>": 5_383_890,
"Berlin": 3_651_857,
"Moskau": 12_432_531
} # Source: https://de.wikipedia.org/wiki/Liste_der_gr%C3%B6%C3%9Ften_St%C3%A4dte_Europas (as of 15 Jan 2020)
names = list(data.keys())
values = list(data.values())
fig, plots = plt.subplots(1, 2, figsize=(19, 5), sharey=True) # Creates a 1x2 grid of subplots with figure size 19x5
# The subplots share the y-axis
plots[0].bar(names, values) # Plots the values as a bar chart
plots[0].title.set_text("Bar chart") # Sets the title of the first subplot
plots[1].scatter(names, values) # Plots the values as a scatter plot
plots[1].title.set_text("Scatter plot") # Sets the title of the second subplot
fig.suptitle("The largest cities in Europe") # Sets the title of the whole figure
plt.show() # Draws the plot
# + [markdown] id="GlavhFWxAWWx" colab_type="text"
# ### More chart types
#
# **The call signature is similar for all pyplot functions**
# * A special parameter for the **pie chart** is `explode` (the slice that should be highlighted)
# + id="a29AlThaAWWz" colab_type="code" outputId="80e5266f-8a9e-4072-83ae-5f3d5a3a1de9" executionInfo={"status": "ok", "timestamp": 1580077898507, "user_tz": -60, "elapsed": 1479, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "06560882086860632417"}} colab={"base_uri": "https://localhost:8080/", "height": 277}
# Pie chart, slices in clockwise order:
labels = 'Half of the points', 'Failed', ' > 75% of the points', '100% achieved'
sizes = [50, 10, 38, 2]
explode = (0, 0, 0.1, 0) # highlight the third slice
fig1, ax1 = plt.subplots()
ax1.pie(sizes, # Create the pie chart with the given parameters
        explode=explode,
        labels=labels,
        autopct='%1.1f%%', # Sets the format of the percentage labels (float with one decimal place)
        shadow=True, # Adds a drop shadow to the chart
        startangle=90) # Sets the angle by which the slices are rotated
ax1.axis('equal') # display as a circle
plt.suptitle('Course participants')
plt.show()
# + [markdown] id="LqRHbruEAWW9" colab_type="text"
# ### Plotting images
# + [markdown] id="1tE9Xr6lAWXB" colab_type="text"
# We can use `urlretrieve` to load images from the web, or, when running in a local Jupyter notebook, simply pass the path of a local file.
# * `plt.imread()` reads an image
# * `plt.imshow()` displays the image
# * `plt.axis('off')` removes the axes
# + id="6Lw5HYqnAWXF" colab_type="code" outputId="b803ecd0-687d-4bb5-ca4d-484e5a7aa2cd" executionInfo={"status": "ok", "timestamp": 1580078174542, "user_tz": -60, "elapsed": 2254, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "06560882086860632417"}} colab={"base_uri": "https://localhost:8080/", "height": 269}
# We need urlretrieve and tempfile to download the image
from urllib.request import urlretrieve
import tempfile
url = "https://upload.wikimedia.org/wikipedia/commons/2/24/Maglemer_%28apple%29.jpg"
with tempfile.NamedTemporaryFile() as file: # Creates a temporary file
    urlretrieve(url, file.name) # Saves the image to file.name
    image = plt.imread(file.name) # Reads the file at file.name as an image
# plt.axis('off') # Removes the axis labels
imgplot = plt.imshow(image) # Displays the image
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (TensorFlow 2.3 Python 3.7 GPU Optimized)
# language: python
# name: python3__SAGEMAKER_INTERNAL__arn:aws:sagemaker:us-west-2:236514542706:image/tensorflow-2.3-gpu-py37-cu110-ubuntu18.04-v3
# ---
# # SageMakerCV TensorFlow Tutorial
#
# SageMakerCV is a collection of computer vision tools developed to take full advantage of Amazon SageMaker by providing state of the art model accuracy, training speed, and training cost reductions. SageMakerCV is based on the lessons we learned from developing the record breaking computer vision models we announced at Re:Invent in 2019 and 2020, along with talking to our customers and understanding the challenges they faced in training their own computer vision models.
#
# The tutorial in this notebook walks through using SageMakerCV to train Mask RCNN on the COCO dataset. The only prerequisite is to set up SageMaker Studio, the instructions for which can be found in [Onboard to Amazon SageMaker Studio Using Quick Start](https://docs.aws.amazon.com/sagemaker/latest/dg/onboard-quick-start.html). Everything else, from getting the COCO data to launching a distributed training cluster, is included here.
#
# ## Setup and Roadmap
#
# Before diving into the tutorial itself, let's take a minute to discuss the various tools we'll be using.
#
# #### SageMaker Studio
# [SageMaker Studio](https://aws.amazon.com/sagemaker/studio/) is a machine learning focused IDE where you can interactively develop models and launch SageMaker training jobs all in one place. SageMaker Studio provides a Jupyter Lab like environment, but with a number of enhancements. We'll just scratch the surface here. See the [SageMaker Studio Documentation](https://docs.aws.amazon.com/sagemaker/latest/dg/studio.html) for more details.
#
# For our purposes, the biggest difference from regular Jupyter Lab is that SageMaker Studio allows you to change your compute resources as needed, by connecting notebooks to Docker containers on different ML instances. This is a little confusing to just describe, so let's walk through an example.
#
# Once you've completed the setup on [Onboard to Amazon SageMaker Studio Using Quick Start](https://docs.aws.amazon.com/sagemaker/latest/dg/onboard-quick-start.html), go to the [SageMaker Console](https://us-west-2.console.aws.amazon.com/sagemaker) and click `Open SageMaker Studio` near the top right of the page.
#
# <img src="../assets/SageMaker_console.png" style="width: 600px">
#
# If you haven't yet created a user, do so via the link at the top left of the page. Give it any name you like. For execution role, you can either use an existing SageMaker role, or create a new one. If you're unsure, create a new role. On the `Create IAM Role` window, make sure to select `Any S3 Bucket`.
#
# <img src="../assets/Create_IAM_role.png" style="width: 600px">
#
# Back on the SageMaker Studio page, select `Open Studio` next to the user you just created.
#
# <img src="../assets/Studio_domain.png" style="width: 600px">
#
# This will take a couple minutes to start up the first time. Once it starts, you'll have a Jupyter Lab like interface running on a small instance with an attached EBS volume. Let's start by taking a look at the `Launcher` tab.
#
# <img src="../assets/Studio_launcher.png" style="width: 750px">
#
# If you don't see the `Launcher`, you can bring one up by clicking the `+` on the menu bar in the upper left corner.
#
# <img src="../assets/Studio_menu_bar.png" style="width: 600px">
#
# The `Launcher` gives you access to all kinds of tools. This is where you can create new notebooks, text files, or get a terminal for your instance. Try the `System Terminal`. This gives you a new terminal tab for your Studio instance. It's useful for things like downloading data or cloning github repos into studio. For example, you can run `aws s3 ls` to browse your current S3 buckets. Go ahead and clone this repo onto Studio with
#
# `git clone https://github.com/aws-samples/amazon-sagemaker-cv`
#
# Let's look at the launcher one more time. Bring another one up with the `+`. Notice you have an option for `Select a SageMaker image` above the button to launch a notebook. This allows you to select a Docker image that will launch on a new instance. The notebook you create will be attached to that new instance, along with the EBS volume on your Studio instance. Let's try it out. On the `Launcher` page, click the drop down menu next to `Select a SageMaker Image` and select `TensorFlow 2.3 Python 3.7 (Optimized for GPU)`, then click the `Notebook` button below the dropdown.
#
# <img src="../assets/Select_tensorflow_image.png" style="width: 600px">
#
# Take a look at the upper righthand corner of the notebook.
#
# <img src="../assets/notebook_tensorflow_kernel.png" style="width: 600px">
#
# The `Python 3 (TensorFlow 2.3 Python 3.7 GPU Optimized)` label refers to the kernel associated with this notebook. The `Unknown` refers to the current instance type. Click `Unknown` and select `ml.g4dn.xlarge`.
#
# <img src="../assets/instance_types.png" style="width: 600px">
#
# This will launch an `ml.g4dn.xlarge` instance and attach this notebook to it. This will take a couple of minutes, because Studio needs to download the TensorFlow Docker image to the new instance. Once an instance has started, launching new notebooks with the same instance type and kernel is immediate. You'll also see the `Unknown` replaced with an instance description, `4 vCPU + 16 GiB + 1 GPU`. You can also change instances as needed. Say you want to run your notebook on a `ml.p3dn.24xlarge` to get 8 GPUs. To change instances, just click the instance description. To get more instances in the menu, deselect `Fast launch only`.
#
# Once your notebook is up and running, you can also get a terminal into your new instance.
#
# <img src="../assets/Launch_terminal.png" style="width: 600px">
#
# This can be useful for customizing your image with setup scripts, pip installing new packages, or using mpi to launch multi GPU training jobs. Click to get a terminal and run `ls`. Note that you have the same directories as your main Studio instance. Studio will attach the same EBS volume to all the instances you start, so all your files and data are shared across any notebooks you start. This means that you can prototype a model on a single GPU instance, then switch to a multi GPU instance while still having access to all of your data and scripts.
#
# Finally, when you want to shut down instances, click the circle with a square in it on the left hand side.
#
# <img src="../assets/running_instances.png" style="width: 600px">
#
# This shows your current running instances, and the Docker containers attached to those instances. To shut them down, just click the power button to their right.
#
# Now that we've explored Studio a bit, let's get started with SageMakerCV. If you followed the instructions above to clone the repo, you should have `amazon-sagemaker-cv` in the file browser on the left. Navigate to `amazon-sagemaker-cv/tensorflow/Tutorial.ipynb` to open this notebook on your instance. If you still have a `g4dn` running, it should automatically attach to it.
#
# The rest of this notebook is broken into 4 sections.
#
# - Installing SageMakerCV and Downloading the COCO Data
#
# Since we're using the base AWS Deep Learning Container image, we need to add the SageMakerCV tools. Then we'll download the COCO dataset and upload it to S3.
#
# - Prototyping in Studio
#
# We'll walk through how to train a model on Studio, how SageMakerCV is structured, and how you can add your own models and features.
#
# - Launching a SageMaker Training Job
#
# There are lots of bells and whistles available to train your models fast and on large datasets. We'll put a lot of those together to launch a high performance training job. Specifically, we'll create a training job with 4 P4d.24xlarge instances connected with 400 Gbps EFA networking, streaming our training data from S3 so we don't have to load the dataset onto the instances before training. You could even use this same configuration to train on a dataset that wouldn't fit on the instances. If you'd rather launch a smaller (or larger) training cluster, we'll discuss how to modify the configuration.
#
# - Testing Our Model
#
# Finally, we'll take the output trained Mask RCNN model and visualize its performance in Studio.
#
# #### Installing SageMakerCV
#
# To install SageMakerCV in the TensorFlow Studio Docker image, just run `pip install -e .` in the `amazon-sagemaker-cv/tensorflow` directory. You can do this either from an image terminal or by running the paragraph below. Note that we use the `-e` option. This keeps the SageMakerCV modules editable, so any changes you make will be included in your training job.
# !pip install -e .
# ***
# ### Setup on S3 and Download COCO data
#
# Next we need to set up an S3 bucket for all our data and results. Enter a name for your S3 bucket below. You can either create a new bucket, or use an existing bucket. If you use an existing bucket, make sure it's in the same region where you plan to run training. For new buckets, we'll specify that it needs to be in the current SageMaker region. By default we'll put everything in an S3 prefix on your bucket named `smcv-tensorflow-tutorial`, and locally in `/root/smcv-tensorflow-tutorial`, but you can change these locations.
S3_BUCKET = 'sagemaker-smcv-tutorial' # Don't include s3:// in your bucket name
S3_DIR = 'smcv-tensorflow-tutorial'
LOCAL_DATA_DIR = '/root/smcv-tensorflow-tutorial' # For reasons detailed in Distributed Training, do not put this dir in the SageMakerCV dir
import os
import zipfile
from pathlib import Path
from s3fs import S3FileSystem
from concurrent.futures import ThreadPoolExecutor
import boto3
from botocore.client import ClientError
from tqdm import tqdm
# +
s3 = boto3.resource('s3')
boto_session = boto3.session.Session()
region = boto_session.region_name
# Check if bucket exists. If it doesn't, create it.
try:
bucket = s3.meta.client.head_bucket(Bucket=S3_BUCKET)
print(f"S3 Bucket {S3_BUCKET} Exists")
except ClientError:
print(f"Creating Bucket {S3_BUCKET}")
bucket = s3.create_bucket(Bucket=S3_BUCKET, CreateBucketConfiguration={'LocationConstraint': region})
# -
# ***
#
# Next we'll download the COCO data to Studio, unzip the files, create TFRecords, and upload to S3. The reason we want the data in two places is that it's convenient to have the data locally on Studio for prototyping. We also want to unarchive the data before moving it to S3 so that we can stream it to our training instances instead of downloading it all at once.
#
# Once this is finished, you'll have copies of the COCO data on your Studio instance, and in S3. Be careful not to open the `data/coco/train2017` dir in the Studio file browser. It contains 118287 images, and can cause your web browser to crash. If you need to browse these files, use the terminal.
#
# This only needs to be done once, and only if you don't already have the data. The COCO 2017 dataset is about 20GB, so this step takes around 30 minutes to complete. The next paragraph sets up all the file directories we'll use for downloading, and later in training.
COCO_URL="http://images.cocodataset.org"
ANNOTATIONS_ZIP="annotations_trainval2017.zip"
TRAIN_ZIP="train2017.zip"
VAL_ZIP="val2017.zip"
COCO_DIR=os.path.join(LOCAL_DATA_DIR, 'data', 'coco')
TF_RECORD_DIR=os.path.join(LOCAL_DATA_DIR, 'data', 'coco', 'tfrecord')
os.makedirs(COCO_DIR, exist_ok=True)
os.makedirs(TF_RECORD_DIR, exist_ok=True)
S3_DATA_LOCATION=os.path.join("s3://", S3_BUCKET, S3_DIR, "data", "coco")
S3_WEIGHTS_LOCATION=os.path.join("s3://", S3_BUCKET, S3_DIR, "data", "weights", "resnet")
WEIGHTS_DIR=os.path.join(LOCAL_DATA_DIR, 'data', 'weights')
os.makedirs(WEIGHTS_DIR, exist_ok=True)
R50_WEIGHTS_SRC="https://sagemakercv.s3.us-west-2.amazonaws.com/weights/tensorflow"
R50_WEIGHTS_TAR="tensorflow_resnet50.tar"
R50_WEIGHTS="tensorflow_resnet50"
# ***
#
# This paragraph will download everything. It takes around 30 minutes to complete.
# +
print("Downloading annotations")
# !wget -O $COCO_DIR/$ANNOTATIONS_ZIP $COCO_URL/annotations/$ANNOTATIONS_ZIP
# !unzip $COCO_DIR/$ANNOTATIONS_ZIP -d $COCO_DIR
# !aws s3 cp --recursive $COCO_DIR/annotations $S3_DATA_LOCATION/annotations
print("Downloading COCO training data")
# !wget -O $COCO_DIR/$TRAIN_ZIP $COCO_URL/zips/$TRAIN_ZIP
# train data has ~118,000 images. Unzip is too slow, about 1.5 hours because of disk read and write speed on the EBS volume.
# This technique is much faster because it grabs all the zip metadata at once, then uses threading to unzip multiple files at once.
print("Unzipping COCO training data")
train_zip = zipfile.ZipFile(os.path.join(COCO_DIR, TRAIN_ZIP))
jpeg_files = [image.filename for image in train_zip.filelist if image.filename.endswith('.jpg')]
os.makedirs(os.path.join(COCO_DIR, 'train2017'), exist_ok=True)
with ThreadPoolExecutor() as executor:
threads = list(tqdm(executor.map(lambda x: train_zip.extract(x, COCO_DIR), jpeg_files), total=len(jpeg_files)))
print("Downloading COCO validation data")
# !wget -O $COCO_DIR/$VAL_ZIP $COCO_URL/zips/$VAL_ZIP
# switch to also threading
# !unzip -q $COCO_DIR/$VAL_ZIP -d $COCO_DIR
val_images = [i for i in Path(os.path.join(COCO_DIR, 'val2017')).glob("*.jpg")]
# !apt-get -y update && apt install -y protobuf-compiler
# !cd sagemakercv/data/coco && ./process_coco_tfrecord.sh $COCO_DIR $TF_RECORD_DIR
tfrecord_train = list(Path(TF_RECORD_DIR).glob('train-*.tfrecord'))
tfrecord_val = list(Path(TF_RECORD_DIR).glob('val-*.tfrecord'))
s3fs = S3FileSystem()
print("Uploading training tfrecords to S3")
with ThreadPoolExecutor() as executor:
threads = list(tqdm(executor.map(lambda record: s3fs.put(record.as_posix(),
os.path.join(S3_DATA_LOCATION, 'tfrecord', 'train2017', record.name)),
tfrecord_train), total=len(tfrecord_train)))
print("Uploading validation tfrecords to S3")
with ThreadPoolExecutor() as executor:
threads = list(tqdm(executor.map(lambda record: s3fs.put(record.as_posix(),
os.path.join(S3_DATA_LOCATION, 'tfrecord', 'val2017', record.name)),
tfrecord_val), total=len(tfrecord_val)))
print("Downloading Resnet Weights")
# !wget -O $WEIGHTS_DIR/$R50_WEIGHTS_TAR $R50_WEIGHTS_SRC/$R50_WEIGHTS_TAR
# !tar -xf $WEIGHTS_DIR/$R50_WEIGHTS_TAR -C $WEIGHTS_DIR
s3fs.put(os.path.join(WEIGHTS_DIR, R50_WEIGHTS), S3_WEIGHTS_LOCATION, recursive=True)
print("Finished!")
# -
# ***
# ### Training on Studio
#
# Now that we have the data, we can get to training a Mask RCNN model to detect objects in the COCO dataset images.
#
# Since training on a single GPU can take days, we'll just train for a couple thousand steps, and run a single evaluation to make sure our model is at least starting to learn something. We'll train a full model on a larger cluster of GPUs in a SageMaker training job.
#
# The reason we first want to train in Studio is that we want to dig a bit into the SageMakerCV framework, and talk about the model architecture, since we expect many users will want to modify models for their own use cases.
#
# #### Mask RCNN
#
# First, just a very brief overview of Mask RCNN. If you would like a more in depth examination, we recommend taking a look at the [original paper](https://arxiv.org/abs/1703.06870), the [feature pyramid paper](https://arxiv.org/abs/1612.03144) which describes a popular architectural change we'll use in our model, and blog posts from [viso.ai](https://viso.ai/deep-learning/mask-r-cnn/), [tryo labs](https://tryolabs.com/blog/2018/01/18/faster-r-cnn-down-the-rabbit-hole-of-modern-object-detection/), [<NAME>](https://jonathan-hui.medium.com/image-segmentation-with-mask-r-cnn-ebe6d793272), and [<NAME>](https://lilianweng.github.io/lil-log/2017/12/31/object-recognition-for-dummies-part-3.html).
#
# Mask RCNN is a two stage object detection model that locates objects in images by placing bounding boxes around, and segmentation masks over, any object the model is trained to find. It also provides a classification for each object.
#
# <img src="../assets/traffic.png" style="width: 1200px">
#
# Mask RCNN is called a two stage model because it performs detection in two steps. The first identifies any objects in the image, versus background. The second stage determines the specific class of each object, and applies the segmentation mask. Below is an architectural diagram of the model. Let's walk through each step.
#
# <img src="../assets/mask_rcnn_arch.jpeg" style="width: 1200px">
# Credit: <NAME>
#
# The `Convolution Network` is often referred to as the model backbone. This is a pretrained image classification model, commonly ResNet, which has been trained on a large image classification dataset, like ImageNet. The classification layer is removed, and instead the backbone outputs a set of convolution feature maps. The idea is, the classification model learned to identify objects in the process of classifying images, and now we can use that information to build a more complex model that can find those objects in the image. We want to pretrain because training the backbone at the same time as training the object detector tends to be very unstable.
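#
# As a rough illustration of that idea (this is not SageMakerCV's backbone code, just a sketch using the stock Keras ResNet50 and its standard layer names), a pretrained classifier can be turned into a multi-scale feature extractor like this:
#
# ```
# import tensorflow as tf
#
# base = tf.keras.applications.ResNet50(include_top=False, weights='imagenet')
# feature_layers = ['conv2_block3_out', 'conv3_block4_out',
#                   'conv4_block6_out', 'conv5_block3_out']  # C2-C5 feature maps
# backbone = tf.keras.Model(
#     inputs=base.input,
#     outputs=[base.get_layer(name).output for name in feature_layers])
#
# images = tf.random.uniform((1, 832, 1344, 3))  # stand-in for a preprocessed batch
# c2, c3, c4, c5 = backbone(images)              # feature maps at four scales
# ```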
#
# One additional component that is sometimes added to the backbone is a `Feature Pyramid Network`. This takes the outputs of the backbone and combines them together into a new set of feature maps by performing both up and down convolutions. The idea is that the different sized feature maps will help the model detect objects of different sizes, and the feature pyramid lets the different feature maps share information with each other.
#
# The outputs of the feature pyramid are then passed to the `Region Proposal Network`, which is responsible for finding regions of the image that might contain an object (this is the first of the two stages). The RPN will output several hundred thousand regions, each with a probability of containing an object. We'll typically take the top few thousand most likely regions. Because these several thousand regions will usually have a lot of overlap, we perform [non-max suppression](https://towardsdatascience.com/non-maximum-suppression-nms-93ce178e177c), which removes regions with large areas of overlap. This gives us a set of `regions of interest`: regions of the image that we think might contain an object.
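#
# For a concrete sense of what non-max suppression does, here is a toy example using TensorFlow's built-in op (SageMakerCV uses its own batched implementation inside the RPN; this is just an illustration):
#
# ```
# import tensorflow as tf
#
# boxes = tf.constant([[0., 0., 10., 10.],
#                      [1., 1., 11., 11.],     # heavily overlaps the first box
#                      [50., 50., 60., 60.]])
# scores = tf.constant([0.9, 0.8, 0.7])
#
# keep = tf.image.non_max_suppression(boxes, scores,
#                                     max_output_size=3,
#                                     iou_threshold=0.5)
# print(keep.numpy())  # [0 2]: the overlapping, lower-scoring box is dropped
# ```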
#
# Next, we use those regions to crop out the corresponding sections of the feature maps that came from the feature pyramid network using a technique called [ROI align](https://firiuza.medium.com/roi-pooling-vs-roi-align-65293ab741db).
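#
# TensorFlow's `tf.image.crop_and_resize` is a close cousin of ROI Align (true ROI Align adds more careful bilinear sampling), and it shows the shape of the operation: each region is cropped from the feature map and resized to a fixed spatial size.
#
# ```
# import tensorflow as tf
#
# feature_map = tf.random.uniform((1, 50, 68, 256))   # (batch, height, width, channels)
# rois = tf.constant([[0.1, 0.2, 0.5, 0.6],           # normalized [y1, x1, y2, x2]
#                     [0.3, 0.3, 0.9, 0.8]])
# box_indices = tf.constant([0, 0])                   # which image each ROI comes from
#
# crops = tf.image.crop_and_resize(feature_map, rois, box_indices, crop_size=(7, 7))
# print(crops.shape)  # (2, 7, 7, 256): one fixed-size feature crop per region
# ```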
#
# We pass our cropped feature maps to the `box head` which classifies each region into either a specific object category, or as background. It also refines the position of the bounding box. In Mask RCNN, we also pass the feature maps to a `mask head` which produces a segmentation mask over the object.
#
# #### SageMakerCV Internals
#
# An important feature of Mask RCNN is its multiple heads. One head constructs a bounding box, while another creates a mask. These are referred to as the `ROI heads`. It's common for users to extend this and other two stage models by adding their own ROI heads. For example, a keypoint head is common. Doing so means modifying SageMakerCV's internals, so let's talk about those for a second.
#
# The high level Mask RCNN model can be found in `amazon-sagemaker-cv/tensorflow/sagemakercv/detection/detectors/two_stage_detector.py`. If you trace through the `call` function, you'll see that the model first passes an image through the backbone, the neck, then the RPN. The RPN layer also contains the non-max suppression step. The resulting regions of interest are then passed to the ROI heads, where they are used to crop sections of the feature maps, which are then classified into object categories.
#
# Probably the most important thing to be aware of is the set of `build` imports at the top. Each section of the model has an associated build function (`build_backbone`, `build_neck`, `build_dense_head`, `build_roi_head`), and these are tied together in `build_two_stage_detector` at the bottom of the file. These functions simplify building the model by letting us pass in a single configuration file for building all the different pieces.
#
# For example, if you open `amazon-sagemaker-cv/tensorflow/sagemakercv/detection/roi_heads/standard_roi_head.py`, you'll find the `build_standard_roi_head` function at the bottom. To add a new head, you would write a TensorFlow module with its own build function. The decorator at the top of the build function allows it to be called from the config file. The decorator `@HEADS.register("StandardRoIHead")` adds a dictionary entry so that when `StandardRoIHead` appears in the config file, `build_standard_roi_head` gets called by `build_roi_head`. If, for example, you instead specify `CascadeRoIHead`, the associated builder for the cascade ROI head is used.
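#
# As a sketch of that pattern, a hypothetical new head might look like the following. The import path for `HEADS` and the constructor arguments are assumptions here; check `sagemakercv/detection/roi_heads` for the real registry and base classes before copying this.
#
# ```
# import tensorflow as tf
# from sagemakercv.detection.roi_heads import HEADS  # assumed import path
#
# class KeypointRoIHead(tf.keras.Model):
#     def __init__(self, cfg):
#         super().__init__()
#         self.dense = tf.keras.layers.Dense(17 * 2)  # e.g. 17 (x, y) keypoints
#
#     def call(self, roi_features, training=False):
#         pooled = tf.reduce_mean(roi_features, axis=[1, 2])  # crude pooling over space
#         return self.dense(pooled)
#
# @HEADS.register("KeypointRoIHead")
# def build_keypoint_roi_head(cfg):
#     return KeypointRoIHead(cfg)
# ```
#
# With that in place, selecting the new head would just be a matter of setting `cfg.MODEL.RCNN.ROI_HEAD = "KeypointRoIHead"` in the config.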
#
# Finally, a note about data loading. SageMakerCV uses an optimized TFRecord data format. The COCO dataloader can be found in `amazon-sagemaker-cv/tensorflow/sagemakercv/data/coco/dataloader.py`. It takes a file pattern in the form `data/coco/train2017/train*`, which will include all files in the dataset that start with `train`. You can use either a local directory or an S3 location like `s3://my-bucket/my-data/coco/train2017/train*`; the dataloader will automatically switch between the two. The S3 functionality is especially useful for distributed training with large datasets, since it means you can train without waiting for your data to download.
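#
# Under the hood this works because `tf.data` can read TFRecords from either a local path or an S3 path through TensorFlow's file system layer (the AWS Deep Learning Containers ship TensorFlow with S3 support). A minimal sketch, with a hypothetical bucket name:
#
# ```
# import tensorflow as tf
#
# # either of these patterns works with the same code
# file_pattern = "s3://my-bucket/smcv-tensorflow-tutorial/data/coco/tfrecord/train2017/train*"
# # file_pattern = "/root/smcv-tensorflow-tutorial/data/coco/tfrecord/train*"
#
# files = tf.data.Dataset.list_files(file_pattern, shuffle=True)
# dataset = files.interleave(tf.data.TFRecordDataset,
#                            cycle_length=16,
#                            num_parallel_calls=tf.data.experimental.AUTOTUNE)
# ```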
#
# #### Setting Up Training
#
# Let's actually use some of these functions to train a model.
#
# Start by importing the default configuration file.
from configs import cfg
# ***
# We use the [yacs](https://github.com/rbgirshick/yacs) format for configuration files. If you want to see the entire config, run `print(cfg.dump())`, but this prints out a lot, so to avoid overwhelming you with too much information we'll just focus on the bits we want to change for this model.
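#
# A couple of yacs conveniences worth knowing: any sub-node can be dumped on its own, and individual keys can be overridden from a flat list of key-value pairs.
#
# ```
# print(cfg.SOLVER.dump())                          # inspect just one section
# cfg.merge_from_list(["SOLVER.LR", 0.01,
#                      "SOLVER.MAX_ITERS", 5000])   # override individual keys
# ```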
# ***
# First, let's put in all the file directories for the data and weights we downloaded in the previous section, as well as an output directory for the model results.
# +
cfg.PATHS.TRAIN_FILE_PATTERN = os.path.join(TF_RECORD_DIR, "train*")
cfg.PATHS.VAL_FILE_PATTERN = os.path.join(TF_RECORD_DIR, "val*")
cfg.PATHS.WEIGHTS = os.path.join(WEIGHTS_DIR, R50_WEIGHTS, "resnet50.ckpt")
cfg.PATHS.VAL_ANNOTATIONS = os.path.join(COCO_DIR, "annotations", "instances_val2017.json")
cfg.PATHS.OUT_DIR = os.path.join(LOCAL_DATA_DIR, "output")
# create output dir if it doesn't exist
os.makedirs(cfg.PATHS.OUT_DIR, exist_ok=True)
# -
# ***
# This section specifies model details, including the type of model and its internal hyperparameters. We won't cover the details of all of these, but more information can be found in the blog posts listed above, as well as the original paper.
cfg.LOG_INTERVAL = 50 # Number of training steps between logging interval
cfg.MODEL.DENSE.PRE_NMS_TOP_N_TRAIN = 2000 # Top regions of interest to select before NMS
cfg.MODEL.DENSE.POST_NMS_TOP_N_TRAIN = 1000 # Top regions of interest to select after NMS
cfg.MODEL.RCNN.ROI_HEAD = "StandardRoIHead" # ROI head with box and mask, if mask is set to true
cfg.MODEL.FRCNN.LOSS_TYPE = "giou"
cfg.MODEL.INCLUDE_MASK = True # include mask. switching this off runs Faster RCNN
# ***
# Next we set up the configuration for training, including the optimizer, hyperparameters, batch size, and training length. Batch size is global, so if you set a batch size of 64 across 8 GPUs, it will be a batch size of 8 per GPU. SageMakerCV currently supports the following optimizers: momentum SGD (stochastic gradient descent) and NovoGrad, and the following learning rate schedulers: stepwise and cosine decay. New, custom optimizers and schedulers can be added by modifying the `sagemakercv/training/builder.py` file.
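#
# To give a sense of what those config options map to, the cosine-decay schedule with momentum SGD corresponds roughly to the stock Keras pieces below (an illustration only; `build_optimizer` and `build_scheduler` construct SageMakerCV's own versions, including the warmup, which is not shown here).
#
# ```
# import tensorflow as tf
#
# schedule = tf.keras.optimizers.schedules.CosineDecay(
#     initial_learning_rate=0.002,   # cfg.SOLVER.LR
#     decay_steps=2500,              # cfg.SOLVER.MAX_ITERS
#     alpha=0.001)                   # final fraction of the base learning rate
# optimizer = tf.keras.optimizers.SGD(learning_rate=schedule, momentum=0.9)
# ```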
#
# For training on Studio, we'll just run for a couple thousand steps. We'll use SageMaker training instances for the full training on multiple GPUs.
cfg.INPUT.TRAIN_BATCH_SIZE = 4 # Training batch size
cfg.INPUT.EVAL_BATCH_SIZE = 8 # Evaluation batch size
cfg.SOLVER.SCHEDULE = "CosineDecay" # Learning rate schedule, either CosineDecay or PiecewiseConstantDecay
cfg.SOLVER.OPTIMIZER = "NovoGrad" # Optimizer type NovoGrad or Momentum
cfg.SOLVER.LR = .002 # Base learning rate after warmup
cfg.SOLVER.BETA_1 = 0.9 # NovoGrad beta 1 value
cfg.SOLVER.BETA_2 = 0.5 # NovoGrad beta 2 value
cfg.SOLVER.MAX_ITERS = 2500 # Total training steps
cfg.SOLVER.WARMUP_STEPS = 250 # warmup steps
cfg.SOLVER.XLA = True # Train with XLA
cfg.SOLVER.FP16 = True # Train with mixed precision enabled
cfg.SOLVER.TF32 = False # Train with TF32 data type enabled, only available on Ampere GPUs and TF 2.4 and up
# Finally, SageMakerCV includes a number of training hooks. These work similarly to Keras callbacks by adding functionality to training. We use our own training hooks and runner class, which improve performance beyond the standard Keras `model.fit()` training strategy.
#
# Here we include three hooks. The `CheckpointHook` loads the backbone weights, and saves a model checkpoint after each epoch. The `IterTimerHook` and `TextLoggerHook` print helpful training progress information out to CloudWatch during training.
cfg.HOOKS=["CheckpointHook",
"IterTimerHook",
"TextLoggerHook"]
# Let's save our new configuration file in case we want to use it in future training.
import yaml
from contextlib import redirect_stdout
local_config_file = f"configs/local-config-studio.yaml"
with open(local_config_file, 'w') as outfile:
with redirect_stdout(outfile): print(cfg.dump())
# A saved model configuration can be loaded by first running `from configs import cfg` and then merging in our saved file with `merge_from_file`
cfg.merge_from_file(local_config_file)
# And now we can build and train our model. Import the build functions so we can build the individual pieces directly from our configuration file.
from sagemakercv.detection import build_detector
from sagemakercv.training import build_optimizer, build_scheduler, build_trainer
from sagemakercv.data import build_dataset
from sagemakercv.utils.dist_utils import get_dist_info, MPI_size, is_sm_dist
from sagemakercv.utils.runner import Runner, build_hooks
import tensorflow as tf
# And include some standard TensorFlow configuration setup so our model runs in mixed precision with XLA enabled.
rank, local_rank, size, local_size = get_dist_info()
devices = tf.config.list_physical_devices('GPU')
for device in devices:
tf.config.experimental.set_memory_growth(device, True)
tf.config.set_visible_devices([devices[local_rank]], 'GPU')
logical_devices = tf.config.list_logical_devices('GPU')
tf.config.optimizer.set_experimental_options({"auto_mixed_precision": cfg.SOLVER.FP16})
tf.config.optimizer.set_jit(cfg.SOLVER.XLA)
if int(tf.__version__.split('.')[1])>=4:
tf.config.experimental.enable_tensor_float_32_execution(cfg.SOLVER.TF32)
# Build the dataset and create an iterable object from it.
dataset = iter(build_dataset(cfg))
# Build the detector model.
detector = build_detector(cfg)
# Pass a single observation through the model so the shapes are set. This is necessary to load the backbone weights.
features, labels = next(dataset)
result = detector(features, training=False)
# Build the model optimizer. This will also build our learning rate schedule.
optimizer = build_optimizer(cfg)
# The trainer contains our training and evaluation steps, and sets up distributed training depending on whether we're using Horovod or SMDDP (more on this later).
trainer = build_trainer(cfg, detector, optimizer, dist='smd' if is_sm_dist() else 'hvd')
# Finally, the runner will manage our training and run our training hooks. This serves a similar role to training with Keras, but provides increased flexibility and training performance.
runner = Runner(trainer, cfg)
hooks = build_hooks(cfg)
for hook in hooks:
runner.register_hook(hook)
# Run training for 2500 steps. This will take about 30 minutes.
runner.run(dataset)
# So now we have a partially trained model. Let's go ahead and try visualizing the results. You'll notice it picks up common categories (such as people) better at this point. The images are randomly picked from the training data, so it might take a few tries to get an image where the model picks up objects at this point in training.
from sagemakercv.utils.visualization import build_image, restore_image
from sagemakercv.data.coco.coco_labels import coco_categories
import matplotlib.pyplot as plt
features, labels = next(dataset)
result = detector(features, training=False)
image_num = 0 # image number within the batch
# We first restore the original image, then extract the boxes and labels from the results.
image = restore_image(result['images'][image_num], features['image_info'][image_num]) # converts the image back to its original shape and color
boxes = result['detection_boxes'][image_num]
classes = result['detection_classes'][image_num]
scores = result['detection_scores'][image_num]
# Generate an image with the boxes and labels mapped onto it. The threshold limits the boxes to those where the model is at least this confident in the class.
detection_image = build_image(image, boxes, scores, classes, coco_categories, threshold=0.8)
plt.figure(figsize = (15, 15))
plt.imshow(detection_image)
# Great! So far you've built a partially trained model locally on Studio. For many applications, this might be enough. If all you need is to train a model on a small dataset, you can likely do everything you need with what we've covered so far.
#
# On the other hand, if you need to train a model on many GBs or even TBs of data, and don't want to wait weeks for it to finish, you'll need to run a distributed training job across multiple GPUs, or even multiple nodes. With SageMaker training jobs you can train on as many as 512 [A100 GPUs](https://www.nvidia.com/en-us/data-center/a100/). We won't go quite that far, but we'll show you how.
#
# The section below is also replicated in the `SageMaker.ipynb` notebook for future training once all the above setup is complete.
#
# Before we get started, a few notes about how SageMaker training instances work. SageMaker takes care of a lot of setup for you, but it's important to understand a little of what's happening under the hood so you can customize training to your own needs.
#
# First we're going to look at a toy estimator to explain what's happening:
#
# ```
# from sagemaker import get_execution_role
# from sagemaker.tensorflow import TensorFlow
#
# estimator = TensorFlow(
# entry_point='train.py',
# source_dir='.',
# py_version='py37',
# framework_version='2.4.1',
# role=get_execution_role(),
# instance_count=4,
# instance_type='ml.p4d.24xlarge',
# distribution=distribution,
# output_path='s3://my-bucket/my-output/',
# checkpoint_s3_uri='s3://my-bucket/my-checkpoints/',
# model_dir='s3://my-bucket/my-model/',
# hyperparameters={'config': 'my-config.yaml'},
# volume_size=500,
# code_location='s3://my-bucket/my-code/',
# )
# ```
#
# The estimator forms the basic configuration of your training job.
#
# SageMaker will first launch `instance_count=4` `instance_type=ml.p4d.24xlarge` instances. The `role` is an IAM role that SageMaker will use to launch instances on your behalf. SageMaker includes a `get_execution_role` function which grabs the execution role of your current instance. Each instance will have a `volume_size=500` EBS volume attached for your model and data. On `ml.p4d.24xlarge` and `ml.p3dn.24xlarge` instance types, SageMaker will automatically set up the [Elastic Fabric Adapter](https://aws.amazon.com/hpc/efa/). EFA provides up to 400 Gbps of communication between your training nodes, as well as [GPU Direct RDMA](https://aws.amazon.com/about-aws/whats-new/2020/11/efa-supports-nvidia-gpudirect-rdma/) on `ml.p4d.24xlarge`, which allows your GPUs to bypass the host and communicate directly with each other across nodes.
#
# Next, SageMaker will copy all the contents of `source_dir='.'` first to the `code_location='s3://my-bucket/my-code/'` S3 location, then to each of your instances. One common mistake is to leave large files or data in this directory or its subdirectories. This will slow down your launch times, or can even cause the launch to hang. Make sure to keep your working data and model artifacts elsewhere on your Studio instance so you don't accidentally copy them to your training instances. You should instead use `Channels` to copy data and model artifacts, which we'll cover shortly.
#
# SageMaker will then download the training Docker image to all your instances. Which container you download is determined by `py_version='py37'` and `framework_version='2.4.1'`. You can also use your own [custom Docker image](https://aws.amazon.com/blogs/machine-learning/bringing-your-own-custom-container-image-to-amazon-sagemaker-studio-notebooks/) by specifying an ECR address with the `image_uri` option. SageMakerCV currently works with TensorFlow versions 2.3-2.5.
#
# Before starting training, SageMaker will check your source directory for a `setup.py` file, and install it if one is present. Then SageMaker will launch training via `entry_point='train.py'`. Anything in `hyperparameters={'config': 'my-config.yaml'}` will be passed to the training script as a command line argument (i.e. `python train.py --config my-config.yaml`). The distribution setting determines what form of distributed training to launch. This will be covered in more detail later.
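#
# Inside the entry point, those hyperparameters arrive as ordinary command line flags. The repo's `train.py` does its own argument handling; the snippet below is just a hypothetical sketch of the general pattern.
#
# ```
# import argparse
# from configs import cfg
#
# parser = argparse.ArgumentParser()
# parser.add_argument("--config", type=str, required=True)
# args = parser.parse_args()
#
# cfg.merge_from_file(args.config)   # the YAML we shipped with the source_dir
# ```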
#
# During training, anything written to `/opt/ml/checkpoints` on your training instances will be synced to `checkpoint_s3_uri='s3://my-bucket/my-checkpoints/'` at the same time. This can be useful for checkpointing a model you might want to restart later, or for writing TensorBoard logs to monitor your training.
#
# When training completes, you can write your model artifacts to `/opt/ml/model` and they will be saved to `model_dir='s3://my-bucket/my-model/'`. Another option is to also write model artifacts to your checkpoint directory.
#
# Training logs and any failure messages will be written to `/opt/ml/output` and saved to `output_path='s3://my-bucket/my-output/'`.
from sagemaker import get_execution_role
from sagemaker.tensorflow import TensorFlow
from datetime import datetime
# First we need to set some names. You want `AWS_DEFAULT_REGION` to be the same region as the S3 bucket you created earlier, to ensure your training jobs read from a nearby S3 bucket.
#
# Next, set a `user_id`. This is just for naming your training job so it's easier to find later. This can be anything you like. We also get the current date and time to make organizing training jobs a little easier.
# +
# Use the same region as your S3 bucket (for example, don't launch a training job in us-east-1 with an S3 bucket in us-west-2)
os.environ['AWS_DEFAULT_REGION'] = region # This is the region we set at the beginning, when creating the S3 bucket for our data
# this is all for naming
user_id="jbsnyder-smcv-tutorial" # This is used for naming your training job, and organizing your results on S3. It can be anything you like.
date_str=datetime.now().strftime("%d-%m-%Y") # use the date and time to keep track of training jobs and organize results in S3
time_str=datetime.now().strftime("%d-%m-%Y-%H-%M-%S")
# -
# For instance type, we'll use an `ml.p4d.24xlarge`. We recommend this instance type for large training jobs. It includes the latest A100 Nvidia GPUs, which can train several times faster than the previous generation. If you would rather train partway on smaller instances, `ml.p3.2xlarge`, `ml.p3.8xlarge`, `ml.p3.16xlarge`, `ml.p3dn.24xlarge`, and `ml.g4dn.12xlarge` are all good options. In particular, if you're looking for a low cost way to try a short distributed training run, but aren't worried about the model fully converging, we recommend the `ml.g4dn.12xlarge`, which uses 4 Nvidia T4 GPUs per node.
#
# `s3_location` will be the base S3 storage location we used earlier for the COCO data. For `role` we get the execution role from our Studio instance. For `source_dir` we use the current directory. Again, make sure you haven't accidentally written any large files to this directory.
# specify training type, s3 src and nodes
instance_type="ml.p4d.24xlarge" # This can be any of 'ml.p3dn.24xlarge', 'ml.p4d.24xlarge', 'ml.p3.16xlarge', 'ml.p3.8xlarge', 'ml.p3.2xlarge', 'ml.g4dn.12xlarge'
nodes=4 # number of training nodes
s3_location=os.path.join("s3://", S3_BUCKET, S3_DIR)
role=get_execution_role() #give Sagemaker permission to launch nodes on our behalf
source_dir='.'
entry_point='train.py'
# ***
#
# Let's modify our previous training configuration for multinode. We don't need to change much. We'll increase the batch size since we have more and larger GPUs. For A100 GPUs a batch size of 12 per GPU works well. For V100 and T4 GPUs, a batch size of 6 per GPU is recommended. Make sure to lower the learning rate and increase your number of training steps if you decrease the batch size. For example, if you want to train on 2 `ml.g4dn.12xlarge` instances, you'll have 8 T4 GPUs. A batch size of `cfg.INPUT.TRAIN_BATCH_SIZE = 32`, with inference batch size of `cfg.INPUT.EVAL_BATCH_SIZE = 16`, learning rate of `cfg.SOLVER.LR = .008`, and training steps of `cfg.SOLVER.MAX_ITERS = 25000` is probably about right.
from configs import cfg
cfg.LOG_INTERVAL = 50 # Number of training steps between logging interval
cfg.MODEL.DENSE.PRE_NMS_TOP_N_TRAIN = 2000 # Top regions of interest to select before NMS
cfg.MODEL.DENSE.POST_NMS_TOP_N_TRAIN = 1000 # Top regions of interest to select after NMS
cfg.MODEL.RCNN.ROI_HEAD = "StandardRoIHead"
cfg.MODEL.FRCNN.LOSS_TYPE = "giou"
cfg.MODEL.FRCNN.LABEL_SMOOTHING = 0.1 # label smoothing for box head
cfg.INPUT.TRAIN_BATCH_SIZE = 256 # Training batch size
cfg.INPUT.EVAL_BATCH_SIZE = 128 # Evaluation batch size
cfg.SOLVER.SCHEDULE = "CosineDecay" # Learning rate schedule, either CosineDecay or PiecewiseConstantDecay
cfg.SOLVER.OPTIMIZER = "NovoGrad" # Optimizer type NovoGrad or Momentum
cfg.SOLVER.LR = .042 # Base learning rate after warmup
cfg.SOLVER.BETA_1 = 0.9 # NovoGrad beta 1 value
cfg.SOLVER.BETA_2 = 0.3 # NovoGrad beta 2 value
cfg.SOLVER.ALPHA = 0.001 # scheduler final alpha
cfg.SOLVER.WEIGHT_DECAY = 0.001 # weight decay
cfg.SOLVER.MAX_ITERS = 5500 # Total training steps
cfg.SOLVER.WARMUP_STEPS = 500 # warmup steps
cfg.SOLVER.XLA = True # Train with XLA
cfg.SOLVER.FP16 = True # Train with mixed precision enabled
cfg.SOLVER.TF32 = True # Train with TF32 data type enabled, only available on Ampere GPUs and TF 2.4 and up
cfg.SOLVER.EVAL_EPOCH_EVAL = False # Only run eval at end
cfg.HOOKS=["CheckpointHook",
"IterTimerHook",
"TextLoggerHook",
"CocoEvaluator"]
# ***
# Earlier we mentioned the `distribution` strategy in SageMaker. Distributed training can be either multi GPU single node (i.e. training on 8 GPUs in a single ml.p4d.24xlarge) or multi GPU multi node (i.e. training on 32 GPUs across 4 ml.p4d.24xlarge instances). For TensorFlow, SageMakerCV uses either Horovod or [SageMaker Distributed Data Parallel](https://docs.aws.amazon.com/sagemaker/latest/dg/data-parallel.html) (SMDDP). For single node multi GPU, or multi node on small instances, we recommend Horovod. For multi node on large instance types, SMDDP is built to fully utilize AWS network topology and EFA, providing improved scaling efficiency.
#
# To enable SMDDP, set `distribution = { "smdistributed": { "dataparallel": { "enabled": True } } }`. SageMakerCV already has SMDDP integrated. To implement SMDDP for your own models, follow [these instructions](https://docs.aws.amazon.com/sagemaker/latest/dg/data-parallel-intro.html). SMDDP will launch training from the first node in your cluster using [MPI](https://www.open-mpi.org/).
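#
# For reference, the generic SMDDP integration steps from the AWS documentation look roughly like this (module and function names are per the SMDDP TensorFlow docs; SageMakerCV's trainer already does the equivalent for you):
#
# ```
# import tensorflow as tf
# import smdistributed.dataparallel.tensorflow as sdp
#
# sdp.init()                                     # initialize the process group
# gpus = tf.config.list_physical_devices('GPU')
# tf.config.set_visible_devices(gpus[sdp.local_rank()], 'GPU')  # one GPU per process
#
# model = tf.keras.Sequential([tf.keras.layers.Dense(1)])
# optimizer = tf.keras.optimizers.SGD(0.01)
# loss_fn = tf.keras.losses.MeanSquaredError()
#
# @tf.function
# def train_step(x, y, first_batch):
#     with tf.GradientTape() as tape:
#         loss = loss_fn(y, model(x, training=True))
#     tape = sdp.DistributedGradientTape(tape)   # all-reduce gradients across workers
#     grads = tape.gradient(loss, model.trainable_variables)
#     optimizer.apply_gradients(zip(grads, model.trainable_variables))
#     if first_batch:
#         sdp.broadcast_variables(model.variables, root_rank=0)       # sync initial weights
#         sdp.broadcast_variables(optimizer.variables(), root_rank=0)
#     return loss
# ```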
#
# For Horovod based training, we can call MPI directly by setting `distribution = {"mpi": {"enabled": True,}}`.
if nodes>1 and instance_type in ['ml.p3dn.24xlarge', 'ml.p4d.24xlarge', 'ml.p3.16xlarge']:
distribution = { "smdistributed": { "dataparallel": { "enabled": True } } }
else:
distribution = {"mpi": {"enabled": True,}}
# ***
# We'll set a job name based on the user name and time. We'll then set output directories on S3 using the date and job name.
#
# For this training, we'll use the same S3 location for all 3 SageMaker output paths: `/opt/ml/checkpoints`, `/opt/ml/model`, and `/opt/ml/output`.
job_name = f'{user_id}-{time_str}' # Set the job name to user id and the current time
output_path = os.path.join(s3_location, "sagemaker-output", date_str, job_name) # Organizes results on S3 by date and job name
code_location = os.path.join(s3_location, "sagemaker-code", date_str, job_name)
# ***
# Next we need to add our data sources to our configuration file, but first let's talk a little more about how SageMaker gets data to your instance.
#
# The most straightforward way to get your data is using "Channels." These are S3 locations you specify in a dictionary when you launch a training job. For example, let's say you launch a training job with:
#
# ```
# channels = {'train': 's3://my-bucket/data/train/',
# 'test': 's3://my-bucket/data/test/',
# 'weights': 's3://my-bucket/data/weights/',
# 'dave': 's3://my-bucket/data/daves_weird_data/'}
#
# estimator.fit(channels)
# ```
#
# At the start of training, SageMaker will create a set of corresponding directories on each training node:
#
# ```
# /opt/ml/input/data/train/
# /opt/ml/input/data/test/
# /opt/ml/input/data/weights/
# /opt/ml/input/data/dave/
# ```
#
# SageMaker will then copy all the contents of the corresponding S3 locations to these directories, which you can then access in training.
#
# One downside of setting up channels like this is that it requires all the data to be downloaded to your instance at the start of training, which can delay the training launch if you're dealing with a large dataset.
#
# We have two ways to speed up launch. The first is [Fast File Mode](https://aws.amazon.com/about-aws/whats-new/2021/10/amazon-sagemaker-fast-file-mode/), which downloads data from S3 as it's requested by the training job, speeding up your launch time. You can use Fast File Mode by specifying `TrainingInputMode='FastFile'` in your SageMaker estimator configuration.
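#
# With the SageMaker Python SDK, one way to request Fast File Mode is per channel via `sagemaker.inputs.TrainingInput` (recent SDK versions; the bucket path below is just a placeholder):
#
# ```
# from sagemaker.inputs import TrainingInput
#
# channels = {'val2017': TrainingInput(
#     s3_data='s3://my-bucket/smcv-tensorflow-tutorial/data/coco/tfrecord/val2017',
#     input_mode='FastFile')}   # objects are fetched from S3 lazily as they're read
# estimator.fit(channels)
# ```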
#
# If you're dealing with really large datasets, you might prefer to instead continuously stream data from S3. Luckily, this feature is already supported in TensorFlow and SageMakerCV. If you provide the dataset builder with an S3 file pattern, it will stream TFRecords from S3 instead of reading them locally.
#
# In our case, we'll use a mix of channels and streaming from S3. We'll download the smaller pieces at the start of training (the validation data, pretrained weights, and image annotations), and we'll stream our training data directly from S3 during training.
#
# First, we setup our training channels. These are the locations where we earlier uploaded our COCO data, annotations, and weights.
channels = {'val2017': os.path.join(s3_location, 'data', 'coco', 'tfrecord', 'val2017'),
'annotations': os.path.join(s3_location, 'data', 'coco', 'annotations'),
'weights': os.path.join(s3_location, 'data', 'weights', 'resnet')}
# Now we set up the data sources in our configuration. The train file pattern takes an S3 location. The others are all set to the corresponding directory for each channel. We also set the output directory to be the SageMaker checkpoint directory, which will sync to our S3 output location.
CHANNELS_DIR='/opt/ml/input/data/' # on node
cfg.PATHS.TRAIN_FILE_PATTERN = os.path.join(s3_location, 'data', 'coco', 'tfrecord', 'train2017', 'train*')
cfg.PATHS.VAL_FILE_PATTERN = os.path.join(CHANNELS_DIR, "val2017", "val*")
cfg.PATHS.WEIGHTS = os.path.join(CHANNELS_DIR, "weights", "resnet50.ckpt")
cfg.PATHS.VAL_ANNOTATIONS = os.path.join(CHANNELS_DIR, "annotations", "instances_val2017.json")
cfg.PATHS.OUT_DIR = '/opt/ml/checkpoints'
# Save the configuration file.
dist_config_file = f"configs/dist-training-config.yaml"
with open(dist_config_file, 'w') as outfile:
with redirect_stdout(outfile): print(cfg.dump())
# Set the config file as a hyperparameter so it will be passed as a command line arg when training launches.
hyperparameters = {"config": dist_config_file}
# And now we can launch training. With 4 P4d instances, this takes about an hour. This section will also print a lot of output logs. By setting `wait=False` you can avoid printing logs in the notebook. This setting will just launch the job then return, and is useful for when you want to launch several jobs at the same time. You can then monitor each job from the [SageMaker Training Console](https://us-west-2.console.aws.amazon.com/sagemaker).
estimator = TensorFlow(
entry_point=entry_point,
source_dir=source_dir,
py_version='py37',
framework_version='2.4.1',
role=role,
instance_count=nodes,
instance_type=instance_type,
distribution=distribution,
output_path=output_path,
checkpoint_s3_uri=output_path,
model_dir=output_path,
hyperparameters=hyperparameters,
volume_size=500,
disable_profiler=True,
debugger_hook_config=False,
code_location=code_location,
)
estimator.fit(channels, wait=True, job_name=job_name)
# ***
# ### Visualizing Results
#
# And there you have it, a fully trained Mask RCNN model in about an hour. Now let's see how our model does on prediction by actually visualizing the output.
#
# Our model is stored at the S3 location we gave to the training job in `output_path`. The checkpointer hook creates a `trained_model` directory and stores the final checkpoint there. We'll need to grab the results and store them on our studio instance so we can check performance, and visualize the output.
s3fs = S3FileSystem()
model_loc = os.path.join(estimator.output_path, 'trained_model', 'model.h5')
# Copy the model from S3 to our Studio instance.
s3fs.get(model_loc, model_loc.split('/')[-1])
# We can load the trained model weights into the detector model we created earlier for the local training.
detector.load_weights(model_loc.split('/')[-1])
# Like we did for the local model, let's grab a random image from the dataset and visualize the model's predictions.
features, labels = next(dataset)
result = detector(features, training=False)
image_num = 3 # image number within the batch
image = restore_image(result['images'][image_num], features['image_info'][image_num]) # converts the image back to its original shape and color
boxes = result['detection_boxes'][image_num]
classes = result['detection_classes'][image_num]
scores = result['detection_scores'][image_num]
detection_image = build_image(image, boxes, scores, classes, coco_categories, threshold=0.8)
plt.figure(figsize = (15, 15))
plt.imshow(detection_image)
# #### Conclusion
#
# In this notebook, we've walked through the entire process of training Mask RCNN on SageMaker. We've implemented several of SageMaker's more advanced features, such as distributed training, EFA, and streaming data directly from S3. From here you can use the provided template datasets to train on your own data, or modify the framework with your own object detection model.
#
# When you're done, make sure to check that all of your SageMaker training jobs have stopped by checking the [SageMaker Training Console](https://us-west-2.console.aws.amazon.com/sagemaker). Also check that you've stopped any Studio instance you have running by selecting the session monitor on the left (the circle with a square in it), and clicking the power button next to any running instances. Your files will still be saved on the Studio EBS volume.
#
# <img src="../assets/running_instances.png" style="width: 600px">
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# +
from __future__ import print_function, division
import matplotlib
#matplotlib.use('nbagg') # interactive plots in iPython. New in matplotlib v1.4
# %matplotlib inline
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
from matplotlib import ticker
import nilmtk
import numpy as np
# +
from lasagne.layers import (InputLayer, DenseLayer, ReshapeLayer,
DimshuffleLayer, Conv1DLayer, PadLayer)
from lasagne.nonlinearities import rectify, tanh
from neuralnilm.data.loadactivations import load_nilmtk_activations
from neuralnilm.data.syntheticaggregatesource import SyntheticAggregateSource
from neuralnilm.data.realaggregatesource import RealAggregateSource
from neuralnilm.data.stridesource import StrideSource
from neuralnilm.data.datapipeline import DataPipeline
from neuralnilm.data.processing import DivideBy, IndependentlyCenter
from neuralnilm.net import Net, build_net
from neuralnilm.trainer import Trainer
from neuralnilm.metrics import Metrics
from neuralnilm.utils import select_windows, filter_activations, configure_logger
from neuralnilm.layers import BLSTMLayer
from neuralnilm.rectangles import plot_rectangles
# -
configure_logger()
NILMTK_FILENAME = '/data/mine/vadeec/merged/ukdale.h5'
NUM_SEQ_PER_BATCH = 64
SAMPLE_PERIOD = 6
STRIDE = 16
APPLIANCES = [
'kettle', 'microwave', 'washing machine', 'dish washer', 'fridge']
WINDOWS = {
'train': {
1: ("2013-04-12", "2013-06-01")
},
'unseen_activations_of_seen_appliances': {
1: ("2014-12-16", "2015-01-02")
},
'unseen_appliances': {
5: ("2014-09-01", None)
}
}
APPLIANCE = 'fridge'
def get_pipeline(target_appliance, activations):
if target_appliance == 'kettle':
seq_length = 128
max_appliance_power = 3100
elif target_appliance == 'microwave':
seq_length = 288
max_appliance_power = 3000
elif target_appliance == 'washing machine':
seq_length = 1024
max_appliance_power = 2500
elif target_appliance == 'fridge':
seq_length = 512
max_appliance_power = 300
elif target_appliance == 'dish washer':
seq_length = 1024 + 512
max_appliance_power = 2500
"""
synthetic_agg_source = SyntheticAggregateSource(
activations=activations,
target_appliance=target_appliance,
seq_length=seq_length,
sample_period=SAMPLE_PERIOD,
distractor_inclusion_prob=0.25,
target_inclusion_prob=1,
allow_incomplete_target=False
)
"""
real_agg_source = RealAggregateSource(
activations=activations,
target_appliance=target_appliance,
seq_length=seq_length,
filename=NILMTK_FILENAME,
windows=WINDOWS,
sample_period=SAMPLE_PERIOD,
target_inclusion_prob=1,
allow_incomplete_target=False
)
input_std = 374.43884277
pipeline = DataPipeline(
[real_agg_source],
num_seq_per_batch=NUM_SEQ_PER_BATCH,
input_processing=[DivideBy(input_std), IndependentlyCenter()],
target_processing=[DivideBy(max_appliance_power)]
)
return pipeline
def get_params_filename(appliance, arch):
if arch == 'rnn':
experiment = 'e566'
else:
experiment = 'e567'
full_exp_name = experiment + '_' + appliance + '_' + arch
PATH = '/storage/experiments/neuralnilm/figures/'
return PATH + full_exp_name + '/' + full_exp_name + '.hdf5'
def get_ae(batch, appliance):
NUM_FILTERS = 8
input_shape = batch.input.shape
target_shape = input_shape
seq_length = input_shape[1]
output_layer = build_net(
input_shape=input_shape,
layers=[
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # (batch, features, time)
},
{
'type': Conv1DLayer, # convolve over the time axis
'num_filters': NUM_FILTERS,
'filter_size': 4,
'stride': 1,
'nonlinearity': None,
'pad': 'valid'
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # back to (batch, time, features)
},
{
'type': DenseLayer,
'num_units': (seq_length - 3) * NUM_FILTERS,
'nonlinearity': rectify
},
{
'type': DenseLayer,
'num_units': 128,
'nonlinearity': rectify
},
{
'type': DenseLayer,
'num_units': (seq_length - 3) * NUM_FILTERS,
'nonlinearity': rectify
},
{
'type': ReshapeLayer,
'shape': (-1, (seq_length - 3), NUM_FILTERS)
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # (batch, features, time)
},
{ # DeConv
'type': Conv1DLayer,
'num_filters': 1,
'filter_size': 4,
'stride': 1,
'nonlinearity': None,
'pad': 'full'
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # back to (batch, time, features)
}
]
)
ARCH = 'ae'
net = Net(output_layer, description=ARCH)
net.load_params(get_params_filename(appliance=appliance, arch=ARCH), 100000)
return net
def get_rnn(batch, appliance):
input_shape = batch.input.shape
target_shape = input_shape
seq_length = input_shape[1]
output_layer = build_net(
input_shape=input_shape,
layers=[
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # (batch, features, time)
},
{
'type': Conv1DLayer, # convolve over the time axis
'num_filters': 16,
'filter_size': 4,
'stride': 1,
'nonlinearity': None,
'pad': 'same'
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # back to (batch, time, features)
},
{
'type': BLSTMLayer,
'num_units': 128,
'merge_mode': 'concatenate',
'grad_clipping': 10.0,
'gradient_steps': 500
},
{
'type': BLSTMLayer,
'num_units': 256,
'merge_mode': 'concatenate',
'grad_clipping': 10.0,
'gradient_steps': 500
},
{
'type': ReshapeLayer,
'shape': (input_shape[0] * input_shape[1], 512)
},
{
'type': DenseLayer,
'num_units': 128,
'nonlinearity': tanh
},
{
'type': DenseLayer,
'num_units': 1,
'nonlinearity': None
},
{
'type': ReshapeLayer,
'shape': target_shape
}
]
)
ARCH = 'rnn'
net = Net(output_layer, description=ARCH)
net.load_params(get_params_filename(appliance=appliance, arch=ARCH), 10000)
return net
def get_rectangles_net(batch, appliance):
input_shape = batch.input.shape
target_shape = (input_shape[0], 3, 1)
output_layer = build_net(
input_shape=input_shape,
layers=[
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # (batch, features, time)
},
{
'type': PadLayer,
'width': 4
},
{
'type': Conv1DLayer, # convolve over the time axis
'num_filters': 16,
'filter_size': 4,
'stride': 1,
'nonlinearity': None,
'pad': 'valid'
},
{
'type': Conv1DLayer, # convolve over the time axis
'num_filters': 16,
'filter_size': 4,
'stride': 1,
'nonlinearity': None,
'pad': 'valid'
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # back to (batch, time, features)
},
{
'type': DenseLayer,
'num_units': 512 * 8,
'nonlinearity': rectify
},
{
'type': DenseLayer,
'num_units': 512 * 6,
'nonlinearity': rectify
},
{
'type': DenseLayer,
'num_units': 512 * 4,
'nonlinearity': rectify
},
{
'type': DenseLayer,
'num_units': 512,
'nonlinearity': rectify
},
{
'type': DenseLayer,
'num_units': 3,
'nonlinearity': None
},
{
'type': ReshapeLayer,
'shape': target_shape
}
]
)
ARCH = 'rectangles'
net = Net(output_layer, description=ARCH)
net.load_params(get_params_filename(appliance=appliance, arch=ARCH), 300000)
return net
activations = load_nilmtk_activations(
appliances=APPLIANCES,
filename=NILMTK_FILENAME,
sample_period=SAMPLE_PERIOD,
windows=WINDOWS
)
# +
pipeline = get_pipeline(APPLIANCE, activations)
# -
batch = pipeline.get_batch(fold='unseen_activations_of_seen_appliances')
seq_length = batch.input.shape[1]
np.save(APPLIANCE + '_input', batch.input)
np.save(APPLIANCE + '_target', batch.target)
# Run the nets :)
fig = None
outputs = {}
for net_getter in [get_ae, get_rnn, get_rectangles_net]:
net = net_getter(batch, APPLIANCE)
net_name = net.description
print("Getting output for", net_name)
output = net.deterministic_output_func(batch.after_processing.input)
outputs[net_name] = output
for arch, output in outputs.iteritems():
np.save(APPLIANCE + '_output_' + arch, output)
# +
SEQ_I = 23
# 3, 4, 17, 18, 19, 33, 34, 37, 60 is good and has a fair amount of background
# 15, 16, 24, 26, 38, 44 are interesting!
# 10 not bad
# 22 shows all nets classifying a defrost cycle as a normal fridge cycle
# 62 pretty good (and is a defrost cycle)
# 23, 25, 29, 31 (tricky!), 35, 39, 40, 45, 49, 51, 54, 61 are nice
# FAVORITE = 23
if fig is not None:
plt.close(fig)
fig, axes = plt.subplots(2, sharex=True, figsize=(12,6))
fig.suptitle("Sequence {}".format(SEQ_I))
# Plot target
axes[0].plot(batch.target[SEQ_I, :, 0], label='Target')
# Plot net outputs
for net_name, output in outputs.iteritems():
if net_name == 'rectangles':
plot_rectangles(axes[0], output[SEQ_I], plot_seq_width=seq_length)
else:
axes[0].plot(output[SEQ_I, :, 0], label=net_name)
axes[0].legend()
axes[0].set_ylim((0, 0.5))
axes[0].set_title(APPLIANCE.title())
# Plot aggregate input
axes[1].plot(batch.input[SEQ_I, :, 0])
axes[1].set_title('Aggregate')
axes[1].set_xlim((0, seq_length))
plt.show()
# +
# 18, 34, 46 is good, but no background
# 41 has background and is pretty good for AE and rectangles
# 55 has lots of background and is surprisingly good
#np.save('fridge_input_1.npy', batch.input)
#np.save('fridge_target_1.npy', batch.target)
# +
# 0, 4, 6, 13, 15, 18, 19, 26, 27, 29, 52 good but no background
# 40 has lots of background and is pretty good
# 54 has lots of background and is pretty good, not great for RNN though
# 58, 62 aren't terrible
#np.save('fridge_input_2.npy', batch.input)
#np.save('fridge_target_2.npy', batch.target)
# +
# 3, 4, 17, 18, 19, 33, 34, 37, 60 is good and has a fair amount of background
# 15, 16, 24, 26, 38, 44 are interesting!
# 10 not bad
# 22 shows all nets classifying a defrost cycle as a normal fridge cycle
# 62 pretty good (and is a defrost cycle)
# 23, 25, 29, 31 (tricky!), 35, 39, 40, 45, 49, 51, 54, 61 are nice
# FAVORITE = 23
#np.save('fridge_input.npy', batch.input)
#np.save('fridge_target.npy', batch.target)
# -
from neuralnilm.data.stride import stride
batches = stride(batch.input[23, :, 0], num_seq_per_batch=64, seq_length=batch.input.shape[1], stride=STRIDE)
len(batches)
plt.close()
strided_batch = batches[0]
N = 8
fig, axes = plt.subplots(N, sharey=True)
for i in range(N):
axes[i].plot(strided_batch[i+8, :, 0])
plt.show()
strided_outputs = {}
for net_getter in [get_ae, get_rnn, get_rectangles_net]:
net = net_getter(batch, APPLIANCE)
net_name = net.description
print("Getting output for", net_name)
output = net.deterministic_output_func(strided_batch)
strided_outputs[net_name] = output
for arch, output in strided_outputs.iteritems():
np.save(APPLIANCE + '_strided_output_' + arch, output)
# +
import seaborn as sns
sns.reset_orig()
plt.close()
fig, ax = plt.subplots(1)
seq_length = batch.input.shape[1]
palette = sns.palettes.color_palette('bright', n_colors=3)
for arch_i, (arch, output) in enumerate(strided_outputs.iteritems()):
for seq_i in range(NUM_SEQ_PER_BATCH):
start = seq_i * STRIDE
end = start + seq_length
x = np.arange(start, end)
color = palette[arch_i]
if arch == 'rectangles':
plot_rectangles(
ax, output[seq_i], plot_seq_width=seq_length, offset=start,
alpha=0.05, color=color, zorder=0)
#ax.plot(x, strided_batch[seq_i], color='k', linewidth=0.5, alpha=0.05)
else:
if arch == 'ae':
zorder = 5
data = output[seq_i, :, 0]
elif arch == 'rnn':
zorder = 10
x = x[50:-50]
data = output[seq_i, 50:-50, 0]
ax.plot(x, data, color=color, alpha=0.2, linewidth=2, zorder=zorder)
plt.show()
# -
| notebooks/.ipynb_checkpoints/save_net_outputs-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + colab={} colab_type="code" id="o_0K1lsW1dj9"
"""
You can run either this notebook locally (if you have all the dependencies and a GPU) or on Google Colab.
Instructions for setting up Colab are as follows:
1. Open a new Python 3 notebook.
2. Import this notebook from GitHub (File -> Upload Notebook -> "GITHUB" tab -> copy/paste GitHub URL)
3. Connect to an instance with a GPU (Runtime -> Change runtime type -> select "GPU" for hardware accelerator)
4. Run this cell to set up dependencies.
"""
# If you're using Google Colab and not running locally, run this cell
# install NeMo
BRANCH = 'main'
# !python -m pip install git+https://github.com/NVIDIA/NeMo.git@$BRANCH#egg=nemo_toolkit[nlp]
# + pycharm={"name": "#%%\n"}
# If you're not using Colab, you might need to upgrade jupyter notebook to avoid the following error:
# 'ImportError: IProgress not found. Please update jupyter and ipywidgets.'
# ! pip install ipywidgets
# ! jupyter nbextension enable --py widgetsnbextension
# Please restart the kernel after running this cell
# + colab={} colab_type="code" id="dzqD2WDFOIN-"
from nemo.collections import nlp as nemo_nlp
from nemo.utils.exp_manager import exp_manager
import os
import wget
import torch
import pytorch_lightning as pl
from omegaconf import OmegaConf
# + [markdown] colab_type="text" id="daYw_Xll2ZR9"
# In this tutorial, we are going to describe how to finetune BioMegatron - a [BERT](https://arxiv.org/abs/1810.04805)-like [Megatron-LM](https://arxiv.org/pdf/1909.08053.pdf) model pre-trained on large biomedical text corpus ([PubMed](https://pubmed.ncbi.nlm.nih.gov/) abstracts and full-text commercial use collection) - on [RE: Text mining chemical-protein interactions (CHEMPROT)](https://biocreative.bioinformatics.udel.edu/tasks/biocreative-vi/track-5/).
#
# The model size of Megatron-LM can be much larger than that of BERT, up to multi-billion parameters, compared to the 345 million parameters of BERT-large.
# There are some alternatives to BioMegatron, most notably [BioBERT](https://arxiv.org/abs/1901.08746). Compared to BioBERT, BioMegatron is larger in model size and pre-trained on a larger text corpus.
#
# A more general tutorial of using BERT-based models, including Megatron-LM, for downstream natural language processing tasks can be found [here](https://github.com/NVIDIA/NeMo/blob/main/tutorials/nlp/01_Pretrained_Language_Models_for_Downstream_Tasks.ipynb).
# -
# # Task Description
# **Relation Extraction (RE)** can be regarded as a type of sentence classification.
#
# The task is to classify the relation between a [GENE] and a [CHEMICAL] in a sentence, for example:
# ```html
# 14967461.T1.T22 <@CHEMICAL$> inhibitors currently under investigation include the small molecules <@GENE$> (Iressa, ZD1839) and erlotinib (Tarceva, OSI-774), as well as monoclonal antibodies such as cetuximab (IMC-225, Erbitux). <CPR:4>
# 14967461.T2.T22 <@CHEMICAL$> inhibitors currently under investigation include the small molecules gefitinib (<@GENE$>, ZD1839) and erlotinib (Tarceva, OSI-774), as well as monoclonal antibodies such as cetuximab (IMC-225, Erbitux). <CPR:4>
# ```
# to one of the following classes:
#
# | Relation Class | Relations |
# | ----------- | ----------- |
# | CPR:3 | Upregulator and activator |
# | CPR:4 | Downregulator and inhibitor |
# | CPR:5 | Agonist |
# | CPR:6 | Antagonist |
# | CPR:9 | Substrate and product of |
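#
# Concretely, each annotated sentence paired with its label is simply a (text, label-id) example for sentence classification. The tiny sketch below is illustrative only: the id values (and the presence of a "false"/no-relation class) are assumptions, and the actual mapping is produced by the pre-processing step later in this notebook.
# label ids assumed for illustration only; see the generated label_mapping.tsv below
label_map = {"CPR:3": 0, "CPR:4": 1, "CPR:5": 2, "CPR:6": 3, "CPR:9": 4, "false": 5}
sentence = ("<@CHEMICAL$> inhibitors currently under investigation include the small "
            "molecules <@GENE$> (Iressa, ZD1839) and erlotinib (Tarceva, OSI-774).")
train_example = (sentence, label_map["CPR:4"])
print(train_example)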
# + [markdown] colab_type="text" id="ZnuziSwJ1yEB"
# # Datasets
#
# Details of ChemProt Relation Extraction task and the original data can be found on the [BioCreative VI website](https://biocreative.bioinformatics.udel.edu/tasks/biocreative-vi/track-5/)
#
# ChemProt dataset pre-processed for easier consumption can be downloaded from [here](https://github.com/arwhirang/recursive_chemprot/blob/master/Demo/tree_LSTM/data/chemprot-data_treeLSTM.zip) or [here](https://github.com/ncbi-nlp/BLUE_Benchmark/releases/download/0.1/bert_data.zip)
# + colab={} colab_type="code" id="--wJ2891aIIE"
TASK = 'ChemProt'
DATA_DIR = 'DATA_DIR'
WORK_DIR = 'WORK_DIR'
MODEL_CONFIG = 'text_classification_config.yaml'
# -
os.makedirs(DATA_DIR, exist_ok=True)
os.makedirs(os.path.join(DATA_DIR, 'RE'), exist_ok=True)
os.makedirs(WORK_DIR, exist_ok=True)
# download the dataset
wget.download('https://github.com/arwhirang/recursive_chemprot/blob/master/Demo/tree_LSTM/data/chemprot-data_treeLSTM.zip?raw=true',
os.path.join(DATA_DIR, 'data_re.zip'))
# !unzip -o DATA_DIR/data_re.zip -d DATA_DIR/RE
# + colab={} colab_type="code" id="qB0oLE4R9EhJ"
# ! ls -l $DATA_DIR/RE
# -
# ## Pre-process dataset
# Let's convert the dataset into a format that is compatible with the [NeMo text-classification module](https://github.com/NVIDIA/NeMo/blob/main/examples/nlp/text_classification/text_classification_with_bert.py).
wget.download(f'https://raw.githubusercontent.com/NVIDIA/NeMo/{BRANCH}/examples/nlp/text_classification/data/import_datasets.py')
# ! python import_datasets.py --dataset_name=chemprot --source_data_dir=DATA_DIR/RE --target_data_dir=DATA_DIR/RE
# let's take a look at the training data
# ! head -n 5 {DATA_DIR}/RE/train.tsv
# let's check the label mapping
# ! cat {DATA_DIR}/RE/label_mapping.tsv
# It is not necessary to have the mapping exactly like this - it can be different.
# We use the same [mapping used by BioBERT](https://github.com/dmis-lab/biobert/blob/master/run_re.py#L438) so that comparison can be more straightforward.
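# If you want that mapping as a Python dict (for example, to decode predictions later), a small helper like the one below works. It assumes each row of label_mapping.tsv is a tab-separated (label, id) pair; adjust the parsing if the `cat` output above shows a different layout.
def load_label_mapping(path):
    """Read an (assumed) `label<TAB>id` TSV into a {label: id} dict."""
    mapping = {}
    with open(path) as f:
        for line in f:
            line = line.strip()
            if not line:
                continue
            label, idx = line.split('\t')
            mapping[label] = int(idx)
    return mapping
# example usage (uncomment once the file layout is confirmed):
# print(load_label_mapping(os.path.join(DATA_DIR, 'RE', 'label_mapping.tsv')))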
# + [markdown] colab_type="text" id="_whKCxfTMo6Y"
# # Model configuration
#
# Now, let's take a closer look at the model's configuration and learn to train the model.
#
# The model is defined in a config file which declares multiple important sections. They are:
# - **model**: All arguments that are related to the Model - language model, a classifier, optimizer and schedulers, datasets and any other related information
#
# - **trainer**: Any argument to be passed to PyTorch Lightning
# + colab={} colab_type="code" id="T1gA8PsJ13MJ"
# download the model's configuration file
config_dir = WORK_DIR + '/configs/'
os.makedirs(config_dir, exist_ok=True)
if not os.path.exists(config_dir + MODEL_CONFIG):
print('Downloading config file...')
wget.download(f'https://raw.githubusercontent.com/NVIDIA/NeMo/{BRANCH}/examples/nlp/text_classification/conf/' + MODEL_CONFIG, config_dir)
else:
    print('Config file already exists')
# + colab={} colab_type="code" id="mX3KmWMvSUQw"
# this line will print the entire config of the model
config_path = f'{WORK_DIR}/configs/{MODEL_CONFIG}'
print(config_path)
config = OmegaConf.load(config_path)
# -
config.model.train_ds.file_path = os.path.join(DATA_DIR, 'RE', 'train.tsv')
config.model.validation_ds.file_path = os.path.join(DATA_DIR, 'RE', 'dev.tsv')
config.model.task_name = 'chemprot'
# Note: these are small batch-sizes - increase as appropriate to available GPU capacity
config.model.train_ds.batch_size=8
config.model.validation_ds.batch_size=8
config.model.dataset.num_classes=6
print(OmegaConf.to_yaml(config))
# + [markdown] colab_type="text" id="ZCgWzNBkaQLZ"
# # Model Training
# ## Setting up Data within the config
#
# Among other things, the config file contains dictionaries called **dataset**, **train_ds** and **validation_ds**. These are the configurations used to set up the Dataset and DataLoaders for the corresponding data splits.
#
# We assume that both training and evaluation files are located in the same directory, and use the default names mentioned during the data download step.
# So, to start model training, we simply need to specify `model.dataset.data_dir`, like we are going to do below.
#
# Also notice that some config lines, including `model.dataset.data_dir`, have `???` in place of paths; this means that values for these fields are required to be specified by the user.
#
# Let's now add the data directory path, task name and output directory for saving predictions to the config.
# + colab={} colab_type="code" id="LQHCJN-ZaoLp"
config.model.task_name = TASK
config.model.output_dir = WORK_DIR
config.model.dataset.data_dir = DATA_DIR
# + [markdown] colab_type="text" id="nB96-3sTc3yk"
# ## Building the PyTorch Lightning Trainer
#
# NeMo models are primarily PyTorch Lightning modules - and therefore are entirely compatible with the PyTorch Lightning ecosystem.
#
# Let's first instantiate a Trainer object
# + colab={} colab_type="code" id="1tG4FzZ4Ui60"
print("Trainer config - \n")
print(OmegaConf.to_yaml(config.trainer))
# + colab={} colab_type="code" id="knF6QeQQdMrH"
# let's modify some trainer configs
# checks if we have GPU available and uses it
cuda = 1 if torch.cuda.is_available() else 0
config.trainer.gpus = cuda
# for PyTorch Native AMP set precision=16
config.trainer.precision = 16 if torch.cuda.is_available() else 32
# configure the distributed training backend
config.trainer.accelerator = 'DDP'
trainer = pl.Trainer(**config.trainer)
# + [markdown] colab_type="text" id="8IlEMdVxdr6p"
# ## Setting up a NeMo Experiment
#
# NeMo has an experiment manager that handles logging and checkpointing for us, so let's use it:
# + colab={} colab_type="code" id="8uztqGAmdrYt"
config.exp_manager.exp_dir = WORK_DIR
exp_dir = exp_manager(trainer, config.get("exp_manager", None))
# the exp_dir provides a path to the current experiment for easy access
exp_dir = str(exp_dir)
exp_dir
# + [markdown] colab_type="text" id="8tjLhUvL_o7_"
# Before initializing the model, we might want to modify some of the model configs. Here we are modifying it to use BioMegatron, [Megatron-LM BERT](https://arxiv.org/abs/1909.08053) pre-trained on [PubMed](https://pubmed.ncbi.nlm.nih.gov/) biomedical text corpus.
# + colab={} colab_type="code" id="Xeuc2i7Y_nP5"
# complete list of supported BERT-like models
print(nemo_nlp.modules.get_pretrained_lm_models_list())
# specify the BERT-like model you want to use, for example, "megatron-bert-345m-uncased" or 'bert-base-uncased'
PRETRAINED_BERT_MODEL = "biomegatron-bert-345m-uncased"
# + colab={} colab_type="code" id="RK2xglXyAUOO"
# add the model parameters specified above to the config
config.model.language_model.pretrained_model_name = PRETRAINED_BERT_MODEL
# + [markdown] colab_type="text" id="fzNZNAVRjDD-"
# Now, we are ready to initialize our model. During the model initialization call, the dataset and data loaders will be prepared for training and evaluation.
# Also, the pretrained BERT model will be downloaded; note that this can take up to a few minutes depending on the size of the chosen BERT model.
# + colab={} colab_type="code" id="NgsGLydWo-6-"
model = nemo_nlp.models.TextClassificationModel(cfg=config.model, trainer=trainer)
# + [markdown] colab_type="text" id="kQ592Tx4pzyB"
# ## Monitoring training progress
# Optionally, you can create a Tensorboard visualization to monitor training progress.
# If you're not using Colab, refer to [https://www.tensorflow.org/tensorboard/tensorboard_in_notebooks](https://www.tensorflow.org/tensorboard/tensorboard_in_notebooks) if you're facing issues with running the cell below.
# + colab={} colab_type="code" id="mTJr16_pp0aS"
try:
from google import colab
COLAB_ENV = True
except (ImportError, ModuleNotFoundError):
COLAB_ENV = False
# Load the TensorBoard notebook extension
if COLAB_ENV:
# %load_ext tensorboard
# %tensorboard --logdir {exp_dir}
else:
print("To use tensorboard, please use this notebook in a Google Colab environment.")
# + colab={} colab_type="code" id="hUvnSpyjp0Dh"
# start model training
trainer.fit(model)
# + [markdown] colab_type="text" id="ref1qSonGNhP"
# ## Training Script
#
# If you have NeMo installed locally, you can also train the model with `examples/nlp/text_classification/text_classification_with_bert.py.`
#
# To run training script, use:
#
# `python text_classification_with_bert.py \
# model.dataset.data_dir=PATH_TO_DATA_DIR \
# model.task_name=TASK`
#
# -
# The training could take several minutes and the results should look something like:
#
# ```
# precision recall f1-score support
#
# 0 0.7328 0.8348 0.7805 115
# 1 0.9402 0.9291 0.9346 7950
# 2 0.8311 0.9146 0.8708 199
# 3 0.6400 0.6302 0.6351 457
# 4 0.8002 0.8317 0.8156 1093
# 5 0.7228 0.7518 0.7370 548
#
# accuracy 0.8949 10362
# macro avg 0.7778 0.8153 0.7956 10362
# weighted avg 0.8963 0.8949 0.8954 10362
# ```
| tutorials/nlp/Relation_Extraction-BioMegatron.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# <h1> Time series prediction using RNNs, with TensorFlow and Cloud ML Engine </h1>
#
# This notebook illustrates:
# <ol>
# <li> Creating a Recurrent Neural Network in TensorFlow
# <li> Creating a Custom Estimator in tf.contrib.learn
# <li> Training on Cloud ML Engine
# </ol>
#
# <p>
#
# <h3> Simulate some time-series data </h3>
#
# Essentially a set of sinusoids with random amplitudes and frequencies.
import tensorflow as tf
print tf.__version__
# +
import numpy as np
import tensorflow as tf
import seaborn as sns
import pandas as pd
SEQ_LEN = 10
def create_time_series():
freq = (np.random.random()*0.5) + 0.1 # 0.1 to 0.6
ampl = np.random.random() + 0.5 # 0.5 to 1.5
x = np.sin(np.arange(0,SEQ_LEN) * freq) * ampl
return x
for i in xrange(0, 5):
sns.tsplot( create_time_series() ); # 5 series
# +
def to_csv(filename, N):
with open(filename, 'w') as ofp:
for lineno in xrange(0, N):
seq = create_time_series()
line = ",".join(map(str, seq))
ofp.write(line + '\n')
to_csv('train.csv', 1000) # 1000 sequences
to_csv('valid.csv', 50)
# -
# !head -5 train.csv valid.csv
# <h2> RNN </h2>
#
# For more info, see:
# <ol>
# <li> http://colah.github.io/posts/2015-08-Understanding-LSTMs/ for the theory
# <li> https://www.tensorflow.org/tutorials/recurrent for explanations
# <li> https://github.com/tensorflow/models/tree/master/tutorials/rnn/ptb for sample code
# </ol>
#
# Here, we are trying to predict from 9 values of a timeseries, the tenth value.
#
# <p>
#
# <h3> Imports </h3>
#
# Several tensorflow packages and shutil
import tensorflow as tf
import shutil
import tensorflow.contrib.learn as tflearn
import tensorflow.contrib.layers as tflayers
from tensorflow.contrib.learn.python.learn import learn_runner
import tensorflow.contrib.metrics as metrics
import tensorflow.contrib.rnn as rnn
# <h3> Input Fn to read CSV </h3>
#
# Our CSV file structure is quite simple -- a bunch of floating point numbers (note the type of DEFAULTS). We ask for the data to be read BATCH_SIZE sequences at a time. The Estimator API in tf.contrib.learn wants the features returned as a dict. We'll just call this timeseries column 'rawdata'.
# <p>
# Our CSV file sequences consist of 10 numbers. We'll assume that 9 of them are inputs and we need to predict the last one.
DEFAULTS = [[0.0] for x in xrange(0, SEQ_LEN)]
BATCH_SIZE = 20
TIMESERIES_COL = 'rawdata'
N_OUTPUTS = 1 # in each sequence, values 1-9 are features, and value 10 is the label
N_INPUTS = SEQ_LEN - N_OUTPUTS
# Reading data using the Estimator API in tf.learn requires an input_fn. This input_fn needs to return a dict of features and the corresponding labels.
# <p>
# So, we read the CSV file. The Tensor format here will be batchsize x 1 -- entire line. We then decode the CSV. At this point, all_data will contain a list of Tensors. Each tensor has a shape batchsize x 1. There will be 10 of these tensors, since SEQ_LEN is 10.
# <p>
# We split these 10 into 9 and 1 (N_OUTPUTS is 1). Put the 9 into a dict, call it features. The other is the ground truth, so labels.
# read data and convert to needed format
def read_dataset(filename, mode=tf.contrib.learn.ModeKeys.TRAIN):
def _input_fn():
num_epochs = 100 if mode == tf.contrib.learn.ModeKeys.TRAIN else 1
# could be a path to one file or a file pattern.
input_file_names = tf.train.match_filenames_once(filename)
filename_queue = tf.train.string_input_producer(
input_file_names, num_epochs=num_epochs, shuffle=True)
reader = tf.TextLineReader()
_, value = reader.read_up_to(filename_queue, num_records=BATCH_SIZE)
value_column = tf.expand_dims(value, -1)
print 'readcsv={}'.format(value_column)
# all_data is a list of tensors
all_data = tf.decode_csv(value_column, record_defaults=DEFAULTS)
inputs = all_data[:len(all_data)-N_OUTPUTS] # first few values
label = all_data[len(all_data)-N_OUTPUTS : ] # last few values
# from list of tensors to tensor with one more dimension
inputs = tf.concat(inputs, axis=1)
label = tf.concat(label, axis=1)
print 'inputs={}'.format(inputs)
return {TIMESERIES_COL: inputs}, label # dict of features, label
return _input_fn
# <h3> Define RNN </h3>
#
# A recurrent neural network consists of possibly stacked LSTM cells.
# <p>
# The RNN has one output per input, so it will have 9 output cells (one for each of the 9 inputs). We use only the last output cell, but rather than use it directly, we do a matrix multiplication of that cell by a set of weights to get the actual predictions. This allows for a degree of scaling between inputs and predictions if necessary (we don't really need it in this problem).
# <p>
# Finally, to supply a model function to the Estimator API, you need to return a ModelFnOps. The rest of the function creates the necessary objects.
# +
LSTM_SIZE = 3 # number of hidden layers in each of the LSTM cells
# create the inference model
def simple_rnn(features, targets, mode):
# 0. Reformat input shape to become a sequence
x = tf.split(features[TIMESERIES_COL], N_INPUTS, 1)
#print 'x={}'.format(x)
# 1. configure the RNN
lstm_cell = rnn.BasicLSTMCell(LSTM_SIZE, forget_bias=1.0)
outputs, _ = rnn.static_rnn(lstm_cell, x, dtype=tf.float32)
# slice to keep only the last cell of the RNN
outputs = outputs[-1]
#print 'last outputs={}'.format(outputs)
# output is result of linear activation of last layer of RNN
weight = tf.Variable(tf.random_normal([LSTM_SIZE, N_OUTPUTS]))
bias = tf.Variable(tf.random_normal([N_OUTPUTS]))
predictions = tf.matmul(outputs, weight) + bias
# 2. loss function, training/eval ops
if mode == tf.contrib.learn.ModeKeys.TRAIN or mode == tf.contrib.learn.ModeKeys.EVAL:
loss = tf.losses.mean_squared_error(targets, predictions)
train_op = tf.contrib.layers.optimize_loss(
loss=loss,
global_step=tf.contrib.framework.get_global_step(),
learning_rate=0.01,
optimizer="SGD")
eval_metric_ops = {
"rmse": tf.metrics.root_mean_squared_error(targets, predictions)
}
else:
loss = None
train_op = None
eval_metric_ops = None
# 3. Create predictions
predictions_dict = {"predicted": predictions}
# 4. return ModelFnOps
return tflearn.ModelFnOps(
mode=mode,
predictions=predictions_dict,
loss=loss,
train_op=train_op,
eval_metric_ops=eval_metric_ops)
# -
# <h3> Experiment </h3>
#
# Distributed training is launched using an Experiment. The key line here is that we use tflearn.Estimator rather than, say, tflearn.DNNRegressor. This allows us to provide a model_fn, which will be our RNN defined above. Note also that we specify a serving_input_fn -- this is how we parse the input data provided to us at prediction time.
# +
def get_train():
return read_dataset('train.csv', mode=tf.contrib.learn.ModeKeys.TRAIN)
def get_valid():
return read_dataset('valid.csv', mode=tf.contrib.learn.ModeKeys.EVAL)
def serving_input_fn():
feature_placeholders = {
TIMESERIES_COL: tf.placeholder(tf.float32, [None, N_INPUTS])
}
features = {
key: tf.expand_dims(tensor, -1)
for key, tensor in feature_placeholders.items()
}
features[TIMESERIES_COL] = tf.squeeze(features[TIMESERIES_COL], axis=[2])
print 'serving: features={}'.format(features[TIMESERIES_COL])
return tflearn.utils.input_fn_utils.InputFnOps(
features,
None,
feature_placeholders
)
from tensorflow.contrib.learn.python.learn.utils import saved_model_export_utils
def experiment_fn(output_dir):
# run experiment
return tflearn.Experiment(
tflearn.Estimator(model_fn=simple_rnn, model_dir=output_dir),
train_input_fn=get_train(),
eval_input_fn=get_valid(),
eval_metrics={
'rmse': tflearn.MetricSpec(
metric_fn=metrics.streaming_root_mean_squared_error
)
},
export_strategies=[saved_model_export_utils.make_export_strategy(
serving_input_fn,
default_output_alternative_key=None,
exports_to_keep=1
)]
)
shutil.rmtree('outputdir', ignore_errors=True) # start fresh each time
learn_runner.run(experiment_fn, 'outputdir')
# -
# <h3> Standalone Python module </h3>
#
# To train this on Cloud ML Engine, we take the code in this notebook and package it as a standalone Python module.
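# A minimal sketch of what `simplernn/trainer/task.py` could look like is shown below. The module layout and flag handling are assumptions inferred from the commands that follow, not the actual repository code:
#
# ```python
# import argparse
# from tensorflow.contrib.learn.python.learn import learn_runner
# from trainer.model import experiment_fn  # hypothetical module holding the code from the cells above
#
# if __name__ == '__main__':
#     parser = argparse.ArgumentParser()
#     parser.add_argument('--train_data_paths', required=True)
#     parser.add_argument('--eval_data_paths', required=True)
#     parser.add_argument('--output_dir', required=True)
#     parser.add_argument('--num_epochs', type=int, default=100)
#     parser.add_argument('--job-dir', default='./tmp')
#     args = parser.parse_args()
#     learn_runner.run(experiment_fn, args.output_dir)
# ```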
# %bash
# run module as-is
REPO=$(pwd)
# echo $REPO
# rm -rf outputdir
export PYTHONPATH=${PYTHONPATH}:${REPO}/simplernn
python -m trainer.task \
--train_data_paths="${REPO}/train.csv*" \
--eval_data_paths="${REPO}/valid.csv*" \
--output_dir=${REPO}/outputdir \
--job-dir=./tmp
# Try out online prediction. This is how the REST API will work after you train on Cloud ML Engine
# %writefile test.json
{"rawdata": [0,0.214,0.406,0.558,0.655,0.687,0.65,0.549,0.393]}
# %bash
MODEL_DIR=$(ls ./outputdir/export/Servo/)
gcloud ml-engine local predict --model-dir=./outputdir/export/Servo/$MODEL_DIR --json-instances=test.json
# <h3> Cloud ML Engine </h3>
#
# Now to train on Cloud ML Engine.
# %bash
# run module on Cloud ML Engine
REPO=$(pwd)
BUCKET=asl-ml-immersion-temp # CHANGE AS NEEDED
OUTDIR=gs://${BUCKET}/simplernn/model_trained
JOBNAME=simplernn_$(date -u +%y%m%d_%H%M%S)
REGION=us-central1
gsutil -m rm -rf $OUTDIR
gcloud ml-engine jobs submit training $JOBNAME \
--region=$REGION \
--module-name=trainer.task \
--package-path=${REPO}/simplernn/trainer \
--job-dir=$OUTDIR \
--staging-bucket=gs://$BUCKET \
--scale-tier=BASIC \
--runtime-version=1.2 \
-- \
--train_data_paths="gs://${BUCKET}/train.csv*" \
--eval_data_paths="gs://${BUCKET}/valid.csv*" \
--output_dir=$OUTDIR \
--num_epochs=100
# <h2> Variant: long sequence </h2>
#
# Here is how to create short sequences from a very long sequence.
# +
import tensorflow as tf
import numpy as np
def breakup(sess, x, lookback_len):
N = sess.run(tf.size(x))
windows = [tf.slice(x, [b], [lookback_len]) for b in xrange(0, N-lookback_len)]
windows = tf.stack(windows)
return windows
x = tf.constant(np.arange(1,11, dtype=np.float32))
with tf.Session() as sess:
print 'input=', x.eval()
seqx = breakup(sess, x, 5)
print 'output=', seqx.eval()
# -
# Copyright 2017 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License
| courses/machine_learning/deepdive/05_artandscience/d_customestimator.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Big-O: NLogN examples
# +
import numpy as np
def nLogn(n):
a = n
while_loop = 0
while n > 1:
while_loop += 1
n = int(n/2)
inner_iteration_count = 0
for i in range(a):
inner_iteration_count += 1
print(f"Loop = {while_loop} For n ={n} inner_iteration_count = {inner_iteration_count}")
print(f"Final Loop = {while_loop} For n ={n} inner_iteration_count = {inner_iteration_count}")
print(f"BIGO -> Log(n) = log({a}) = {round(np.log(a))} Outer Loop = {while_loop} For each outer loop , inner loop is executed {inner_iteration_count} times")
print(f"BigO = Big(nlogn) = innerLoops * outerLoops = {inner_iteration_count} * {round(np.log(a))}")
# -
nLogn(10)
nLogn(500)
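# As a quick sanity check, the small sketch below re-counts the total inner-loop work of nLogn(n) and compares it with n*log2(n) (base 2, because the outer loop halves n on every pass).
def count_inner_iterations(n):
    """Total inner-loop iterations performed by nLogn(n) (same loop structure, counters only)."""
    a = n
    total = 0
    while n > 1:
        n = int(n / 2)
        total += a # the inner for-loop runs `a` times on every outer pass
    return total
for n in (10, 100, 500, 1000):
    print(f"n = {n}: measured = {count_inner_iterations(n)}, n*log2(n) ~ {round(n * np.log2(n))}")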
| Algorithms/BigO_Examples/Big0_NLogN.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # The Effect of Scrambling Image on Training CNN-Based Classifiers
# # Contents:
#
# 1. [Outline](#outline)
# 2. [MNIST Dataset](#mnist_ds)
# 3. [Fashion-MNIST Dataset](#fashion_mnist_ds)
# 4. [CIFAR-10 Dataset](#cifar10_ds)
#
# <a id='outline'></a>
# # Outline
#
# Here we explore the effect of a random scrambling of the input image (shuffling its pixels with a fixed permutation) on classification in three tasks:
# - MNIST
# - Fashion-MNIST
# - CIFAR10
#
# First, we expect no effect on fully-connected networks, since a fixed permutation of the input features does not change what a dense layer can learn. For CNN-based networks, scrambling is expected to significantly interfere with the network's ability to learn, because it destroys the spatial locality that convolutions exploit. An interesting question is whether the classification ability of a CNN can be recovered by making the network deeper and narrower at the same time to increase the receptive field.
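#
# The short NumPy sketch below (independent of the project utilities imported next) illustrates the key property we rely on: a single fixed permutation is applied to the flattened pixels of every image, and since a permutation is a bijection it can be undone exactly with `np.argsort`, so scrambling destroys spatial locality but no information.
import numpy as np # also imported with the other dependencies below
toy_image = np.arange(16).reshape(4, 4) # stand-in for a tiny 4x4 "image"
perm = np.random.permutation(toy_image.size) # one fixed scrambler shared by all images
scrambled = toy_image.flatten()[perm].reshape(4, 4)
restored = scrambled.flatten()[np.argsort(perm)].reshape(4, 4) # argsort(perm) is the inverse permutation
assert (restored == toy_image).all()
print(scrambled)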
# +
import sys
import os
import tensorflow as tf
import matplotlib.pyplot as plt
import numpy as np
sys.path.append(
os.path.dirname(os.getcwd())
)
from utilities.tile_image_plot_utilities import\
custom_tile_image_plot,\
custom_tile_plot_with_inference_hists
from utilities.generator_utilities import ScrambledImageDataGenerator
# -
# <a id='mnist_ds'></a>
# <br><br><br>
#
# ------
# # MNIST Dataset
# +
# Get test and train features and labels for the MNIST dataset:
mnist = tf.keras.datasets.mnist
mnist_train, mnist_test = mnist.load_data()
# Check the type and size of the test and train features and labels:
print("Train data: ", mnist_train[0].shape)
print("Train labels: ", mnist_train[1].shape)
print("Test data: ", mnist_test[0].shape)
print("Test labels: ", mnist_test[1].shape)
# -
fig = plt.figure(figsize=(16., 8.))
bins = np.linspace(start=-0.5, stop=9.5, num=11, endpoint=True)
bar_heights, _, _ = plt.hist(mnist_train[1], bins=bins,
color="royalblue", edgecolor="black", alpha=0.8,
rwidth=0.9, align="mid", label="Train")
plt.hist(mnist_test[1], bins=bins, bottom=bar_heights,
color="salmon", edgecolor="black", alpha=0.8,
rwidth=0.9, align="mid", label="Test")
plt.xticks(np.arange(10), labels=np.arange(10),
fontsize=14., fontweight="normal")
plt.legend(fontsize=14.)
plt.title("MNIST Dataset", fontsize=16., fontweight="bold")
plt.grid()
plt.gca().set_axisbelow(True)
plt.show()
# ## Unscrambled Images
# +
# Visualize some of the images:
image_gen = ScrambledImageDataGenerator(
features=mnist_train[0][0:256, :, :, np.newaxis],
labels=mnist_train[1][0:256],
batch_size=255,
scrambler_array=None,
normalize=False)
custom_tile_image_plot(
(15,15),
image_gen[0][0],
labels=image_gen[0][1],
label_size=28.,
label_color="red",
filename="",
figure_size=(16., 16.))
# -
# ## Scramble Images:
# +
# Visualize scrambled images:
num_pixels = 28 * 28
scrambler = np.linspace(
start=0, stop=num_pixels, num=num_pixels,
endpoint=False, dtype=np.int32)
np.random.shuffle(scrambler)
scrambled_image_gen = ScrambledImageDataGenerator(
features=mnist_train[0][0:256, :, :, np.newaxis],
labels=mnist_train[1][0:256],
batch_size=255,
scrambler_array=scrambler,
normalize=False)
custom_tile_image_plot(
(15,15),
scrambled_image_gen[0][0],
labels=scrambled_image_gen[0][1],
label_size=28.,
label_color="red",
filename="",
figure_size=(16., 16.))
# -
# <a id='fashion_mnist_ds'></a>
# <br><br><br>
#
# ----
# # Fashion MNIST Dataset
# +
# Get test and train features and labels for the MNIST dataset:
fashion_mnist = tf.keras.datasets.fashion_mnist
fmnist_train, fmnist_test = fashion_mnist.load_data()
# Check the type and size of the test and train features and labels:
print("Train data: ", fmnist_train[0].shape)
print("Train labels: ", fmnist_train[1].shape)
print("Test data: ", fmnist_test[0].shape)
print("Test labels: ", fmnist_test[1].shape)
# -
fig = plt.figure(figsize=(16., 8.))
bins = np.linspace(start=-0.5, stop=9.5, num=11, endpoint=True)
bar_heights, _, _ = plt.hist(fmnist_train[1], bins=bins,
color="royalblue", edgecolor="black", alpha=0.8,
rwidth=0.9, align="mid", label="Train")
plt.hist(fmnist_test[1], bins=bins, bottom=bar_heights,
color="salmon", edgecolor="black", alpha=0.8,
rwidth=0.9, align="mid", label="Test")
plt.xticks(np.arange(10), labels=np.arange(10),
fontsize=14., fontweight="normal")
plt.legend(fontsize=14.)
plt.title("Fashion MNIST Dataset", fontsize=16., fontweight="bold")
plt.grid()
plt.gca().set_axisbelow(True)
plt.show()
# ## Unscrambled Images
# +
# Visualize some of the images:
image_gen = ScrambledImageDataGenerator(
features=fmnist_train[0][0:256,:,:],
labels=fmnist_train[1][0:256],
batch_size=255,
scrambler_array=None,
normalize=False)
custom_tile_image_plot(
(15,15),
image_gen[0][0],
labels=image_gen[0][1],
label_size=28.,
label_color="red",
filename="",
figure_size=(16., 16.))
# -
# ## Scrambled Images
# +
# Visualize scrambled images:
num_pixels = 28 * 28
scrambler = np.linspace(
start=0, stop=num_pixels, num=num_pixels,
endpoint=False, dtype=np.int32)
np.random.shuffle(scrambler)
scrambled_image_gen = ScrambledImageDataGenerator(
features=fmnist_train[0][0:256,:,:],
labels=fmnist_train[1][0:256],
batch_size=255,
scrambler_array=scrambler,
normalize=False)
custom_tile_image_plot(
(15,15),
scrambled_image_gen[0][0],
labels=scrambled_image_gen[0][1],
label_size=28.,
label_color="red",
filename="",
figure_size=(16., 16.))
# -
# <a id='cifar10_ds'></a>
# <br><br><br>
#
# ----
# # CIFAR-10 Dataset
# +
# Get test and train features and labels for the MNIST dataset:
cifar10 = tf.keras.datasets.cifar10
cifar10_train, cifar10_test = cifar10.load_data()
# Check the type and size of the test and train features and labels:
print("Train data: ", cifar10_train[0].shape)
print("Train labels: ", cifar10_train[1].shape)
print("Test data: ", cifar10_test[0].shape)
print("Test labels: ", cifar10_test[1].shape)
# -
fig = plt.figure(figsize=(16., 8.))
bins = np.linspace(start=-0.5, stop=9.5, num=11, endpoint=True)
bar_heights, _, _ = plt.hist(cifar10_train[1], bins=bins,
color="royalblue", edgecolor="black", alpha=0.8,
rwidth=0.9, align="mid", label="Train")
plt.hist(cifar10_test[1], bins=bins, bottom=bar_heights,
color="salmon", edgecolor="black", alpha=0.8,
rwidth=0.9, align="mid", label="Test")
plt.xticks(np.arange(10), labels=np.arange(10),
fontsize=14., fontweight="normal")
plt.legend(fontsize=14.)
plt.title("Fashion MNIST Dataset", fontsize=16., fontweight="bold")
plt.grid()
plt.gca().set_axisbelow(True)
plt.show()
# ## Unscrambled Images
# +
# Visualize some of the images:
image_gen = ScrambledImageDataGenerator(
features=cifar10_train[0][0:256,:,:],
labels=cifar10_train[1][0:256, 0],
batch_size=255,
scrambler_array=None,
normalize=False)
custom_tile_image_plot(
(15,15),
image_gen[0][0],
labels=image_gen[0][1],
label_size=18.,
label_color="red",
filename="",
figure_size=(16., 16.))
# -
# ## Scrambled Images
# +
# Visualize scrambled images:
num_pixels = 32 * 32
scrambler = np.linspace(
start=0, stop=num_pixels, num=num_pixels,
endpoint=False, dtype=np.int32)
np.random.shuffle(scrambler)
scrambled_image_gen = ScrambledImageDataGenerator(
features=cifar10_train[0][0:256,:,:],
labels=cifar10_train[1][0:256, 0],
batch_size=255,
scrambler_array=scrambler,
normalize=False)
custom_tile_image_plot(
(15,15),
scrambled_image_gen[0][0],
labels=scrambled_image_gen[0][1],
label_size=18.,
label_color="red",
filename="",
figure_size=(16., 16.))
# -
# ## Unscramble Scrambled Images
# +
unscrambler = np.argsort(scrambler)
unscrambled_images = np.zeros_like(scrambled_image_gen[0][0])
for idx in range(unscrambled_images.shape[0]):
for c in range(3):
temp_array = scrambled_image_gen[0][0][idx, :, :, c].flatten()[unscrambler]
unscrambled_images[idx, :, :, c] = temp_array.reshape(
unscrambled_images.shape[1: -1])
custom_tile_image_plot(
(15,15),
unscrambled_images,
labels=scrambled_image_gen[0][1],
label_size=18.,
label_color="red",
filename="",
figure_size=(16., 16.))
# -
# +
# ## Model Constructor
# def FCNClassifierModelConstructor( input_shape,
# numb_classes,
# hidden_layers_map={1:16,2:32,3:64,4:32,5:32,6:16,7:8},
# activation=tf.nn.relu ):
# """
# Constructs and retursn a fully connected tf.keras model.
# Args:
# input_shape (tuple): Input shape.
# numb_classes (int): Number of classes (output layer size).
# hidden_layers_map (dict): If provided, the *hidden* layers are constructed as outlined.
# Note that this dictionary excludes the last layer!
# activation (tf.nn): An instance of activation function.
# Returns:
# tf.keras.model
# """
# input_size=1
# for d in input_shape:
# input_size *= d
# #
# ## Construct model
# model_ = tf.keras.models.Sequential()
# model_.add( tf.keras.layers.Flatten( input_shape=input_shape,
# name="Flatten" ) )
# for l in sorted(hidden_layers_map,reverse=False):
# if( l==1 ):
# model_.add( tf.keras.layers.Dense( hidden_layers_map[l],
# input_dim=input_size,
# activation=activation,
# use_bias=True,
# kernel_initializer='glorot_uniform',
# bias_initializer='zeros',
# kernel_regularizer=None,
# bias_regularizer=None,
# activity_regularizer=None,
# kernel_constraint=None,
# bias_constraint=None,
# name="Dense_"+str(l) ) )
# else:
# model_.add( tf.keras.layers.Dense( hidden_layers_map[l],
# activation=activation,
# use_bias=True,
# kernel_initializer='glorot_uniform',
# bias_initializer='zeros',
# kernel_regularizer=None,
# bias_regularizer=None,
# activity_regularizer=None,
# kernel_constraint=None,
# bias_constraint=None,
# name="Dense_"+str(l) ) )
# model_.add( tf.keras.layers.Dense(numb_classes, activation=tf.nn.softmax,name="Softmax") )
# #
# return model_
# def CNNClassifierModelConstructor( input_shape,
# numb_classes,
# cnn_layers_map={1:(16, (4,4), (1,1), (4,4), (1,1)),
# 2:(16, (4,4), (1,1), (4,4), (1,1)),
# 3:(16, (4,4), (1,1), (4,4), (1,1)),
# 4:(16, (4,4), (1,1), None, None),
# 5:(16, (4,4), (1,1), None, None) },
# fcn_layers_map={1:64,2:32},
# cnn_activation=tf.nn.relu,
# fcn_activation=tf.nn.relu,
# padding='valid',
# data_format='channels_last' ):
# """
# Constructs and retursn a CNN tf.keras model.
# Args:
# input_shape (tuple): Input shape.
# numb_classes (int): Number of classes (output layer size).
# cnn_layers_map (dict): If provided, the *convolutional* layers are constructed as outlined. It is
# a dictionary with layer number as key and 5-dimensional tuple as value. The
# last two elements in the tuple are pertinent to the max pool layers. If set
# to None, max pooling will be skipped.
# fcn_layers_map (dict): If provided, the *fully connected* layers are constructed as outlined.
# Note that this dictionary excludes the last layer!
# cnn_activation (tf.nn): An instance of activation function.
# fcn_activation (tf.nn): An instance of activation function.
# padding (str): Type of padding for CNN and MaxPool layers: 'valid' or 'simple'
# data_format (str): Data format of the input:
# channels_first <---> (batch, height, width, channels)
# channels_last <---> (batch, channels, height, width)
# Returns:
# tf.keras.model
# """
# model_ = tf.keras.models.Sequential()
# for l in sorted(cnn_layers_map,reverse=False):
# if( l==1 ):
# model_.add( tf.keras.layers.Conv2D( input_shape=input_shape,
# filters=cnn_layers_map[l][0],
# kernel_size=cnn_layers_map[l][1],
# strides=cnn_layers_map[l][2],
# padding=padding,
# data_format=data_format,
# dilation_rate=(1,1),
# activation=cnn_activation,
# use_bias=True,
# kernel_initializer='glorot_uniform',
# bias_initializer='zeros',
# kernel_regularizer=None,
# bias_regularizer=None,
# activity_regularizer=None,
# kernel_constraint=None,
# bias_constraint=None,
# name="CNN_"+str(l) ) )
# if( cnn_layers_map[l][3] is not None ):
# model_.add( tf.keras.layers.MaxPool2D( pool_size=cnn_layers_map[l][3],
# strides=cnn_layers_map[l][4],
# padding=padding,
# data_format=None,
# name="MaxPool_"+str(l) ) )
# else:
# model_.add( tf.keras.layers.Conv2D( filters=cnn_layers_map[l][0],
# kernel_size=cnn_layers_map[l][1],
# strides=cnn_layers_map[l][2],
# padding=padding,
# data_format=data_format,
# dilation_rate=(1,1),
# activation=cnn_activation,
# use_bias=True,
# kernel_initializer='glorot_uniform',
# bias_initializer='zeros',
# kernel_regularizer=None,
# bias_regularizer=None,
# activity_regularizer=None,
# kernel_constraint=None,
# bias_constraint=None,
# name="CNN_"+str(l) ) )
# if( cnn_layers_map[l][3] is not None ):
# model_.add( tf.keras.layers.MaxPool2D( pool_size=cnn_layers_map[l][3],
# strides=cnn_layers_map[l][4],
# padding=padding,
# data_format=None,
# name="MaxPool_"+str(l) ) )
# model_.add(tf.keras.layers.Flatten( name="Flatten" ))
# for l in sorted(fcn_layers_map,reverse=False):
# model_.add( tf.keras.layers.Dense( fcn_layers_map[l],
# activation=fcn_activation,
# use_bias=True,
# kernel_initializer='glorot_uniform',
# bias_initializer='zeros',
# kernel_regularizer=None,
# bias_regularizer=None,
# activity_regularizer=None,
# kernel_constraint=None,
# bias_constraint=None,
# name="Dense_"+str(l+len(cnn_layers_map)) ) )
# ## Last layer:
# model_.add( tf.keras.layers.Dense(numb_classes, activation=tf.nn.softmax,name="Softmax") )
# #
# return model_
# +
## CASE Ia: FCN Without scrambling:
tf.reset_default_graph()
#
## Construct a model
fcn_wo_model = FCNClassifierModelConstructor( input_shape=(28,28),
numb_classes=10,
hidden_layers_map={1:512, 2:256, 3:128, 4:64},
activation=tf.nn.relu )
print( fcn_wo_model.summary() )
print( "_"*32, end="\n\n" )
#
## Compiling the model:
fcn_wo_model.compile( optimizer='adam',
loss='sparse_categorical_crossentropy',
metrics=['accuracy'] )
#
## We add a tensorboard callback:
tbCallBack = tf.keras.callbacks.TensorBoard( log_dir='./MNIST_FCN_WO_Scrambling',
histogram_freq=1,
batch_size=32,
write_graph=True,
write_images=True,
write_grads=True,
update_freq='epoch')
#
## Early stopping callback to prevent overfitting:
earlystopCallback = tf.keras.callbacks.EarlyStopping( monitor='val_loss',
min_delta=0.001,
patience=10,
verbose=0,
mode='auto',
baseline=None,
restore_best_weights=True )
#
## Construct generators for training and validation:
tv_idx_split = int(0.8*mnist_x_train.shape[0])
train_generator = DataGenerator( features=mnist_x_train[0:tv_idx_split,:,:],
labels=mnist_y_train[0:tv_idx_split],
batch_size=32,
scrambler_array=None,
normalize=True )
validation_generator = DataGenerator( features=mnist_x_train[tv_idx_split:,:,:],
labels=mnist_y_train[tv_idx_split:],
batch_size=32,
scrambler_array=None,
normalize=True )
print( "Training Length: " , len(train_generator) )
print( "Validation Length: " , len(validation_generator) )
print( "_"*32, end="\n\n" )
#
## Training Time!
##==============
fcn_wo_model.fit_generator( generator=train_generator,
steps_per_epoch=None,
epochs=10000,
verbose=2,
callbacks=[earlystopCallback,tbCallBack],
validation_data=validation_generator,
validation_steps=None,
class_weight=None,
max_queue_size=100,
workers=8,
use_multiprocessing=True,
initial_epoch=0 )
print( "_"*32, end="\n\n" )
#
## Testing Time!
##==============
fcn_wo_model.evaluate( x=mnist_x_test,
y=mnist_y_test,
batch_size=None,
verbose=1,
sample_weight=None,
steps=None,
callbacks=None,
max_queue_size=10,
workers=8,
use_multiprocessing=True )
# -
test_generator = DataGenerator( features=mnist_x_test,
labels=mnist_y_test,
batch_size=1,
scrambler_array=None,
normalize=True )
print( "Test Length: " , len(test_generator) )
y_predict = fcn_wo_model.predict_generator( generator=test_generator,
steps=None,
callbacks=None,
max_queue_size=10,
workers=8,
use_multiprocessing=True,
verbose=0 )
CustomTilePlotWithHistogram( (10,10),
images=mnist_x_test,
labels=mnist_y_test,
predictions=y_predict,
classes=np.linspace(start=0,stop=10,num=10,endpoint=False,dtype=np.uint8),
only_mispredicted=True,
filename='',
cmap='gray',
label_size=32 )
# +
## CASE IIa: FCN WITH scrambling:
tf.reset_default_graph()
#
## Construct a model
fcn_w_model = FCNClassifierModelConstructor( input_shape=(28,28),
numb_classes=10,
hidden_layers_map={1:512, 2:256, 3:128, 4:64},
activation=tf.nn.relu )
print( fcn_w_model.summary() )
print( "_"*32, end="\n\n" )
#
## Compiling the model:
fcn_w_model.compile( optimizer='adam',
loss='sparse_categorical_crossentropy',
metrics=['accuracy'] )
#
## We add a tensorboard callback:
tbCallBack = tf.keras.callbacks.TensorBoard( log_dir='./MNIST_FCN_W_Scrambling',
histogram_freq=1,
batch_size=32,
write_graph=True,
write_images=True,
write_grads=True,
update_freq='epoch')
#
## Early stopping callback to prevent overfitting:
earlystopCallback = tf.keras.callbacks.EarlyStopping( monitor='val_loss',
min_delta=0.001,
patience=10,
verbose=0,
mode='auto',
baseline=None,
restore_best_weights=True )
#
## Construct generators for training and validation:
tv_idx_split = int(0.8*mnist_x_train.shape[0])
train_generator = DataGenerator( features=mnist_x_train[0:tv_idx_split,:,:],
labels=mnist_y_train[0:tv_idx_split],
batch_size=32,
scrambler_array=mnist_scrambler,
normalize=True )
validation_generator = DataGenerator( features=mnist_x_train[tv_idx_split:,:,:],
labels=mnist_y_train[tv_idx_split:],
batch_size=32,
scrambler_array=mnist_scrambler,
normalize=True )
print( "Training Length: " , len(train_generator) )
print( "Validation Length: " , len(validation_generator) )
print( "_"*32, end="\n\n" )
#
## Training Time!
##==============
fcn_w_model.fit_generator( generator=train_generator,
steps_per_epoch=None,
epochs=10000,
verbose=2,
callbacks=[earlystopCallback,tbCallBack],
validation_data=validation_generator,
validation_steps=None,
class_weight=None,
max_queue_size=100,
workers=8,
use_multiprocessing=True,
initial_epoch=0 )
print( "_"*32, end="\n\n" )
#
## Testing Time!
##==============
test_generator = DataGenerator( features=mnist_x_test,
labels=mnist_y_test,
batch_size=32,
scrambler_array=mnist_scrambler,
normalize=True )
print( "Test Length: " , len(test_generator) )
fcn_w_model.evaluate_generator( test_generator,
steps=None,
callbacks=None,
max_queue_size=10,
workers=8,
use_multiprocessing=True,
verbose=0 )
# +
# ## CASE Ib: CCN WITHOUT scrambling:
tf.reset_default_graph()
#
## Construct a model
ccn_wo_model = CNNClassifierModelConstructor( input_shape=(28,28,1),
numb_classes=10,
cnn_layers_map={1:(32, (4,4), (1,1), (8,8), (1,1)),
2:(16, (4,4), (1,1), (4,4), (1,1)),
3:(8, (4,4), (1,1), (2,2), (1,1)) },
fcn_layers_map={1:64,2:32},
cnn_activation=tf.nn.relu,
fcn_activation=tf.nn.relu,
padding='valid',
data_format='channels_last' )
print( ccn_wo_model.summary() )
print( "_"*32, end="\n\n" )
#
## Compiling the model:
ccn_wo_model.compile( optimizer='adam',
loss='sparse_categorical_crossentropy',
metrics=['accuracy'] )
#
## We add a tensorboard callback:
tbCallBack = tf.keras.callbacks.TensorBoard( log_dir='./MNIST_CCN_WO_Scrambling',
histogram_freq=1,
batch_size=32,
write_graph=True,
write_images=True,
write_grads=True,
update_freq='epoch')
#
## Early stopping callback to prevent overfitting:
earlystopCallback = tf.keras.callbacks.EarlyStopping( monitor='val_loss',
min_delta=0.001,
patience=10,
verbose=0,
mode='auto',
baseline=None,
restore_best_weights=True )
#
## Construct generators for training and validation:
tv_idx_split = int(0.8*mnist_x_train.shape[0])
train_generator = DataGenerator( features=mnist_x_train[0:tv_idx_split,:,:,np.newaxis],
labels=mnist_y_train[0:tv_idx_split],
batch_size=32,
scrambler_array=None,
normalize=True )
validation_generator = DataGenerator( features=mnist_x_train[tv_idx_split:,:,:,np.newaxis],
labels=mnist_y_train[tv_idx_split:],
batch_size=32,
scrambler_array=None,
normalize=True )
print( "Training Length: " , len(train_generator) )
print( "Validation Length: " , len(validation_generator) )
print( "_"*32, end="\n\n" )
#
## Training Time!
##==============
ccn_wo_model.fit_generator( generator=train_generator,
steps_per_epoch=None,
epochs=10000,
verbose=2,
callbacks=[earlystopCallback,tbCallBack],
validation_data=validation_generator,
validation_steps=None,
class_weight=None,
max_queue_size=100,
workers=8,
use_multiprocessing=True,
initial_epoch=0 )
#
## Testing Time!
##==============
ccn_wo_model.evaluate( x=mnist_x_test[:,:,:,np.newaxis],
y=mnist_y_test,
batch_size=None,
verbose=1,
sample_weight=None,
steps=None,
callbacks=None,
max_queue_size=10,
workers=8,
use_multiprocessing=True )
# -
# test_generator = DataGenerator( features=mnist_x_test[:,:,:,np.newaxis],
# labels=mnist_y_test,
# batch_size=1,
# scrambler_array=None,
# normalize=True )
# print( "Test Length: " , len(test_generator) )
# y_predict = ccn_wo_model.predict_generator( generator=test_generator,
# steps=None,
# callbacks=None,
# max_queue_size=10,
# workers=8,
# use_multiprocessing=True,
# verbose=0 )
y_predict = ccn_wo_model.predict( x=mnist_x_test[:,:,:,np.newaxis],
batch_size=None,
verbose=0,
steps=None,
callbacks=None,
max_queue_size=10,
workers=8,
use_multiprocessing=True )
CustomTilePlotWithHistogram( (10,10),
images=mnist_x_test,
labels=mnist_y_test,
predictions=y_predict,
classes=np.linspace(start=0,stop=10,num=10,endpoint=False,dtype=np.uint8),
only_mispredicted=True,
filename='',
cmap='gray',
label_size=32 )
# +
## CASE IIb: CCN WITH scrambling:
tf.reset_default_graph()
#
## Construct a model
ccn_w_model = CNNClassifierModelConstructor( input_shape=(28,28,1),
numb_classes=10,
cnn_layers_map={1:(32, (4,4), (1,1), (8,8), (1,1)),
2:(16, (4,4), (1,1), (4,4), (1,1)),
3:(8, (4,4), (1,1), (2,2), (1,1)) },
fcn_layers_map={1:64,2:32},
cnn_activation=tf.nn.relu,
fcn_activation=tf.nn.relu,
padding='valid',
data_format='channels_last' )
print( ccn_w_model.summary() )
print( "_"*32, end="\n\n" )
#
## Compiling the model:
ccn_w_model.compile( optimizer='adam',
loss='sparse_categorical_crossentropy',
metrics=['accuracy'] )
#
## We add a tensorboard callback:
tbCallBack = tf.keras.callbacks.TensorBoard( log_dir='./MNIST_CCN_W_Scrambling',
histogram_freq=1,
batch_size=32,
write_graph=True,
write_images=True,
write_grads=True,
update_freq='epoch')
#
## Early stopping callback to prevent overfitting:
earlystopCallback = tf.keras.callbacks.EarlyStopping( monitor='val_loss',
min_delta=0.001,
patience=10,
verbose=0,
mode='auto',
baseline=None,
restore_best_weights=True )
#
## Construct generators for training and validation:
tv_idx_split = int(0.8*mnist_x_train.shape[0])
train_generator = DataGenerator( features=mnist_x_train[0:tv_idx_split,:,:,np.newaxis],
labels=mnist_y_train[0:tv_idx_split],
batch_size=32,
scrambler_array=mnist_scrambler,
normalize=True )
validation_generator = DataGenerator( features=mnist_x_train[tv_idx_split:,:,:,np.newaxis],
labels=mnist_y_train[tv_idx_split:],
batch_size=32,
scrambler_array=mnist_scrambler,
normalize=True )
print( "Training Length: " , len(train_generator) )
print( "Validation Length: " , len(validation_generator) )
print( "_"*32, end="\n\n" )
#
## Training Time!
##==============
ccn_w_model.fit_generator( generator=train_generator,
steps_per_epoch=None,
epochs=10000,
verbose=2,
callbacks=[earlystopCallback,tbCallBack],
validation_data=validation_generator,
validation_steps=None,
class_weight=None,
max_queue_size=100,
workers=8,
use_multiprocessing=True,
initial_epoch=0 )
#
## Testing Time!
##==============
test_generator = DataGenerator( features=mnist_x_test[:,:,:,np.newaxis],
labels=mnist_y_test,
batch_size=32,
scrambler_array=mnist_scrambler,
normalize=True )
print( "Test Length: " , len(test_generator) )
ccn_w_model.evaluate_generator( test_generator,
steps=None,
callbacks=None,
max_queue_size=10,
workers=8,
use_multiprocessing=True,
verbose=0 )
| old_projects/cnn_classifiers/classification_with_scrambled_images.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .jl
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Julia 1.0.0
# language: julia
# name: julia-1.0
# ---
include("JQuantmod.jl")
using PyPlot
P = get_log_return("AAPL","2018-01-01","2018-10-01")
Q = get_log_return_sp500("2018-01-01","2018-10-01")
plot(cumsum(P), label="Apple")
plot(cumsum(Q), label="Average")
legend()
# Compute the Sharpe ratio
using Statistics
irx = get_irx("2018-01-01","2018-10-01")
P = get_log_return("AAPL","2018-01-01","2018-10-01")
excess = P - irx
println("Sharpe Ratio = $(mean(excess)/std(excess))")
| test/test.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # In-Class Coding Lab: Strings
#
# The goals of this lab are to help you to understand:
#
# - String slicing for substrings
# - How to use Python's built-in String functions in the standard library.
# - Tokenizing and Parsing Data
# - How to create user-defined functions to parse and tokenize strings
#
#
# # Strings
#
# ## Strings are immutable sequences
#
# Python strings are immutable sequences. This means we cannot change them "in part" and there is implicit ordering.
#
# The characters in a string are zero-based. Meaning the index of the first character is 0.
#
# We can leverage this in a variety of ways.
#
# For example:
# +
x = input("Enter something: ")
print ("You typed:", x)
print ("number of characters:", len(x) )
print ("First character is:", x[0])
print ("Last character is:", x[-1])
## They're sequences, so you can definitely loop over them:
print("Printing one character at a time: ")
for ch in x:
print(ch) # print a character at a time!
# -
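# Because strings are immutable, you cannot assign to a single character in place; doing so raises a `TypeError`. The usual pattern is to build a new string instead:
word = "Python"
try:
    word[0] = "J" # in-place modification is not allowed for strings
except TypeError as err:
    print("Strings are immutable:", err)
word = "J" + word[1:] # build a new string instead
print(word) # Jython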
# ## Slices as substrings
#
# Python lists and sequences use **slice notation** which is a clever way to get substring from a given string.
#
# Slice notation requires two values: A start index and the end index. The substring returned starts at the start index, and *ends at the position before the end index*. It ends at the position *before* so that when you slice a string into parts you know where you've "left off".
#
# For example:
state = "Mississippi"
print (state[0:4]) # Miss
print (state[4:len(state)]) # issippi
# In this next example, play around with the variable `split`, adjusting it to control where the string is split. Re-run the cell several times with different values to get a feel for what happens.
state = "Mississippi"
split = 4 # TODO: play around with this number
left = state[0:split]
right = state[split:len(state)]
print(left, right)
# ### Slicing from the beginning or to the end
#
# If you omit the begin or end slice, Python will slice from the beginning of the string or all the way to the end. So if you say `x[:5]` it's the same as `x[0:5]`
#
# For example:
state = "Ohio"
print(state[0:2], state[:2]) # same!
print(state[2:len(state)], state[2:]) # same
# ### Now Try It!
#
# Split the string `"New Hampshire"` into two sub-strings one containing `"New"` the other containing `"Hampshire"` (without the space).
## TODO: Write code here
state = "New Hampshire"
print(state[0:3])
print(state[4:13])
# ## Python's built in String Functions
#
# Python includes several handy built-in string functions (also known as *methods* in object-oriented parlance). To get a list of available functions, use the `dir()` function on any string variable, or on the type `str` itself.
#
print ( dir(str))
# Let's suppose you want to learn how to use the `count` function. There are 2 ways you can do this.
#
# 1. search the web for `python 3 str count` or
# 1. bring up internal help `help(str.count)`
#
# Both have their advantages and disadvantages. I would start with the second one, and only fall back to a web search when you can't figure it out from the Python documentation.
#
# Here's the documentation for `count`
help(str.count)
# You'll notice the help output says `S.count(...)`. This indicates `count` is a method function, which means you invoke it like this: `variable.count()`.
#
# ### Now Try It
#
# Try to use the `count()` method to count the number of `'i'`'s in the string `'Mississippi'`:
state = 'Mississippi'
sub = 'i'
print("The number of i's in Mississippi is: ")
state.count(sub,0,11)
# ### TANGENT: The Subtle difference between function and method.
#
# You'll notice sometimes we call our function alone, and other times it's attached to a variable, as was the case in the example above. When we say `state.count('i')`, the period (`.`) between the variable and the function indicates this function is a *method function*. The key difference between the two is that a method is attached to a variable. To call a method function you must say `variable.function()`, whereas when you call a function it's just `function()`. The variable associated with the method call is usually part of the function's context.
#
# Here's an example:
name = "Larry"
print( len(name) ) # a function call len(name) stands on its own. Gets length of 'Larry'
print( name.__len__() ) # a method call name.__len__() does the same thing for its variable 'Larry'
# ### Now Try It
#
# Try to figure out which built in string function to use to accomplish this task.
#
# Write some code to find the text `'is'` in some text. The program should output the first position of `'is'` in the text.
#
# Examples:
#
# ```
# When: text = 'Mississippi' then position = 1
# When: text = "This is great" then position = 2
# When: text = "Burger" then position = -1
# ```
# TODO: Write your code here
text = input("Enter some text: ")
print(text.find('is'))
# ### Now Try It
#
# **Is that a URL?**
#
# Try to write a rudimentary URL checker. The program should input a text string and then use the `startswith` function to check if the string begins with `"http://"` or `"https://"` If it does we can assume it is a URL.
text = input("Enter a string: ")  # use a name other than the built-in str
print(text.startswith( 'https://' ) or text.startswith( 'http://' ))
| content/lessons/07/Class-Coding-Lab/CCL-Strings.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Homework – Spectral Analysis
# ## IEOR 135/290, Data-X: Applied Data Ventures
# Author: <NAME> (in collaboration with <NAME>)
#
# UC Berkeley, B.S. EECS'21
#
# Email: <EMAIL>
#
#
# ## Objective
#
# In this homework, we will explore using spectral analysis to understand how you can generate features that you can use to build classification and predictive models for timeseries data.
# ### Setting Up Imports and Dataset
# +
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np
import librosa
# %matplotlib inline
sns.set(rc={'figure.figsize':(14,6)})
# +
samples, sampling_rate = librosa.load("audio.wav", sr=None, mono=True, offset=0.0, duration=None)
len(samples), sampling_rate
# -
# ### Your Audio File
plt.plot(np.linspace(start=0, stop=len(samples)/sampling_rate, num=len(samples)), samples)
plt.xlabel("Duration (s)")
plt.ylabel("Normalized Aplitude")
# ### Question 1
# Compute the Fourier transform of the audio samples, storing the complex spectrum in `fft_x` and the corresponding bin frequencies in `freq`.
freq = ...
fft_x = ...
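# One common NumPy approach to filling in the placeholders above (a sketch, assuming the `samples` and `sampling_rate` loaded earlier; not necessarily the intended solution):
# +
fft_x = np.fft.fft(samples)                              # complex spectrum of the audio samples
freq = np.fft.fftfreq(len(samples), d=1/sampling_rate)   # frequency (Hz) of each FFT bin
# -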
# -------------------
# As expected, the array above has real and imaginary components. The plot below shows the magnitude of `fft_x` plotted against sample count.
#
# **Plot:** Fourier Transformation of Audio File vs. Sample Count
plt.plot(abs(fft_x))
plt.xlabel("Sample Count")
# ### Question 2
# What do you notice about this signal?
# +
# Use this space to manipulate the signal for your analysis
# -
# *Your answer here*
# ### Question 3
# **Question 3a: Shifting**
#
# Use the Swap Half Spaces technique to process the FFT signal. Generate plots and discuss your conclusions.
# +
# Write your code here
fft_x_shifted = ...
freq_shifted = ...
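# A sketch of the "Swap Half Spaces" step using `np.fft.fftshift` (one common interpretation of the technique; assumes `fft_x` and `freq` from Question 1):
# +
fft_x_shifted = np.fft.fftshift(fft_x)   # move the negative-frequency half to the left of DC
freq_shifted = np.fft.fftshift(freq)     # reorder the bin frequencies to match
# -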
# +
# Write your code here to generate plots
plt.plot(freq_shifted, np.abs(fft_x_shifted))
plt.xlabel("Frequency (Hz)")
# -
# *Discuss your conclusions here*
# **Question 3b: Folding**
#
# Use the Fold Negative Frequencies technique to process the FFT Signal
# +
# Write your code here
half_n = ...
fft_x_half = ...
freq_half = ...
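# One reading of the Fold Negative Frequencies technique (a sketch; assumes `fft_x` and `freq` from Question 1): keep the non-negative half of the spectrum and double it to account for the discarded negative half (strictly, DC and Nyquist should not be doubled; that detail is ignored here).
# +
half_n = len(fft_x) // 2            # number of non-negative-frequency bins
fft_x_half = 2 * fft_x[:half_n]     # folded (doubled) positive-frequency spectrum
freq_half = freq[:half_n]           # matching non-negative frequencies
# -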
# +
# Write your code here to generate plots
plt.plot(freq_half, np.abs(fft_x_half))
plt.xlabel("Frequency (Hz)")
plt.ylabel("Amplitude")
# -
# *Discuss your conclusions here*
# ### BONUS: Question 4
# **Question 4a: For each point in the FFT, how wide of a frequency bin does it represent?**
# *Discuss your answer here*
# **Question 4b: What index point in the FFT represents 1000 Hz?**
# *Discuss your answer here*
# **Question 4c: What fraction of power in this spectrum is between 0 and 1000 Hz**
#
# **Hint:** It will be the sum of the (fft magnitude values x 2)^2 in the range 0 Hz to 1000Hz in the full fft
# *Discuss your answer here*
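# A rough sketch of the computation described in the hint (assumes `fft_x` and `freq` from Question 1 and a 1000 Hz cutoff; not necessarily the graded answer):
# +
half_n = len(fft_x) // 2
one_sided_power = (2 * np.abs(fft_x[:half_n])) ** 2     # (fft magnitude x 2)^2 per bin
band = freq[:half_n] <= 1000                            # bins between 0 and 1000 Hz
print(f"Fraction of power between 0 and 1000 Hz: {one_sided_power[band].sum() / one_sided_power.sum():.3f}")
# -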
# ### BONUS: Question 5
# In this question, we will modify Question 4 to take the FFT of only the first 250 ms of the original sample
# **Question 5a: For each point in the FFT, how wide of a frequency bin does it represent?**
# *Discuss your answer here*
# **Question 5b: What index point in the FFT represents 1000 Hz**
# *Discuss your answer here*
# **Question 5c: What fraction of power in this spectrum is between 0 and 1000 Hz**
#
# **Hint:** It will be the sum of the (fft magnitude values x 2)^2 in the range 0 Hz to 1000Hz in the full fft
# *Discuss your answer here*
| 02-data-x-signals/m250-theory-signal-spectral-lti-ffts/hw-m250-theory-signal-spectral-lti-ffts.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # Stacking & Grid Search Example
# This notebook shows how to tune a stacking-classifier via GridSearch
# %load_ext watermark
# %watermark -p scikit-learn,mlxtend,xgboost
# ## Dataset
# +
from sklearn import model_selection
from sklearn.model_selection import train_test_split
from sklearn import datasets
data = datasets.load_breast_cancer()
X, y = data.data, data.target
X_train, X_test, y_train, y_test = \
train_test_split(X, y, test_size=0.3, random_state=1, stratify=y)
X_train_sub, X_valid, y_train_sub, y_valid = \
train_test_split(X_train, y_train, test_size=0.2, random_state=1, stratify=y_train)
print('Train/Valid/Test sizes:', y_train_sub.shape[0], y_valid.shape[0], y_test.shape[0])
# -
# ## Baseline
# +
from sklearn.ensemble import RandomForestClassifier
from xgboost import XGBClassifier
from mlxtend.classifier import StackingCVClassifier
from sklearn.linear_model import LogisticRegression
forest = RandomForestClassifier(n_estimators=100,
random_state=123)
boost = XGBClassifier(random_state=123, verbosity=0, use_label_encoder=False)
metaclassifier = LogisticRegression(random_state=123)
sclf = StackingCVClassifier(classifiers=[forest, boost],
meta_classifier=metaclassifier,
random_state=123)
# -
# Random forest:
forest.fit(X_train_sub, y_train_sub)
print(f"Training Accuracy: {forest.score(X_train_sub, y_train_sub):0.2f}")
print(f"Validation Accuracy: {forest.score(X_valid, y_valid):0.2f}")
print(f"Test Accuracy: {forest.score(X_test, y_test):0.2f}")
# Gradient boosting:
boost.fit(X_train_sub, y_train_sub)
print(f"Training Accuracy: {boost.score(X_train_sub, y_train_sub):0.2f}")
print(f"Validation Accuracy: {boost.score(X_valid, y_valid):0.2f}")
print(f"Test Accuracy: {boost.score(X_test, y_test):0.2f}")
# Stacking:
sclf.fit(X_train_sub, y_train_sub)
print(f"Training Accuracy: {sclf.score(X_train_sub, y_train_sub):0.2f}")
print(f"Validation Accuracy: {sclf.score(X_valid, y_valid):0.2f}")
print(f"Test Accuracy: {sclf.score(X_test, y_test):0.2f}")
# ## Grid Search
# Example showing how to tune hyperparameters of a nested ensemble using grid search:
# +
import numpy as np
from sklearn.model_selection import GridSearchCV
from sklearn.pipeline import make_pipeline
#from joblib import parallel_backend
pipe = make_pipeline(sclf)
params = {
'stackingcvclassifier__use_probas': [True, False],
'stackingcvclassifier__drop_proba_col': [None, 'last'],
'stackingcvclassifier__xgbclassifier__max_depth': [2, 4, 6, 8],
'stackingcvclassifier__randomforestclassifier__n_estimators': [100, 1000]
}
grid = GridSearchCV(estimator=pipe,
param_grid=params,
cv=10,
n_jobs=1,
verbose=2)
#with parallel_backend('threading'):
grid.fit(X_train, y_train)
grid.best_score_
# -
grid.best_params_
print(f"Training Accuracy: {grid.best_estimator_.score(X_train, y_train):0.2f}")
#print(f"Validation Accuracy: {grid.best_estimator_.score(X_valid, y_valid):0.2f}")
print(f"Test Accuracy: {grid.best_estimator_.score(X_test, y_test):0.2f}")
| hyperparameter-tuning-examples/01.2-gridsearch-stacking-example.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/ViFLara/Statistics-and-Machine-Learning/blob/master/Aula2_pandas.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + id="-Rnod5Faiz7a"
import pandas as pd
# + colab={"base_uri": "https://localhost:8080/"} id="fC_ke79t3pfF" outputId="e6b5c554-7d65-44df-e6d1-d7d6fc3bc065"
from google.colab import drive
drive.mount('/content/drive')
# + id="cfVByZsDjKJ4"
df1 = pd.read_excel("/content/Aracaju.xlsx")
df2 = pd.read_excel("/content/Fortaleza.xlsx")
df3 = pd.read_excel("/content/Natal.xlsx")
df4 = pd.read_excel("/content/Recife.xlsx")
df5 = pd.read_excel("/content/Salvador.xlsx")
# + colab={"base_uri": "https://localhost:8080/", "height": 204} id="zXpdcUnhl3YJ" outputId="97999170-c54b-4490-da74-3e6844dd791b"
df1.head()
# + id="RcAnvGRilkpK"
df = pd.concat([df1,df2,df3,df4,df5])
# + colab={"base_uri": "https://localhost:8080/", "height": 204} id="VsYjW_tymNAZ" outputId="bc8034b5-15d2-498e-c383-81fb5b94d1f7"
df.head()
# + colab={"base_uri": "https://localhost:8080/", "height": 204} id="Hdl6noy3mTi8" outputId="cdfee64b-2d5b-416b-cb32-85d43048e986"
df.tail()
# + colab={"base_uri": "https://localhost:8080/", "height": 204} id="AM6mBfF_mwRt" outputId="c8aae8bc-62c5-441f-d75e-4e01f98ccbdc"
df.sample(5)
# + colab={"base_uri": "https://localhost:8080/"} id="JN-iiOzgmZOU" outputId="64e2fb9e-a72e-445b-af7c-18a5a35bd88c"
df.dtypes
# + id="gts5xmbAnFD2"
df['LojaID'] = df['LojaID'].astype("object")
# + colab={"base_uri": "https://localhost:8080/"} id="dQQqNykHnXuH" outputId="b6acd92a-7f74-4ca0-e874-f62b8cac638b"
df.dtypes
# + colab={"base_uri": "https://localhost:8080/", "height": 204} id="J-Mpt6nSnczp" outputId="1dfa674d-5932-41b6-fa19-ea891a7c3763"
df.head()
# + [markdown] id="MWR7fLkZnrz9"
# # Handling missing values
# + colab={"base_uri": "https://localhost:8080/"} id="gZiuIgRXoEmJ" outputId="15d7c48d-a465-42f4-a583-1cc106430dfd"
df.isnull().sum()
# + id="GteHPaaipC4D"
# Replacing null values with the mean
df['Vendas'].fillna(df['Vendas'].mean(), inplace=True)
# + colab={"base_uri": "https://localhost:8080/"} id="b8jhgiIxp3_8" outputId="58fcf817-957a-41f2-dd59-9f3cf516b6c6"
df['Vendas'].mean()
# + id="ioyxdoQ1oUQE"
# Replacing null values with zero
df['Vendas'].fillna(0, inplace=True)
# + id="CPRKPOZxqVtj"
# Dropping rows with null values
df.dropna(inplace=True)
# + id="9quCaE9eqfxM"
df.dropna(subset=['Vendas'], inplace=True)
# + id="Z8zQP_IKq3CK"
# Removing rows with missing values in all columns
df.dropna(how="all", inplace=True)
# + [markdown] id="1WB3J1xKslYj"
# # Creating new columns
# + id="WnWrJclNsv_m"
df['Receita'] = df['Vendas'].mul(df['Qtde'])
# + colab={"base_uri": "https://localhost:8080/", "height": 204} id="xQBN1CdGtP0J" outputId="d026ca33-a143-4224-875d-51727cc19323"
df.head()
# + id="deDNMtUxtUMH"
df['Receita/Vendas'] = df['Receita'] / df['Vendas']
# + colab={"base_uri": "https://localhost:8080/", "height": 204} id="5dv3-AzWtk_I" outputId="ee510060-6ca8-40e9-e102-39365408eb91"
df.head()
# + colab={"base_uri": "https://localhost:8080/"} id="42yu0VQWtoPu" outputId="2465b2db-4a6a-4524-a432-0e6757b91314"
df['Receita'].max()
# + colab={"base_uri": "https://localhost:8080/"} id="ve05sAvjtuoq" outputId="34241f02-0065-4c94-d10d-d4ce611650ec"
df['Receita'].min()
# + colab={"base_uri": "https://localhost:8080/", "height": 142} id="1j0a_VK1t06A" outputId="0799fb10-8b7a-4ec0-830a-b239c083e4f8"
df.nlargest(3, 'Receita')
# + colab={"base_uri": "https://localhost:8080/", "height": 142} id="iNl97O8pua9h" outputId="b935a5e5-2752-43c1-c211-ff9310ba6637"
df.nsmallest(3, 'Receita')
# + colab={"base_uri": "https://localhost:8080/"} id="fq7e4Q7pukAs" outputId="a96167a3-0426-40d9-a472-020f006814d6"
df.groupby("Cidade")['Receita'].sum()
# + colab={"base_uri": "https://localhost:8080/", "height": 359} id="I7w0sXVxuxpZ" outputId="2f7f6cec-7118-4eb5-dc35-d85e0f1a58fa"
df.sort_values("Receita", ascending=False).head(10)
# + [markdown] id="w09BTFNdvtrJ"
# # Working with dates
# + id="bNsl8vY4vwgw"
df['Data'] = df['Data'].astype("int64")
# + colab={"base_uri": "https://localhost:8080/"} id="-luUmumlwLOL" outputId="2f36f160-160e-4a68-dace-867c0034ae6d"
df.dtypes
# + id="CQx9EjNdwOaV"
df['Data'] = pd.to_datetime(df['Data'])
# + colab={"base_uri": "https://localhost:8080/"} id="hCTrgCj2wYjv" outputId="e1d8ae03-3742-4b9b-8d24-272453c1bd85"
df.dtypes
# + colab={"base_uri": "https://localhost:8080/"} id="6tvvA9iXwcGw" outputId="1a927c2a-1c5d-4a5d-fb69-315fda5a9cf4"
df.groupby(df['Data'].dt.year)['Receita'].sum()
# + id="zYccka2QxA4x"
df['Ano_Venda'] = df['Data'].dt.year
# + colab={"base_uri": "https://localhost:8080/", "height": 204} id="DzyKgj60xRNt" outputId="6e233786-bc32-4b77-9caa-369e1ad6dc1c"
df.sample(5)
# + id="rsCy-pgjxUwp"
df['Mes_Venda'], df['Dia_Venda'] = (df['Data'].dt.month, df['Data'].dt.day)
# + colab={"base_uri": "https://localhost:8080/", "height": 204} id="RsffyETmx0tS" outputId="4df73b5c-328b-48a8-b8ce-447c66bd4401"
df.sample(5)
# + colab={"base_uri": "https://localhost:8080/"} id="cBXwVQApx5Lr" outputId="4f4af59a-2ae5-42c7-f9c5-e46809caf938"
df['Data'].min()
# + id="8Tu6_aJcx-nS"
df['Diferenca_Dias'] = df['Data'] - df['Data'].min()
# + colab={"base_uri": "https://localhost:8080/", "height": 204} id="HStRO33XyfHM" outputId="1598841d-11c8-4249-d54e-118dc937d1ca"
df.sample(5)
# + id="AdV8w-a1yjBg"
df['Trimestre_Venda'] = df['Data'].dt.quarter
# + colab={"base_uri": "https://localhost:8080/", "height": 204} id="FwwHXDHgyw9W" outputId="75800ccd-0535-4b9a-e1b0-51015fe9b413"
df.sample(5)
# + id="xlbM6xhlzIvc"
vendas_marco_19 = df.loc[(df['Data'].dt.year == 2019) & (df['Data'].dt.month == 3)]
# + colab={"base_uri": "https://localhost:8080/", "height": 419} id="KA8ijBsgzjwY" outputId="a8ca8d7c-051a-434f-a494-afa7a37bbaa2"
vendas_marco_19
# + [markdown] id="ynCuoiPpzmSc"
# # Data visualization
# + colab={"base_uri": "https://localhost:8080/"} id="o6dCpd05moNE" outputId="f861d651-19e1-4d81-f807-67a07aabfdd8"
df['LojaID'].value_counts(ascending=False) # how many sales there are for each LojaID
# + colab={"base_uri": "https://localhost:8080/", "height": 281} id="BHTmbJQSzu7L" outputId="9f6170a5-21bd-49a0-c299-f3cfb7473b72"
df['LojaID'].value_counts(ascending=False).plot.bar();
# + colab={"base_uri": "https://localhost:8080/", "height": 265} id="gp3oWpvdnJ_z" outputId="11b5e26c-0818-4985-ce8b-948a42115bf5"
df['LojaID'].value_counts().plot.barh();
# + colab={"base_uri": "https://localhost:8080/", "height": 265} id="Q2R1bOu4npVE" outputId="982227a7-20a1-41d7-c942-1b9447ca80c1"
df['LojaID'].value_counts(ascending=True).plot.barh();
# + colab={"base_uri": "https://localhost:8080/", "height": 265} id="-eaK_ATwoAdo" outputId="3970422e-1d89-4070-f28b-fa20d175b3fd"
df.groupby(df['Data'].dt.year)['Receita'].sum().plot.pie()
# + colab={"base_uri": "https://localhost:8080/"} id="Yxqi6RqnomDo" outputId="f8502f54-93c8-4715-8ca7-d150425711c4"
df['Cidade'].value_counts()
# + colab={"base_uri": "https://localhost:8080/", "height": 331} id="jKf2xCj2owE2" outputId="eb461224-228c-4145-d92a-c820c6bcdf34"
import matplotlib.pyplot as plt
df['Cidade'].value_counts().plot.bar(title="Total vendas por Cidade")
plt.xlabel("Cidade")
plt.ylabel("Total Vendas");
# + colab={"base_uri": "https://localhost:8080/", "height": 331} id="CPWpxaqqsvcs" outputId="fa83cd58-82b3-4f7b-8b3e-843e4b4e052e"
df['Cidade'].value_counts().plot.bar(title="Total vendas por Cidade", color="red")
plt.xlabel("Cidade")
plt.ylabel("Total Vendas");
# + id="0vNmcMG3tDaR"
plt.style.use("ggplot")
# + colab={"base_uri": "https://localhost:8080/", "height": 299} id="zwSclBATtNA6" outputId="0f3729b1-74f4-4c9e-cc38-048d02d432fd"
df.groupby(df['Mes_Venda'])['Qtde'].sum().plot(title= "Total Produtos Vendidos x Mês")
plt.xlabel("Mês")
plt.ylabel("Total Produtos Vendidos");
plt.legend();
# + colab={"base_uri": "https://localhost:8080/"} id="c7oTH3lyw26c" outputId="fb6e8398-2003-4b33-9221-c48e07c66cfc"
df.groupby(df["Mes_Venda"])["Qtde"].sum()
# + id="ZU64xuku4sfN"
# selecting only the 2019 sales
df_2019 = df[df['Ano_Venda'] == 2019]
# + colab={"base_uri": "https://localhost:8080/"} id="AIWkOnnl6iP1" outputId="e55dc9da-ddd8-4bf1-93c2-577a8231f653"
df_2019.groupby(df_2019['Mes_Venda'])['Qtde'].sum()
# + colab={"base_uri": "https://localhost:8080/", "height": 284} id="sHRf97r75JjF" outputId="5d9090df-bbd1-48f3-b620-fac585ad0719"
# Total products sold per month
df_2019.groupby(df_2019['Mes_Venda'])['Qtde'].sum().plot(marker = "v")
plt.xlabel("Mês")
plt.ylabel("Total Produtos Vendidos em 2019");
plt.legend();
# + id="UIayJuywvibv" colab={"base_uri": "https://localhost:8080/", "height": 265} outputId="cdcc72e5-e2d5-4b9c-e878-6029e6974af5"
# histogram
plt.hist(df['Qtde'], color="orange");
# + colab={"base_uri": "https://localhost:8080/", "height": 265} id="OAyw5Mbb7Upu" outputId="8e86b6e9-d3c2-4e33-dbda-e8401d717270"
plt.scatter(x=df_2019['Dia_Venda'], y = df_2019['Receita']);
# + colab={"base_uri": "https://localhost:8080/", "height": 300} id="6N9yk_6x7wdB" outputId="e3be27a9-e96c-4b78-9890-66689bd9037a"
# saving as png
df_2019.groupby(df_2019['Mes_Venda'])['Qtde'].sum().plot(marker = "o")
plt.title("Quantidade de produtos vendidos x mês")
plt.xlabel("Mês")
plt.ylabel("Total Produtos Vendidos");
plt.legend()
plt.savefig("grafico QTDE x MES.png")
| Aula2_pandas.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Simple Animations Using clear_output
# Sometimes you want to clear the output area in the middle of a calculation. This can be useful for doing simple animations. In terminals, there is the carriage-return (`'\r'`) for overwriting a single line, but the notebook frontend can clear the whole output area, not just a single line.
#
# To clear output in the Notebook you can use the `clear_output()` function. If you are clearing the output every frame of an animation, calling `clear_output()` will create noticeable flickering. You can use `clear_output(wait=True)` to add the *clear_output* call to a queue. When data becomes available to replace the existing output, the *clear_output* will be called immediately before the new data is added. This avoids the flickering by not rendering the cleared output to the screen.
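# For comparison, here is a minimal sketch of the terminal-style carriage-return trick mentioned above; inside the notebook itself, the `clear_output` approach shown next is the better fit.
# +
import time
for i in range(10):
    print(f"\rstep {i}", end="")  # '\r' rewinds to the start of the line
    time.sleep(0.1)
print()
# -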
# ## Simple example
# Here we show our progress iterating through a list:
import sys
import time
from IPython.display import display, clear_output
for i in range(10):
time.sleep(0.25)
clear_output(wait=True)
print(i)
sys.stdout.flush()
# ## AsyncResult.wait_interactive
# The AsyncResult object has a special `wait_interactive()` method, which prints its progress interactively,
# so you can watch as your parallel computation completes.
#
# **This example assumes you have an IPython cluster running, which you can start from the [cluster panel](/#clusters)**
# +
#from IPython import parallel
#rc = parallel.Client()
#view = rc.load_balanced_view()
#
#amr = view.map_async(time.sleep, [0.5]*100)
#
#amr.wait_interactive()
# -
# ## Matplotlib example
# You can also use `clear_output()` to clear figures and plots.
# %matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
# +
from scipy.special import jn
x = np.linspace(0,5)
f, ax = plt.subplots()
ax.set_title("Bessel functions")
for n in range(1,10):
time.sleep(1)
ax.plot(x, jn(x,n))
clear_output(wait=True)
display(f)
# close the figure at the end, so we don't get a duplicate
# of the last plot
plt.close()
# -
| 001-Jupyter/001-Tutorials/001-Basic-Tutorials/001-IPython-Kernel/Animations Using clear_output.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Monte Carlo Simulation and Random Walk Generation
# $$ \frac{S_{t+dt} - S_t}{S_t} = \mu \, dt + \sigma \sqrt{dt} \, \xi_t $$
# +
import numpy as np
import pandas as pd
def gbm(n_years =10, n_scenarios = 1000, mu=0.07,sigma = 0.15, steps_per_year = 12, s_0 = 100.0):
"""
    Evolution of a Stock Price using a Geometric Brownian Motion Model (Monte Carlo Simulation)
"""
dt = 1/steps_per_year
n_steps = int(n_years * steps_per_year)
rets_plus_1 = np.random.normal(loc= (1+mu*dt),scale = (sigma*np.sqrt(dt)),size = (n_steps, n_scenarios), )
rets_plus_1[0] = 1
prices = s_0*pd.DataFrame(rets_plus_1).cumprod()
return prices
# -
import ashmodule as ash
ax = gbm(n_scenarios = 20).plot(legend = False,figsize = (12,6));
ax.set_xlim(left = 0);
gbm(n_scenarios = 10).head()
# %load_ext autoreload
# %autoreload 2
# # Using IPyWidgets to Interactively Plot the Monte Carlo Simulation
import ipywidgets as widgets
from IPython.display import display
import matplotlib.pyplot as plt
# +
def show_gbm(n_scenarios=1000, mu=0.07, sigma=0.15, s_0=100.0):
"""
Draw the results of a stock price evolution under a Geometric Brownian Motion model
"""
s_0=s_0
prices = gbm(n_scenarios=n_scenarios, mu=mu, sigma=sigma, s_0=s_0)
ax = prices.plot(legend=False, color="indianred", alpha = 0.5, linewidth=2, figsize=(12,5))
ax.axhline(y=s_0, ls=":", color="black")
# draw a dot at the origin
ax.plot(0,s_0, marker='o',color='darkred', alpha=0.2)
# -
gbm_controls = widgets.interactive(ash.show_gbm,
n_scenarios = widgets.IntSlider(min=1,max=1000,step=5),
mu =(-0.3,0.3,0.05),
sigma =(0,0.5,0.01),
s_0 =(1,500,10)
)
display(gbm_controls)
# # Using IPyWidgets to interact with Monte Carlo Simulations and CPPI
def show_cppi(n_scenarios=50, mu=0.07, sigma=0.15, m=3, floor=0.0, riskfree_rate=0.03, y_max=100,s_0=100, steps_per_year = 12):
"""
Plot the results of a Monte Carlo Simulation of CPPI
"""
start = s_0
sim_rets = ash.gbm(n_scenarios=n_scenarios, mu=mu, sigma=sigma, steps_per_year=steps_per_year)
risky_r = pd.DataFrame(sim_rets)
# run the "back"-test
btr = ash.run_cppi(risky_r=pd.DataFrame(risky_r),riskfree_rate=riskfree_rate,m=m, start=start, floor=floor)
wealth = btr["risky_r"]
# calculate terminal wealth stats
y_max=wealth.values.max()*y_max/100
ax = wealth.plot(legend = False, alpha = 0.3, color = "indianred", figsize = (12,6))
ax.axhline(y=start, ls=":", color= "black")
ax.axhline(y=start*floor, ls="--",color = "red")
ax.set_ylim(top=y_max)
# +
cppi_controls = widgets.interactive(show_cppi,
n_scenarios=widgets.IntSlider(min=1, max=1000, step=5, value=50),
mu=(0., +.2, .01),
sigma=(0, .30, .05),
floor=(0, 2, .1),
m=(1, 5, .5),
riskfree_rate=(0, .05, .01),
y_max=widgets.IntSlider(min=0, max=100, step=1, value=100,
description="Zoom Y Axis")
)
# -
display(cppi_controls)
r_asset = ash.gbm(n_scenarios=50)
r_asset
ash.run_cppi((r_asset))["risky_r"][0].plot(legend=False,figsize =(12,6))
ash.run_cppi(r_asset,start = 100)["Wealth"].head()
r_asset.shape
r_asset.index = pd.date_range("2000-01",periods=r_asset.shape[0],freq="MS").to_period("M")
r_asset.head()
ash.run_cppi(r_asset,start = 100)["risky_r"].plot(legend = False);
ash.run_cppi(r_asset,start = 100)["risky_r"].plot(legend = False,figsize = (12,6),color= "red", alpha = 0.3);
| Introduction to Portfolio Construction and Analysis with Python/W3/.ipynb_checkpoints/Monte Carlo Simulation-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Example queries for Economic Characteristics on COVID-19 Knowledge Graph
# [Work in progress]
#
# This notebook demonstrates how to run Cypher queries to get [Economic Characteristics from the American Community Survey 2018 5-year data](https://data.census.gov/cedsci/table?tid=ACSDP5Y2018.DP03) at multiple levels of geographic granularity.
import pandas as pd
import matplotlib.pyplot as plt
from py2neo import Graph
pd.options.display.max_rows = None # display all rows
pd.options.display.max_columns = None # display all columns
# #### Connect to COVID-19-Net Knowledge Graph
graph = Graph("bolt://172.16.17.32:7687", user="reader", password="<PASSWORD>")
# ## Economic Characteristics
# The following variables are available for queries. Variable names ending with Pct represent values in percent, whereas all other variables represent counts. For details see [Subject Definitions](https://www2.census.gov/programs-surveys/acs/tech_docs/subject_definitions/2018_ACSSubjectDefinitions.pdf).
# #### Commuting
# 'DP03_0018E': 'workers16YearsAndOver',
# 'DP03_0019E': 'droveAloneToWorkInCarTruckOrVan',
# 'DP03_0019PE': 'droveAloneToWorkInCarTruckOrVanPct',
# 'DP03_0020E': 'carpooledToWorkInCarTruckOrVan',
# 'DP03_0020PE': 'carpooledToWorkInCarTruckOrVanPct',
# 'DP03_0021E': 'publicTransportToWork',
# 'DP03_0021PE': 'publicTransportToWorkPct',
# 'DP03_0022E': 'walkedToWork',
# 'DP03_0022PE': 'walkedToWorkPct',
# 'DP03_0023E': 'otherMeansOfCommutingToWork',
# 'DP03_0023PE': 'otherMeansOfCommutingToWorkPct',
# 'DP03_0024E': 'workedAtHome',
# 'DP03_0024PE': 'workedAtHomePct',
# 'DP03_0025E': 'meanTravelTimeToWorkMinutes',
# #### Employment
# 'DP03_0001E': 'population16YearsAndOver',
# 'DP03_0002E': 'population16YearsAndOverInLaborForce',
# 'DP03_0002PE': 'population16YearsAndOverInLaborForcePct',
# 'DP03_0003E': 'population16YearsAndOverInCivilianLaborForce',
# 'DP03_0003PE': 'population16YearsAndOverInCivilianLaborForcePct',
# 'DP03_0006E': 'population16YearsAndOverInArmedForces',
# 'DP03_0006PE': 'population16YearsAndOverInArmedForcesPct',
# 'DP03_0007E': 'population16YearsAndOverNotInLaborForce',
# 'DP03_0007PE': 'population16YearsAndOverNotInLaborForcePct',
# #### HealthInsurance
# 'DP03_0095E': 'civilianNoninstitutionalizedPopulation',
# 'DP03_0096E': 'withHealthInsuranceCoverage',
# 'DP03_0096PE': 'withHealthInsuranceCoveragePct',
# 'DP03_0097E': 'withPrivateHealthInsurance',
# 'DP03_0097PE': 'withPrivateHealthInsurancePct',
# 'DP03_0098E': 'withPublicCoverage',
# 'DP03_0098PE': 'withPublicCoveragePct',
# 'DP03_0099E': 'noHealthInsuranceCoverage',
# 'DP03_0099PE': 'noHealthInsuranceCoveragePct',
# #### Income
# 'DP03_0051E': 'totalHouseholds',
# 'DP03_0052E': 'householdIncomeLessThan10000USD',
# 'DP03_0052PE': 'householdIncomeLessThan10000USDPct',
# 'DP03_0053E': 'householdIncome10000To14999USD',
# 'DP03_0053PE': 'householdIncome10000To14999USDPct',
# 'DP03_0054E': 'householdIncome15000To24999USD',
# 'DP03_0054PE': 'householdIncome15000To24999USDPct',
# 'DP03_0055E': 'householdIncome25000To34999USD',
# 'DP03_0055PE': 'householdIncome25000To34999USDPct',
# 'DP03_0056E': 'householdIncome35000To49999USD',
# 'DP03_0056PE': 'householdIncome35000To49999USDPct',
# 'DP03_0057E': 'householdIncome50000To74999USD',
# 'DP03_0057PE': 'householdIncome50000To74999USDPct',
# 'DP03_0058E': 'householdIncome75000To99999USD',
# 'DP03_0058PE': 'householdIncome75000To99999USDPct',
# 'DP03_0059E': 'householdIncome100000To149999USD',
# 'DP03_0059PE': 'householdIncome100000To149999USDPct',
# 'DP03_0060E': 'householdIncome150000To199999USD',
# 'DP03_0060PE': 'householdIncome150000To199999USDPct',
# 'DP03_0061E': 'householdIncomeMoreThan200000USD',
# 'DP03_0061PE': 'householdIncomeMoreThan200000USDPct',
# 'DP03_0062E': 'medianHouseholdIncomeUSD',
# 'DP03_0063E': 'meanHouseholdIncomeUSD',
# #### Occupation
# 'DP03_0026E': 'civilianEmployedPopulation16YearsAndOver',
# 'DP03_0027E': 'managementBusinessScienceAndArtsOccupations',
# 'DP03_0027PE': 'managementBusinessScienceAndArtsOccupationsPct',
# 'DP03_0028E': 'serviceOccupations',
# 'DP03_0028PE': 'serviceOccupationsPct',
# 'DP03_0029E': 'salesAndOfficeOccupations',
# 'DP03_0029PE': 'salesAndOfficeOccupationsPct',
# 'DP03_0030E': 'naturalResourcesConstructionAndMaintenanceOccupations',
# 'DP03_0030PE': 'naturalResourcesConstructionAndMaintenanceOccupationsPct',
# 'DP03_0031E': 'productionTransportationAndMaterialMovingOccupations',
# 'DP03_0031PE': 'productionTransportationAndMaterialMovingOccupationsPct'
# ## Geographic granularity
# Data are available at 3 levels of granularity:
# * US County
# * US Zip Code
# * US Census Tract
# ## Query Examples
# ### Get Data By US County
# If the state and county FIPS codes are available, economic characteristics can be retrieved directly.
#
# Note, FIPS codes are represented as strings.
# ##### Example: Commuting
state_fips = '06'
county_fips = '073'
query = """
MATCH (c:Commuting{countyFips:$county_fips, stateFips:$state_fips})
RETURN c.stateFips, c.countyFips, c.droveAloneToWorkInCarTruckOrVanPct, c.publicTransportToWorkPct,
c.walkedToWorkPct, c.otherMeansOfCommutingToWorkPct, c.workedAtHomePct
"""
df = graph.run(query, county_fips=county_fips, state_fips=state_fips).to_data_frame()
df.head()
# ##### Example: Get Employment characteristics by traversing the KG
county = 'Los Angeles County'
query = """
MATCH (a:Admin2{name:$admin2})-[:HAS_ECONOMICS]-(:Economics)-[:HAS_EMPLOYMENT]-(e:Employment)
RETURN a.name, e.population16YearsAndOverInLaborForcePct, e.population16YearsAndOverInCivilianLaborForcePct,
e.population16YearsAndOverInArmedForcesPct, e.population16YearsAndOverNotInLaborForcePct
"""
df = graph.run(query, admin2=county).to_data_frame()
df.head()
# ### Get Data by US Postal Code
#
# Note, postal codes are represented as strings.
zip_code = '92130'
query = """
MATCH (h:HealthInsurance{postalCode: $zip_code})
RETURN h.postalCode, h.withHealthInsuranceCoveragePct, h.withPrivateHealthInsurancePct,
h.withPublicCoveragePct, h.noHealthInsuranceCoveragePct
"""
df = graph.run(query, zip_code=zip_code).to_data_frame()
df.head()
# ##### Example: List income data for Zip codes with a place name
#
# Note, Zip code areas may cross city boundaries. Place names are the preferred names used by the US Postal Service.
# +
place_name = '<NAME>'
query = """
MATCH (p:PostalCode{placeName:$place_name})-[:HAS_ECONOMICS]-(:Economics)-[:HAS_INCOME]-(i:Income)
RETURN p.name AS `Zip code`,
i.medianHouseholdIncomeUSD AS `Median Household Income`,
i.meanHouseholdIncomeUSD AS `Mean Household Income`
"""
df = graph.run(query, place_name=place_name).to_data_frame()
df.head()
# -
df.plot.bar(x='Zip code',
y=["Median Household Income", "Mean Household Income"],
title='Income [USD]', rot=0);
# ### Get Data by US Census Tract
#
# Note, tracts are represented as strings.
# ##### Example: Occupations for a tract
tract = '06073008324'
query = """
MATCH (o:Occupation{tract: $tract})
RETURN o.tract,
o.managementBusinessScienceAndArtsOccupationsPct,
o.serviceOccupationsPct,
o.salesAndOfficeOccupationsPct,
o.naturalResourcesConstructionAndMaintenanceOccupationsPct,
o.productionTransportationAndMaterialMovingOccupationsPct
"""
df = graph.run(query, tract=tract).to_data_frame()
df.head()
# ##### Example: List Occupations for all tracts in a county
# +
state = 'California'
county = 'Orange County'
query = """
MATCH (a1:Admin1{name: $admin1})-[:IN]-(a2:Admin2{name: $admin2})-[:IN]-(t:Tract)-[:HAS_ECONOMICS]-(:Economics)-[:HAS_OCCUPATION]-(o:Occupation)
RETURN a1.name AS State, a2.name AS County, t.name AS Tract,
o.managementBusinessScienceAndArtsOccupationsPct,
o.serviceOccupationsPct,
o.salesAndOfficeOccupationsPct,
o.naturalResourcesConstructionAndMaintenanceOccupationsPct,
o.productionTransportationAndMaterialMovingOccupationsPct
"""
df = graph.run(query, admin1=state, admin2=county).to_data_frame()
df.head()
| notebooks/queries/EconomicCharacteristics.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <table class="ee-notebook-buttons" align="left">
# <td><a target="_blank" href="https://github.com/giswqs/earthengine-py-notebooks/tree/master/Algorithms/Segmentation/segmentation_snic.ipynb"><img width=32px src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" /> View source on GitHub</a></td>
# <td><a target="_blank" href="https://nbviewer.jupyter.org/github/giswqs/earthengine-py-notebooks/blob/master/Algorithms/Segmentation/segmentation_snic.ipynb"><img width=26px src="https://upload.wikimedia.org/wikipedia/commons/thumb/3/38/Jupyter_logo.svg/883px-Jupyter_logo.svg.png" />Notebook Viewer</a></td>
# <td><a target="_blank" href="https://colab.research.google.com/github/giswqs/earthengine-py-notebooks/blob/master/Algorithms/Segmentation/segmentation_snic.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" /> Run in Google Colab</a></td>
# </table>
# ## Install Earth Engine API and geemap
# Install the [Earth Engine Python API](https://developers.google.com/earth-engine/python_install) and [geemap](https://github.com/giswqs/geemap). The **geemap** Python package is built upon the [ipyleaflet](https://github.com/jupyter-widgets/ipyleaflet) and [folium](https://github.com/python-visualization/folium) packages and implements several methods for interacting with Earth Engine data layers, such as `Map.addLayer()`, `Map.setCenter()`, and `Map.centerObject()`.
# The following script checks if the geemap package has been installed. If not, it will install geemap, which automatically installs its [dependencies](https://github.com/giswqs/geemap#dependencies), including earthengine-api, folium, and ipyleaflet.
#
# **Important note**: A key difference between folium and ipyleaflet is that ipyleaflet is built upon ipywidgets and allows bidirectional communication between the front-end and the backend enabling the use of the map to capture user input, while folium is meant for displaying static data only ([source](https://blog.jupyter.org/interactive-gis-in-jupyter-with-ipyleaflet-52f9657fa7a)). Note that [Google Colab](https://colab.research.google.com/) currently does not support ipyleaflet ([source](https://github.com/googlecolab/colabtools/issues/60#issuecomment-596225619)). Therefore, if you are using geemap with Google Colab, you should use [`import geemap.eefolium`](https://github.com/giswqs/geemap/blob/master/geemap/eefolium.py). If you are using geemap with [binder](https://mybinder.org/) or a local Jupyter notebook server, you can use [`import geemap`](https://github.com/giswqs/geemap/blob/master/geemap/geemap.py), which provides more functionalities for capturing user input (e.g., mouse-clicking and moving).
# +
# Installs geemap package
import subprocess
try:
import geemap
except ImportError:
print('geemap package not installed. Installing ...')
subprocess.check_call(["python", '-m', 'pip', 'install', 'geemap'])
# Checks whether this notebook is running on Google Colab
try:
import google.colab
import geemap.eefolium as geemap
except:
import geemap
# Authenticates and initializes Earth Engine
import ee
try:
ee.Initialize()
except Exception as e:
ee.Authenticate()
ee.Initialize()
# -
# ## Create an interactive map
# The default basemap is `Google Maps`. [Additional basemaps](https://github.com/giswqs/geemap/blob/master/geemap/basemaps.py) can be added using the `Map.add_basemap()` function.
Map = geemap.Map(center=[40,-100], zoom=4)
Map
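# As noted above, additional basemaps can be layered on with `Map.add_basemap()`; for example, `'HYBRID'` is one of the basemap names bundled with geemap:
Map.add_basemap('HYBRID')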
# ## Add Earth Engine Python script
# +
# Add Earth Engine dataset
# imageCollection = ee.ImageCollection("USDA/NAIP/DOQQ"),
# geometry = ee.Geometry.Polygon(
# [[[-121.89511299133301, 38.98496606984683],
# [-121.89511299133301, 38.909335196675435],
# [-121.69358253479004, 38.909335196675435],
# [-121.69358253479004, 38.98496606984683]]], {}, False),
# geometry2 = ee.Geometry.Polygon(
# [[[-108.34304809570307, 36.66975278349341],
# [-108.34225416183466, 36.66977859999848],
# [-108.34226489067072, 36.67042400981031],
# [-108.34308028221125, 36.670380982657925]]]),
# imageCollection2 = ee.ImageCollection("USDA/NASS/CDL"),
# cdl2016 = ee.Image("USDA/NASS/CDL/2016")
# Map.centerObject(geometry, {}, 'roi')
# # Map.addLayer(ee.Image(1), {'palette': "white"})
# cdl2016 = cdl2016.select(0).clip(geometry)
# function erode(img, distance) {
# d = (img.Not().unmask(1) \
# .fastDistanceTransform(30).sqrt() \
# .multiply(ee.Image.pixelArea().sqrt()))
# return img.updateMask(d.gt(distance))
# }
# function dilate(img, distance) {
# d = (img.fastDistanceTransform(30).sqrt() \
# .multiply(ee.Image.pixelArea().sqrt()))
# return d.lt(distance)
# }
# function expandSeeds(seeds) {
# seeds = seeds.unmask(0).focal_max()
# return seeds.updateMask(seeds)
# }
# bands = ["R", "G", "B", "N"]
# img = imageCollection \
# .filterDate('2015-01-01', '2017-01-01') \
# .filterBounds(geometry) \
# .mosaic()
# img = ee.Image(img).clip(geometry).divide(255).select(bands)
# Map.addLayer(img, {'gamma': 0.8}, "RGBN", False)
# seeds = ee.Algorithms.Image.Segmentation.seedGrid(36)
# # Apply a softening.
# kernel = ee.Kernel.gaussian(3)
# img = img.convolve(kernel)
# Map.addLayer(img, {'gamma': 0.8}, "RGBN blur", False)
# # Compute and display NDVI, NDVI slices and NDVI gradient.
# ndvi = img.normalizedDifference(["N", "R"])
# # print(ui.Chart.image.histogram(ndvi, geometry, 10))
# Map.addLayer(ndvi, {'min':0, 'max':1, 'palette': ["black", "tan", "green", "darkgreen"]}, "NDVI", False)
# Map.addLayer(ndvi.gt([0, 0.2, 0.40, 0.60, 0.80, 1.00]).reduce('sum'), {'min':0, 'max': 6}, "NDVI steps", False)
# ndviGradient = ndvi.gradient().pow(2).reduce('sum').sqrt()
# Map.addLayer(ndviGradient, {'min':0, 'max':0.01}, "NDVI gradient", False)
# gradient = img.spectralErosion().spectralGradient('emd')
# Map.addLayer(gradient, {'min':0, 'max': 0.3}, "emd", False)
# # Run SNIC on the regular square grid.
# snic = ee.Algorithms.Image.Segmentation.SNIC({
# 'image': img,
# 'size': 32,
# compactness: 5,
# connectivity: 8,
# neighborhoodSize:256,
# seeds: seeds
# }).select(["R_mean", "G_mean", "B_mean", "N_mean", "clusters"], ["R", "G", "B", "N", "clusters"])
# clusters = snic.select("clusters")
# Map.addLayer(clusters.randomVisualizer(), {}, "clusters")
# Map.addLayer(snic, {'bands': ["R", "G", "B"], 'min':0, 'max':1, 'gamma': 0.8}, "means", False)
# Map.addLayer(expandSeeds(seeds))
# # Compute per-cluster stdDev.
# stdDev = img.addBands(clusters).reduceConnectedComponents(ee.Reducer.stdDev(), "clusters", 256)
# Map.addLayer(stdDev, {'min':0, 'max':0.1}, "StdDev")
# # Display outliers as transparent
# outliers = stdDev.reduce('sum').gt(0.25)
# Map.addLayer(outliers.updateMask(outliers.Not()), {}, "Outliers", False)
# # Within each outlier, find most distant member.
# distance = img.select(bands).spectralDistance(snic.select(bands), "sam").updateMask(outliers)
# maxDistance = distance.addBands(clusters).reduceConnectedComponents(ee.Reducer.max(), "clusters", 256)
# Map.addLayer(distance, {'min':0, 'max':0.3}, "max distance")
# Map.addLayer(expandSeeds(expandSeeds(distance.eq(maxDistance))), {'palette': ["red"]}, "second seeds")
# newSeeds = seeds.unmask(0).add(distance.eq(maxDistance).unmask(0))
# newSeeds = newSeeds.updateMask(newSeeds)
# # Run SNIC again with both sets of seeds.
# snic2 = ee.Algorithms.Image.Segmentation.SNIC({
# 'image': img,
# 'size': 32,
# compactness: 5,
# connectivity: 8,
# neighborhoodSize: 256,
# seeds: newSeeds
# }).select(["R_mean", "G_mean", "B_mean", "N_mean", "clusters"], ["R", "G", "B", "N", "clusters"])
# clusters2 = snic2.select("clusters")
# Map.addLayer(clusters2.randomVisualizer(), {}, "clusters 2")
# Map.addLayer(snic2, {'bands': ["R", "G", "B"], 'min':0, 'max':1, 'gamma': 0.8}, "means", False)
# # Compute outliers again.
# stdDev2 = img.addBands(clusters2).reduceConnectedComponents(ee.Reducer.stdDev(), "clusters", 256)
# Map.addLayer(stdDev2, {'min':0, 'max':0.1}, "StdDev 2")
# outliers2 = stdDev2.reduce('sum').gt(0.25)
# outliers2 = outliers2.updateMask(outliers2.Not())
# Map.addLayer(outliers2, {}, "Outliers 2", False)
# # Show the final set of seeds.
# Map.addLayer(expandSeeds(newSeeds), {'palette': "white"}, "newSeeds")
# Map.addLayer(expandSeeds(distance.eq(maxDistance)), {'palette': ["red"]}, "second seeds")
# # Area, Perimeter, Width and Height (using snic1 for speed)
# area = ee.Image.pixelArea().addBands(clusters).reduceConnectedComponents(ee.Reducer.sum(), "clusters", 256)
# Map.addLayer(area, {'min':50000, 'max': 500000}, "Cluster Area")
# minMax = clusters.reduceNeighborhood(ee.Reducer.minMax(), ee.Kernel.square(1))
# perimeterPixels = minMax.select(0).neq(minMax.select(1)).rename('perimeter')
# Map.addLayer(perimeterPixels, {'min': 0, 'max': 1}, 'perimeterPixels')
# perimeter = perimeterPixels.addBands(clusters) \
# .reduceConnectedComponents(ee.Reducer.sum(), 'clusters', 256)
# Map.addLayer(perimeter, {'min': 100, 'max': 400}, 'Perimeter size', False)
# sizes = ee.Image.pixelLonLat().addBands(clusters).reduceConnectedComponents(ee.Reducer.minMax(), "clusters", 256)
# width = sizes.select("longitude_max").subtract(sizes.select("longitude_min"))
# height = sizes.select("latitude_max").subtract(sizes.select("latitude_min"))
# Map.addLayer(width, {'min':0, 'max':0.02}, "Cluster width")
# Map.addLayer(height, {'min':0, 'max':0.02}, "Cluster height")
# -
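# The cell above is a commented-out, line-by-line port of the JavaScript example. Below is a minimal sketch of the core SNIC step using the Python API; the region, band names, and parameter values mirror the commented script and are assumptions rather than the original author's code.
# +
region = ee.Geometry.Rectangle([-121.895, 38.909, -121.693, 38.985])
naip = (ee.ImageCollection('USDA/NAIP/DOQQ')
        .filterDate('2015-01-01', '2017-01-01')
        .filterBounds(region)
        .mosaic())
img = ee.Image(naip).clip(region).divide(255).select(['R', 'G', 'B', 'N'])
seeds = ee.Algorithms.Image.Segmentation.seedGrid(36)
snic = ee.Algorithms.Image.Segmentation.SNIC(
    image=img, size=32, compactness=5, connectivity=8,
    neighborhoodSize=256, seeds=seeds)
Map.addLayer(img, {'gamma': 0.8}, 'RGBN')
Map.addLayer(snic.select('clusters').randomVisualizer(), {}, 'SNIC clusters')
# -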
# ## Display Earth Engine data layers
Map.addLayerControl() # This line is not needed for ipyleaflet-based Map.
Map
| Algorithms/Segmentation/segmentation_snic.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: QSRL
# language: python
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/AI4Finance-Foundation/FinRL/blob/master/tutorials/3-Practical/FinRL_China_A_Share_Market.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="ef0f3a7e"
# ## Quantitative trading in the China A-share market with FinRL
# + [markdown] id="GiOWTM8RRG_n"
# Install FinRL
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="Q8gKimq2PZDh" outputId="ea18fe12-1b5e-492e-fa4a-53f5fa776694"
# !pip install git+https://github.com/AI4Finance-Foundation/FinRL.git
# + [markdown] id="tmt2578_RI2-"
# Install other libraries
# + colab={"base_uri": "https://localhost:8080/"} id="zDejJbjYQuUi" outputId="13f9a389-1113-4a67-b962-68a29ece6c21"
# !pip install stockstats
# !pip install tushare
# install TA-Lib
# !wget http://prdownloads.sourceforge.net/ta-lib/ta-lib-0.4.0-src.tar.gz
# !tar xvzf ta-lib-0.4.0-src.tar.gz
import os
os.chdir('ta-lib')
# !./configure --prefix=/usr
# !make
# !make install
os.chdir('../')
# !pip install TA-Lib
# + colab={"base_uri": "https://localhost:8080/"} id="H0-reEAYJTkU" outputId="2d6895dc-93a4-4661-934d-8bf1cd857e9c"
# %cd /
# !git clone https://github.com/AI4Finance-Foundation/FinRL-Meta
# %cd /FinRL-Meta/
# + [markdown] id="42ac7297"
# ### Import modules
# + colab={"base_uri": "https://localhost:8080/"} id="fluid-taylor" outputId="19584d54-5357-49ae-bc06-96bf66434867"
import warnings
warnings.filterwarnings("ignore")
import pandas as pd
from IPython import display
display.set_matplotlib_formats("svg")
from finrl_meta import config
from finrl_meta.data_processors.processor_tusharepro import TushareProProcessor, ReturnPlotter
from finrl_meta.env_stock_trading.env_stocktrading_China_A_shares import StockTradingEnv
from drl_agents.stablebaselines3_models import DRLAgent
pd.options.display.max_columns = None
print("ALL Modules have been imported!")
# + [markdown] id="eb601f4a"
# ### Create folders
# + id="339ab411"
import os
if not os.path.exists("./datasets" ):
os.makedirs("./datasets" )
if not os.path.exists("./trained_models"):
os.makedirs("./trained_models" )
if not os.path.exists("./tensorboard_log"):
os.makedirs("./tensorboard_log" )
if not os.path.exists("./results" ):
os.makedirs("./results" )
# + [markdown] id="74ad0a26"
# ### Download data, clean it, and engineer features
# + id="transsexual-crack"
ticket_list=['600000.SH', '600009.SH', '600016.SH', '600028.SH', '600030.SH',
'600031.SH', '600036.SH', '600050.SH', '600104.SH', '600196.SH',
'600276.SH', '600309.SH', '600519.SH', '600547.SH', '600570.SH']
train_start_date='2015-01-01'
train_stop_date='2019-08-01'
val_start_date='2019-08-01'
val_stop_date='2021-01-03'
token='<KEY>'
# + colab={"base_uri": "https://localhost:8080/"} id="preceding-selling" outputId="516e0959-1416-4a76-be57-0dd344c36798"
# download and clean
ts_processor = TushareProProcessor(data_source="tusharepro",
start_date=train_start_date,
end_date=val_stop_date,
time_interval="1d",
token=token)
ts_processor.download_data(ticker_list=ticket_list)
# + colab={"base_uri": "https://localhost:8080/", "height": 441} id="X-xuvxev2sge" outputId="bd8a9dd5-4ad2-4d7b-aa20-6d9281d4d6a4"
ts_processor.clean_data()
ts_processor.dataframe
# + colab={"base_uri": "https://localhost:8080/", "height": 840} id="3e40b006" outputId="f31bde57-4e4f-4f3f-8468-9f57cd75ad44"
# add_technical_indicator
ts_processor.add_technical_indicator(config.INDICATORS)
ts_processor.clean_data()
ts_processor.dataframe
# + [markdown] id="25fc2e45"
# ### Split training dataset
# + colab={"base_uri": "https://localhost:8080/"} id="pending-mother" outputId="509d3894-3d79-414f-c3ed-ad9d8c724632"
train =ts_processor.data_split(ts_processor.dataframe, train_start_date, train_stop_date)
len(train.tic.unique())
# + colab={"base_uri": "https://localhost:8080/"} id="signal-rochester" outputId="16137e43-0864-4f25-e839-81ecdf0a9921"
train.tic.unique()
# + colab={"base_uri": "https://localhost:8080/", "height": 206} id="future-while" outputId="01aecdca-efd4-4b16-b75d-65f084f5884e"
train.head()
# + colab={"base_uri": "https://localhost:8080/"} id="72e9bcc2" outputId="53cfc911-3346-45bf-c07e-be090bac7ac4"
train.shape
# + colab={"base_uri": "https://localhost:8080/"} id="provincial-wichita" outputId="c783d9c4-ac2e-45ad-84a2-58bbb445e15e"
stock_dimension = len(train.tic.unique())
state_space = stock_dimension*(len(config.INDICATORS)+2)+1
print(f"Stock Dimension: {stock_dimension}, State Space: {state_space}")
# + [markdown] id="e90bbf93"
# ### Train
# + id="dcb153fc"
env_kwargs = {
"stock_dim": stock_dimension,
"hmax": 1000,
"initial_amount": 1000000,
"buy_cost_pct":6.87e-5,
"sell_cost_pct":1.0687e-3,
"reward_scaling": 1e-4,
"state_space": state_space,
"action_space": stock_dimension,
"tech_indicator_list": config.INDICATORS,
"print_verbosity": 1,
"initial_buy":True,
"hundred_each_trade":True
}
e_train_gym = StockTradingEnv(df = train, **env_kwargs)
# + [markdown] id="oyat-FppWzZ_"
# ## DDPG
# + colab={"base_uri": "https://localhost:8080/"} id="loaded-modem" outputId="96195d35-6372-4098-d79f-83331ec59805"
env_train, _ = e_train_gym.get_sb_env()
print(type(env_train))
# + colab={"base_uri": "https://localhost:8080/"} id="thick-blackjack" outputId="a58f7766-1237-44be-a06b-4c45b6593e36"
agent = DRLAgent(env = env_train)
DDPG_PARAMS = {
"batch_size": 256,
"buffer_size": 50000,
"learning_rate": 0.0005,
"action_noise":"normal",
}
POLICY_KWARGS = dict(net_arch=dict(pi=[64, 64], qf=[400, 300]))
model_ddpg = agent.get_model("ddpg", model_kwargs = DDPG_PARAMS, policy_kwargs=POLICY_KWARGS)
# + colab={"base_uri": "https://localhost:8080/"} id="growing-supplier" outputId="d53c30d1-87cf-4ef5-a7f9-2d221e1a7bda"
trained_ddpg = agent.train_model(model=model_ddpg,
tb_log_name='ddpg',
total_timesteps=10000)
# + [markdown] id="M4QlPuW4XJZ4"
# ## A2C
# + colab={"base_uri": "https://localhost:8080/"} id="DRWP9owCXK2n" outputId="c869f759-e711-4544-c126-ad7a20e5f884"
agent = DRLAgent(env = env_train)
model_a2c = agent.get_model("a2c")
# + colab={"base_uri": "https://localhost:8080/"} id="mGXzLTKiXMBV" outputId="d56bb067-407a-4383-df21-b339e6077859"
trained_a2c = agent.train_model(model=model_a2c,
tb_log_name='a2c',
total_timesteps=50000)
# + [markdown] id="0767b826"
# ### Trade
# + id="responsible-equity"
trade = ts_processor.data_split(ts_processor.dataframe, val_start_date, val_stop_date)
env_kwargs = {
"stock_dim": stock_dimension,
"hmax": 1000,
"initial_amount": 1000000,
"buy_cost_pct":6.87e-5,
"sell_cost_pct":1.0687e-3,
"reward_scaling": 1e-4,
"state_space": state_space,
"action_space": stock_dimension,
"tech_indicator_list": config.INDICATORS,
"print_verbosity": 1,
"initial_buy":False,
"hundred_each_trade":True
}
e_trade_gym = StockTradingEnv(df = trade, **env_kwargs)
# + colab={"base_uri": "https://localhost:8080/"} id="first-hierarchy" outputId="290b701e-e089-427d-944e-0e4eef01cf96"
df_account_value, df_actions = DRLAgent.DRL_prediction(model=trained_ddpg,
environment = e_trade_gym)
# + colab={"base_uri": "https://localhost:8080/", "height": 455} id="8b9d6c2b" outputId="ab8a55d3-9c7a-4677-f048-8e024ba2a758"
df_actions.to_csv("action.csv",index=False)
df_actions
# + [markdown] id="6ea8a81c"
# ### Backtest
# + id="727d62e0"
# # %matplotlib inline
plotter = ReturnPlotter(df_account_value, trade, val_start_date, val_stop_date)
# plotter.plot_all()
# + colab={"base_uri": "https://localhost:8080/", "height": 386} id="8813f87d" outputId="260afac2-097b-4b46-8964-c7983533d2c9"
# %matplotlib inline
plotter.plot()
# + id="d155bcd5"
# # %matplotlib inline
# # ticket: SSE 50:000016
# plotter.plot("000016")
# + [markdown] id="ce724f71"
# #### Use pyfolio
# + colab={"base_uri": "https://localhost:8080/"} id="79c82f77" outputId="64f56572-06b0-4e85-e4a6-4840dbd8f733"
# CSI 300
baseline_df = plotter.get_baseline("399300")
# + colab={"base_uri": "https://localhost:8080/"} id="e4ab0438" outputId="c75dd0a3-4714-4aa3-b71e-5c8cfca3d511"
import pyfolio
from pyfolio import timeseries
daily_return = plotter.get_return(df_account_value)
daily_return_base = plotter.get_return(baseline_df, value_col_name="close")
perf_func = timeseries.perf_stats
perf_stats_all = perf_func(returns=daily_return,
factor_returns=daily_return_base,
positions=None, transactions=None, turnover_denom="AGB")
print("==============DRL Strategy Stats===========")
perf_stats_all
# + colab={"base_uri": "https://localhost:8080/"} id="8jnvuVBdWV9r" outputId="5afe15e3-3767-4b47-ecd6-d2e5dd17b0d7"
import pyfolio
from pyfolio import timeseries
daily_return = plotter.get_return(df_account_value)
daily_return_base = plotter.get_return(baseline_df, value_col_name="close")
perf_func = timeseries.perf_stats
perf_stats_all = perf_func(returns=daily_return_base,
factor_returns=daily_return_base,
positions=None, transactions=None, turnover_denom="AGB")
print("==============Baseline Strategy Stats===========")
perf_stats_all
# + id="8215cc99"
# with pyfolio.plotting.plotting_context(font_scale=1.1):
# pyfolio.create_full_tear_sheet(returns = daily_return,
# benchmark_rets = daily_return_base, set_context=False)
# + [markdown] id="ce11979d"
# ### Authors
# github username: oliverwang15, eitin-infant
| tutorials/3-Practical/FinRL_China_A_Share_Market.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Introduction to `pandas`
import numpy as np
import pandas as pd
# ## Series and Data Frames
# ### Series objects
# A `Series` is like a vector. All elements must have the same type or are nulls.
s = pd.Series([1,1,2,3] + [None])
s
# ### Size
s.size
# ### Unique Counts
s.value_counts()
# ### Special types of series
# #### Strings
words = 'the quick brown fox jumps over the lazy dog'.split()
s1 = pd.Series([' '.join(item) for item in zip(words[:-1], words[1:])])
s1
s1.str.upper()
s1.str.split()
s1.str.split().str[1]
# ### Categories
s2 = pd.Series(['Asian', 'Asian', 'White', 'Black', 'White', 'Hispanic'])
s2
s2 = s2.astype('category')
s2
s2.cat.categories
s2.cat.codes
# ### Ordered categories
s3 = pd.Series(['Mon', 'Tue', 'Wed', 'Thu', 'Fri']).astype('category')
s3
s3.cat.ordered
s3.sort_values()
s3 = s3.cat.reorder_categories(['Mon', 'Tue', 'Wed', 'Thu', 'Fri'], ordered=True)
s3.cat.ordered
s3.sort_values()
# ### DataFrame objects
# A `DataFrame` is like a matrix. Columns in a `DataFrame` are `Series`.
#
# - Each column in a DataFrame represents a **variable**
# - Each row in a DataFrame represents an **observation**
# - Each cell in a DataFrame represents a **value**
df = pd.DataFrame(dict(num=[1,2,3] + [None]))
df
df.num
# ### Index
#
# Row and column identifiers are of `Index` type.
#
# Somewhat confusingly, "index" is also a synonym for the row identifiers.
df.index
# #### Setting a column as the row index
df
df1 = df.set_index('num')
df1
# #### Making an index into a column
df1.reset_index()
# #### Sometimes you don't need to retain the index information
df = pd.DataFrame(dict(letters = list('ABCDEFG')))
df
df = df[df.letters.isin(list('AEIOU'))]
df
df.reset_index(drop=True)
# ### Columns
#
# This is just a different index object
df.columns
# ### Getting raw values
#
# Sometimes you just want a `numpy` array, and not a `pandas` object.
df.values
# ## Creating Data Frames
# ### Manual
n = 5
dates = pd.date_range(start='now', periods=n, freq='d')
df = pd.DataFrame(dict(pid=np.random.randint(100, 999, n),
weight=np.random.normal(70, 20, n),
height=np.random.normal(170, 15, n),
date=dates,
))
df
# ### From numpy array
pd.DataFrame(np.eye(3,2), columns=['A', 'B'], index=['x', 'y', 'z'])
# ### From URL
# +
url = "https://gist.githubusercontent.com/netj/8836201/raw/6f9306ad21398ea43cba4f7d537619d0e07d5ae3/iris.csv"
df = pd.read_csv(url)
df.head()
# -
# ### From file
#
# You can read in data from many different file types - plain text, JSON, spreadsheets, databases etc. Functions to read in data look like `read_X` where X is the data type.
# %%file measures.txt
pid weight height date
328 72.654347 203.560866 2018-11-11 14:16:18.148411
756 34.027679 189.847316 2018-11-12 14:16:18.148411
185 28.501914 158.646074 2018-11-13 14:16:18.148411
507 17.396343 180.795993 2018-11-14 14:16:18.148411
919 64.724301 173.564725 2018-11-15 14:16:18.148411
df = pd.read_table('measures.txt')
df
# ## Indexing Data Frames
# ### Implicit defaults
#
# if you provide a slice, it is assumed that you are asking for rows.
df[1:3]
# If you provide a single value or list, it is assumed that you are asking for columns.
df[['pid', 'weight']]
# ### Extracting a column
# #### Dictionary style access
df['pid']
# #### Property style access
#
# This only works for column names that are also valid Python identifiers (i.e., no spaces, dashes, or keywords)
df.pid
# ### Indexing by location
#
# This is similar to `numpy` indexing
df.iloc[1:3, :]
df.iloc[1:3, [True, False, True, False]]
# ### Indexing by name
df.loc[1:3, 'weight':'height']
# **Warning**: When using `loc`, the row slice indicates row names, not positions.
df1 = df.copy()
df1.index = df.index + 1
df1
df1.loc[1:3, 'weight':'height']
# ## Structure of a Data Frame
# ### Data types
df.dtypes
# ### Converting data types
# #### Using `astype` on one column
df.pid = df.pid.astype('category')
# #### Using `astype` on multiple columns
df = df.astype(dict(weight=float,
height=float))
# #### Using a conversion function
df.date = pd.to_datetime(df.date)
# #### Check
df.dtypes
# ### Basic properties
df.size
df.shape
df.describe()
df.info()
# ### Inspection
df.head(n=3)
df.tail(n=3)
df.sample(n=3)
df.sample(frac=0.5)
# ## Selecting, Renaming and Removing Columns
# ### Selecting columns
df.filter(items=['pid', 'date'])
df.filter(regex='.*ght')
# The `like` argument keeps labels that contain the given substring - `like='ei'` keeps every column whose name contains 'ei'
df.filter(like='ei')
# #### Filter has an optional axis argument if you want to select by row index
df.filter([0,1,3,4], axis=0)
# #### Note that you can also use regular string methods on the columns
df.loc[:, df.columns.str.contains('d')]
# ### Renaming columns
df.rename(dict(weight='w', height='h'), axis=1)
orig_cols = df.columns
df.columns = list('abcd')
df
df.columns = orig_cols
df
# ### Removing columns
df.drop(['pid', 'date'], axis=1)
df.drop(columns=['pid', 'date'])
df.drop(columns=df.columns[df.columns.str.contains('d')])
# ## Selecting, Renaming and Removing Rows
# ### Selecting rows
df[df.weight.between(60,70)]
df[(69 <= df.weight) & (df.weight < 70)]
df[df.date.between(pd.to_datetime('2018-11-13'),
pd.to_datetime('2018-11-15 23:59:59'))]
df.query('weight <= 70 and height > 90')
# ### Renaming rows
df.rename({i:letter for i,letter in enumerate('abcde')})
df.index = ['the', 'quick', 'brown', 'fox', 'jumps']
df
df = df.reset_index(drop=True)
df
# ### Dropping rows
df.drop([1,3], axis=0)
# #### Dropping duplicated data
df['something'] = [1,1,None,2,None]
df['nothing'] = [None, None, None, None, None]
df.loc[df.something.duplicated()]
df.drop_duplicates(subset='something')
# #### Dropping missing data
df
df.dropna()
df.dropna(axis=1)
df.dropna(axis=1, how='all')
# #### Brute force replacement of missing values
df.something.fillna(0)
df.something.fillna(df.something.mean())
df.something.ffill()
df.something.bfill()
df.something.interpolate()
# ## Transforming and Creating Columns
df.assign(bmi=df['weight'] / (df['height']/100)**2)
df['bmi'] = df['weight'] / (df['height']/100)**2
df
df['something'] = [2,2,None,None,3]
df
# ## Sorting Data Frames
# ### Sort on indexes
df.sort_index(axis=1)
df.sort_index(axis=0, ascending=False)
# ### Sort on values
df.sort_values(by=['something', 'bmi'], ascending=[True, False])
# ## Summarizing
# ### Apply an aggregation function
df.select_dtypes(include=np.number)
df.select_dtypes(include=np.number).agg(np.sum)
df.agg(['count', np.sum, np.mean])
# ## Split-Apply-Combine
#
# We often want to perform subgroup analysis (conditioning by some discrete or categorical variable). This is done with `groupby` followed by an aggregate function. Conceptually, we split the data frame into separate groups, apply the aggregate function to each group separately, then combine the aggregated results back into a single data frame.
df['treatment'] = list('ababa')
df
grouped = df.groupby('treatment')
grouped.get_group('a')
grouped.mean()
# ### Using `agg` with `groupby`
grouped.agg('mean')
grouped.agg(['mean', 'std'])
grouped.agg({'weight': ['mean', 'std'], 'height': ['min', 'max'], 'bmi': lambda x: (x**2).sum()})
# ### Using `transform` with `groupby`
g_mean = grouped[['weight', 'height']].transform(np.mean)
g_mean
g_std = grouped[['weight', 'height']].transform(np.std)
g_std
(df[['weight', 'height']] - g_mean)/g_std
# ## Combining Data Frames
df
df1 = df.iloc[3:].copy()
df1.drop('something', axis=1, inplace=True)
df1
# ### Adding rows
#
# Note that `pandas` aligns by column indexes automatically.
df.append(df1, sort=False)
pd.concat([df, df1], sort=False)
# ### Adding columns
df.pid
df2 = pd.DataFrame(dict(pid=[649, 533, 400, 600], age=[23,34,45,56]))
df2.pid
df.pid = df.pid.astype('int')
pd.merge(df, df2, on='pid', how='inner')
pd.merge(df, df2, on='pid', how='left')
pd.merge(df, df2, on='pid', how='right')
pd.merge(df, df2, on='pid', how='outer')
# ### Merging on the index
df1 = pd.DataFrame(dict(x=[1,2,3]), index=list('abc'))
df2 = pd.DataFrame(dict(y=[4,5,6]), index=list('abc'))
df3 = pd.DataFrame(dict(z=[7,8,9]), index=list('abc'))
df1
df2
df3
df1.join([df2, df3])
# ## Fixing common DataFrame issues
# ### Multiple variables in a column
df = pd.DataFrame(dict(pid_treat = ['A-1', 'B-2', 'C-1', 'D-2']))
df
df.pid_treat.str.split('-')
df.pid_treat.str.split('-').apply(pd.Series, index=['pid', 'treat'])
# ### Multiple values in a cell
df = pd.DataFrame(dict(pid=['a', 'b', 'c'], vals = [(1,2,3), (4,5,6), (7,8,9)]))
df
# If you want the values in separate columns
df[['t1', 't2', 't3']] = df.vals.apply(pd.Series)
df
# If you want a separate row for each value
df.explode(column='vals')
# ## Reshaping Data Frames
#
# Sometimes we need to make rows into columns or vice versa.
# ### Converting multiple columns into a single column
#
# This is often useful if you need to condition on some variable.
url = 'https://raw.githubusercontent.com/uiuc-cse/data-fa14/gh-pages/data/iris.csv'
iris = pd.read_csv(url)
iris.head()
iris.shape
df_iris = pd.melt(iris, id_vars='species')
df_iris.sample(10)
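# Going back the other way (long to wide) can be done with `pivot_table`. A minimal sketch - because the long table has several rows per species, we have to aggregate, here with the mean:
df_iris.pivot_table(index='species', columns='variable', values='value', aggfunc='mean')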
# ## Chaining commands
#
# Sometimes you see this functional style of method chaining that avoids the need for temporary intermediate variables.
(
iris.
sample(frac=0.2).
filter(regex='s.*').
    assign(both=iris.sepal_length + iris.sepal_width).
groupby('species').agg(['mean', 'sum']).
pipe(lambda x: np.around(x, 1))
)
| notebooks/S04_Pandas.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # The Efficient Frontier of Optimal Portfolio Transactions
#
# ### Introduction
#
# [Almgren and Chriss](https://cims.nyu.edu/~almgren/papers/optliq.pdf) showed that for each value of risk aversion there is a unique optimal execution strategy. The optimal strategy is obtained by minimizing the **Utility Function** $U(x)$:
#
# \begin{equation}
# U(x) = E(x) + \lambda V(x)
# \end{equation}
#
# where $E(x)$ is the **Expected Shortfall**, $V(x)$ is the **Variance of the Shortfall**, and $\lambda$ corresponds to the trader’s risk aversion. The expected shortfall and variance of the optimal trading strategy are given by:
#
# <img src="./text_images/eq.png" width="700" height="900">
#
# In this notebook, we will learn how to visualize and interpret these equations.
#
# # The Expected Shortfall
#
# As we saw in the previous notebook, even if we use the same trading list, we are not guaranteed to always get the same implementation shortfall due to the random fluctuations in the stock price. This is why we had to reframe the problem of finding the optimal strategy in terms of the average implementation shortfall and the variance of the implementation shortfall. We call the average implementation shortfall the expected shortfall, $E(x)$, and the variance of the implementation shortfall $V(x)$. So, whenever we talk about the expected shortfall we are really talking about the average implementation shortfall. Therefore, we can think of the expected shortfall as follows: given a single trading list, the expected shortfall is the value the average implementation shortfall would take if we implemented this trade list in the stock market many times.
#
# To see this, in the code below we implement the same trade list on 50,000 trading simulations. We call each trading simulation an episode. Each episode will consist of different random fluctuations in stock price. For each episode we will compute the corresponding implementation shortfall. After all the 50,000 trading simulations have been carried out we calculate the average implementation shortfall and the variance of the implementation shortfalls. We can then compare these values with the values given by the equations for $E(x)$ and $V(x)$ from the Almgren and Chriss model.
# +
# %matplotlib inline
import matplotlib.pyplot as plt
import utils
# Set the default figure size
plt.rcParams['figure.figsize'] = [17.0, 7.0]
# Set the liquidation time
l_time = 60
# Set the number of trades
n_trades = 60
# Set trader's risk aversion
t_risk = 1e-6
# Set the number of episodes to run the simulation
episodes = 10
utils.get_av_std(lq_time = l_time, nm_trades = n_trades, tr_risk = t_risk, trs = episodes)
# Get the AC Optimal strategy for the given parameters
ac_strategy = utils.get_optimal_vals(lq_time = l_time, nm_trades = n_trades, tr_risk = t_risk)
ac_strategy
# -
# # Extreme Trading Strategies
#
# Because some investors may be willing to take more risk than others, when looking for the optimal strategy we have to consider a wide range of risk values, ranging from those traders who want to take zero risk to those who want to take as much risk as possible. Let's take a look at these two extreme cases. We will define the **Minimum Variance** strategy as the one followed by a trader who wants to take zero risk, and the **Minimum Impact** strategy as the one followed by a trader who wants to take as much risk as possible. Let's take a look at the values of $E(x)$ and $V(x)$ for these extreme trading strategies. The `utils.get_min_param()` function uses the above equations for $E(x)$ and $V(x)$, along with the parameters from the trading environment, to calculate the expected shortfall and standard deviation (the square root of the variance) for these strategies. We'll start by looking at the Minimum Impact strategy.
# +
import utils
# Get the minimum impact and minimum variance strategies
minimum_impact, minimum_variance = utils.get_min_param()
# -
# ### Minimum Impact Strategy
#
# This trading strategy will be taken by a trader who has no regard for risk. In the Almgren and Chriss model this corresponds to setting the trader's risk aversion to $\lambda = 0$. In this case the trader will sell the shares at a constant rate over a long period of time. By doing so, he will minimize market impact, but will be at risk of losing a lot of money due to the large variance. Hence, this strategy will yield the lowest possible expected shortfall and the highest possible variance, for a given set of parameters. We can see that for the given parameters, this strategy yields an expected shortfall of \$197,000 but has a very large standard deviation of over 3 million dollars.
minimum_impact
# ### Minimum Variance Strategy
#
# This trading strategy will be taken by a trader who wants to take zero risk, regardless of transaction costs. In the Almgren and Chriss model this corresponds to a variance of $V(x) = 0$. In this case, the trader would prefer to sell all his shares immediately, causing a known price impact, rather than risk trading in small increments at successively adverse prices. This strategy will yield the smallest possible variance, $V(x) = 0$, and the highest possible expected shortfall, for a given set of parameters. We can see that for the given parameters, this strategy yields an expected shortfall of over 2.5 million dollars but has a standard deviation equal to zero.
minimum_variance
# # The Efficient Frontier
#
# The goal of Almgren and Chriss was to find the optimal strategies that lie between these two extremes. In their paper, they showed how to compute the trade list that minimizes the expected shortfall for a wide range of risk values. In their model, Almgren and Chriss used the parameter $\lambda$ to measure a trader's risk-aversion. The value of $\lambda$ tells us how much a trader is willing to penalize the variance of the shortfall, $V(X)$, relative to expected shortfall, $E(X)$. They showed that for each value of $\lambda$ there is a uniquely determined optimal execution strategy. We define the **Efficient Frontier** to be the set of all these optimal trading strategies. That is, the efficient frontier is the set that contains the optimal trading strategy for each value of $\lambda$.
#
# The efficient frontier is often visualized by plotting $(x,y)$ pairs for a wide range of $\lambda$ values, where the $x$-coordinate is given by the equation of the expected shortfall, $E(X)$, and the $y$-coordinate is given by the equation of the variance of the shortfall, $V(X)$. Therefore, for a given set of parameters, the curve defined by the efficient frontier represents the set of optimal trading strategies that give the lowest expected shortfall for a defined level of risk.
#
# In the code below, we plot the efficient frontier for $\lambda$ values in the range $(10^{-7}, 10^{-4})$, using the default parameters in our trading environment. Each point of the frontier represents a distinct strategy for optimally liquidating the same number of stocks. A risk-averse trader, who wishes to sell quickly to reduce exposure to stock price volatility, despite the trading costs incurred in doing so, will likely choose a value of $\lambda = 10^{-4}$. On the other hand, a trader
# who likes risk and wishes to postpone selling, will likely choose a value of $\lambda = 10^{-7}$. In the code, you can choose a particular value of $\lambda$ to see the expected shortfall and level of variance corresponding to that particular value of trader's risk aversion.
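# Before calling into `utils`, here is a stripped-down sketch (with made-up $E(x)$ and $V(x)$ numbers, not values produced by the Almgren and Chriss equations) of how the utility $U(x) = E(x) + \lambda V(x)$ singles out one strategy among several candidates:
# +
import numpy as np
candidate_E = np.array([2.0e5, 5.0e5, 1.0e6, 2.5e6])   # expected shortfall of each candidate strategy ($)
candidate_V = np.array([9.0e12, 2.0e12, 4.0e11, 0.0])  # variance of the shortfall of each candidate ($^2)
lam = 1e-6                                              # trader's risk aversion
U = candidate_E + lam * candidate_V                     # utility of each candidate
print('Utilities:', U)
print('Optimal candidate index:', np.argmin(U))         # the optimal strategy minimizes U
# -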
# +
# %matplotlib inline
import matplotlib.pyplot as plt
import utils
# Set the default figure size
plt.rcParams['figure.figsize'] = [17.0, 7.0]
# Plot the efficient frontier for the default values. The plot points out the expected shortfall and variance of the
# optimal strategy for the given the trader's risk aversion. Valid range for the trader's risk aversion (1e-7, 1e-4).
utils.plot_efficient_frontier(tr_risk = 1e-6)
# -
| finance/Efficient Frontier.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Skip-gram word2vec
#
# In this notebook, I'll lead you through using TensorFlow to implement the word2vec algorithm using the skip-gram architecture. By implementing this, you'll learn about embedding words for use in natural language processing. This will come in handy when dealing with things like machine translation.
#
# ## Readings
#
# Here are the resources I used to build this notebook. I suggest reading these either beforehand or while you're working on this material.
#
# * A really good [conceptual overview](http://mccormickml.com/2016/04/19/word2vec-tutorial-the-skip-gram-model/) of word2vec from <NAME>
# * [First word2vec paper](https://arxiv.org/pdf/1301.3781.pdf) from Mikolov et al.
# * [NIPS paper](http://papers.nips.cc/paper/5021-distributed-representations-of-words-and-phrases-and-their-compositionality.pdf) with improvements for word2vec also from Mikolov et al.
# * An [implementation of word2vec](http://www.thushv.com/natural_language_processing/word2vec-part-1-nlp-with-deep-learning-with-tensorflow-skip-gram/) from Thushan Ganegedara
# * TensorFlow [word2vec tutorial](https://www.tensorflow.org/tutorials/word2vec)
#
# ## Word embeddings
#
# When you're dealing with words in text, you end up with tens of thousands of classes to predict, one for each word. Trying to one-hot encode these words is massively inefficient: you'll have one element set to 1 and the other 50,000 set to 0. The matrix multiplication going into the first hidden layer will have almost all of the resulting values be zero. This is a huge waste of computation.
#
# 
#
# To solve this problem and greatly increase the efficiency of our networks, we use what are called embeddings. Embeddings are just a fully connected layer like you've seen before. We call this layer the embedding layer and the weights are embedding weights. We skip the multiplication into the embedding layer by instead directly grabbing the hidden layer values from the weight matrix. We can do this because the multiplication of a one-hot encoded vector with a matrix returns the row of the matrix corresponding to the index of the "on" input unit.
#
# 
#
# Instead of doing the matrix multiplication, we use the weight matrix as a lookup table. We encode the words as integers, for example "heart" is encoded as 958, "mind" as 18094. Then to get hidden layer values for "heart", you just take the 958th row of the embedding matrix. This process is called an **embedding lookup** and the number of hidden units is the **embedding dimension**.
#
# <img src='assets/tokenize_lookup.png' width=500>
#
# There is nothing magical going on here. The embedding lookup table is just a weight matrix. The embedding layer is just a hidden layer. The lookup is just a shortcut for the matrix multiplication. The lookup table is trained just like any weight matrix as well.
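# A tiny `numpy` illustration (separate from the rest of this notebook) of why the lookup is equivalent to the matrix multiplication:
# +
import numpy as np
W = np.arange(12).reshape(4, 3)    # toy "embedding matrix": 4 words, 3 hidden units
one_hot = np.array([0, 0, 1, 0])   # one-hot vector for word index 2
print(one_hot @ W)                 # multiplying by the one-hot vector...
print(W[2])                        # ...selects exactly row 2 of the matrix
# -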
#
# Embeddings aren't only used for words of course. You can use them for any model where you have a massive number of classes. A particular type of model called **Word2Vec** uses the embedding layer to find vector representations of words that contain semantic meaning.
#
#
# ## Word2Vec
#
# The word2vec algorithm finds much more efficient representations by finding vectors that represent the words. These vectors also contain semantic information about the words. Words that show up in similar contexts, such as "black", "white", and "red" will have vectors near each other. There are two architectures for implementing word2vec, CBOW (Continuous Bag-Of-Words) and Skip-gram.
#
# <img src="assets/word2vec_architectures.png" width="500">
#
# In this implementation, we'll be using the skip-gram architecture because it performs better than CBOW. Here, we pass in a word and try to predict the words surrounding it in the text. In this way, we can train the network to learn representations for words that show up in similar contexts.
#
# First up, importing packages.
# +
import time
import numpy as np
import tensorflow as tf
import utils
# -
# Load the [text8 dataset](http://mattmahoney.net/dc/textdata.html), a file of cleaned up Wikipedia articles from <NAME>. The next cell will download the data set to the `data` folder. Then you can extract it and delete the archive file to save storage space.
# +
from urllib.request import urlretrieve
from os.path import isfile, isdir
from tqdm import tqdm
import zipfile
dataset_folder_path = 'data'
dataset_filename = 'text8.zip'
dataset_name = 'Text8 Dataset'
class DLProgress(tqdm):
last_block = 0
def hook(self, block_num=1, block_size=1, total_size=None):
self.total = total_size
self.update((block_num - self.last_block) * block_size)
self.last_block = block_num
if not isfile(dataset_filename):
with DLProgress(unit='B', unit_scale=True, miniters=1, desc=dataset_name) as pbar:
urlretrieve(
'http://mattmahoney.net/dc/text8.zip',
dataset_filename,
pbar.hook)
if not isdir(dataset_folder_path):
with zipfile.ZipFile(dataset_filename) as zip_ref:
zip_ref.extractall(dataset_folder_path)
with open('data/text8') as f:
text = f.read()
# -
# ## Preprocessing
#
# Here I'm fixing up the text to make training easier. This comes from the `utils` module I wrote. The `preprocess` function converts any punctuation into tokens, so a period is changed to ` <PERIOD> `. In this data set, there aren't any periods, but it will help in other NLP problems. I'm also removing all words that show up five or fewer times in the dataset. This will greatly reduce issues due to noise in the data and improve the quality of the vector representations. If you want to write your own functions for this stuff, go for it.
words = utils.preprocess(text)
print(words[:30])
print("Total words: {}".format(len(words)))
print("Unique words: {}".format(len(set(words))))
# And here I'm creating dictionaries to convert words to integers and back again, integers to words. The integers are assigned in descending frequency order, so the most frequent word ("the") is given the integer 0 and the next most frequent is 1 and so on. The words are converted to integers and stored in the list `int_words`.
vocab_to_int, int_to_vocab = utils.create_lookup_tables(words)
int_words = [vocab_to_int[word] for word in words]
# ## Subsampling
#
# Words that show up often such as "the", "of", and "for" don't provide much context to the nearby words. If we discard some of them, we can remove some of the noise from our data and in return get faster training and better representations. This process is called subsampling by Mikolov. For each word $w_i$ in the training set, we'll discard it with probability given by
#
# $$ P(w_i) = 1 - \sqrt{\frac{t}{f(w_i)}} $$
#
# where $t$ is a threshold parameter and $f(w_i)$ is the frequency of word $w_i$ in the total dataset.
#
# I'm going to leave this up to you as an exercise. Check out my solution to see how I did it.
#
# > **Exercise:** Implement subsampling for the words in `int_words`. That is, go through `int_words` and discard each word with the probability $P(w_i)$ shown above. Note that $P(w_i)$ is the probability that a word is discarded. Assign the subsampled data to `train_words`.
# +
from collections import Counter
import random
threshold = 1e-5
word_counts = Counter(int_words)
total_count = len(int_words)
freqs = {word: count/total_count for word, count in word_counts.items()}
p_drop = {word: 1 - np.sqrt(threshold/freqs[word]) for word in word_counts}
print(max(p_drop.values()))
print(min(p_drop.values()))
train_words = [word for word in int_words if random.random() < (1 - p_drop[word])]
# -
print(train_words[:5])
print(len(train_words))
# ## Making batches
# Now that our data is in good shape, we need to get it into the proper form to pass it into our network. With the skip-gram architecture, for each word in the text, we want to grab all the words in a window around that word, with size $C$.
#
# From [Mikolov et al.](https://arxiv.org/pdf/1301.3781.pdf):
#
# "Since the more distant words are usually less related to the current word than those close to it, we give less weight to the distant words by sampling less from those words in our training examples... If we choose $C = 5$, for each training word we will select randomly a number $R$ in range $< 1; C >$, and then use $R$ words from history and $R$ words from the future of the current word as correct labels."
#
# > **Exercise:** Implement a function `get_target` that receives a list of words, an index, and a window size, then returns a list of words in the window around the index. Make sure to use the algorithm described above, where you choose a random number of words from the window.
def get_target(words, idx, window_size=5):
''' Get a list of words in a window around an index. '''
R = np.random.randint(1, window_size+1)
start = idx - R if (idx - R) > 0 else 0
stop = idx + R
target_words = set(words[start:idx] + words[idx+1:stop+1])
return list(target_words)
# Here's a function that returns batches for our network. The idea is that it grabs `batch_size` words from a words list. Then for each of those words, it gets the target words in the window. I haven't found a way to pass in a random number of target words and get it to work with the architecture, so I make one row per input-target pair. This is a generator function, by the way, which helps save memory.
def get_batches(words, batch_size, window_size=5):
''' Create a generator of word batches as a tuple (inputs, targets) '''
n_batches = len(words)//batch_size
# only full batches
words = words[:n_batches*batch_size]
for idx in range(0, len(words), batch_size):
x, y = [], []
batch = words[idx:idx+batch_size]
for ii in range(len(batch)):
batch_x = batch[ii]
batch_y = get_target(batch, ii, window_size)
y.extend(batch_y)
x.extend([batch_x]*len(batch_y))
yield x, y
# ## Building the graph
#
# From [Chris McCormick's blog](http://mccormickml.com/2016/04/19/word2vec-tutorial-the-skip-gram-model/), we can see the general structure of our network.
# 
#
# The input words are passed in as one-hot encoded vectors. This will go into a hidden layer of linear units, then into a softmax layer. We'll use the softmax layer to make a prediction like normal.
#
# The idea here is to train the hidden layer weight matrix to find efficient representations for our words. We can discard the softmax layer because we don't really care about making predictions with this network. We just want the embedding matrix so we can use it in other networks we build from the dataset.
#
# I'm going to have you build the graph in stages now. First off, creating the `inputs` and `labels` placeholders like normal.
#
# > **Exercise:** Assign `inputs` and `labels` using `tf.placeholder`. We're going to be passing in integers, so set the data types to `tf.int32`. The batches we're passing in will have varying sizes, so set the batch sizes to [`None`]. To make things work later, you'll need to set the second dimension of `labels` to `None` or `1`.
train_graph = tf.Graph()
with train_graph.as_default():
inputs = tf.placeholder(tf.int32, [None], name='inputs')
labels = tf.placeholder(tf.int32, [None, None], name='labels')
# ## Embedding
#
#
# The embedding matrix has a size of the number of words by the number of units in the hidden layer. So, if you have 10,000 words and 300 hidden units, the matrix will have size $10,000 \times 300$. Remember that we're using tokenized data for our inputs, usually as integers, where the number of tokens is the number of words in our vocabulary.
#
#
# > **Exercise:** Tensorflow provides a convenient function [`tf.nn.embedding_lookup`](https://www.tensorflow.org/api_docs/python/tf/nn/embedding_lookup) that does this lookup for us. You pass in the embedding matrix and a tensor of integers, then it returns rows in the matrix corresponding to those integers. Below, set the number of embedding features you'll use (200 is a good start), create the embedding matrix variable, and use `tf.nn.embedding_lookup` to get the embedding tensors. For the embedding matrix, I suggest you initialize it with a uniform random numbers between -1 and 1 using [tf.random_uniform](https://www.tensorflow.org/api_docs/python/tf/random_uniform).
n_vocab = len(int_to_vocab)
n_embedding = 200 # Number of embedding features
with train_graph.as_default():
embedding = tf.Variable(tf.random_uniform((n_vocab, n_embedding), -1, 1))
embed = tf.nn.embedding_lookup(embedding, inputs)
# ## Negative sampling
#
#
# For every example we give the network, we train it using the output from the softmax layer. That means for each input, we're making very small changes to millions of weights even though we only have one true example. This makes training the network very inefficient. We can approximate the loss from the softmax layer by only updating a small subset of all the weights at once. We'll update the weights for the correct label, but only a small number of incorrect labels. This is called ["negative sampling"](http://papers.nips.cc/paper/5021-distributed-representations-of-words-and-phrases-and-their-compositionality.pdf). Tensorflow has a convenient function to do this, [`tf.nn.sampled_softmax_loss`](https://www.tensorflow.org/api_docs/python/tf/nn/sampled_softmax_loss).
#
# > **Exercise:** Below, create weights and biases for the softmax layer. Then, use [`tf.nn.sampled_softmax_loss`](https://www.tensorflow.org/api_docs/python/tf/nn/sampled_softmax_loss) to calculate the loss. Be sure to read the documentation to figure out how it works.
# Number of negative labels to sample
n_sampled = 100
with train_graph.as_default():
softmax_w = tf.Variable(tf.truncated_normal((n_vocab, n_embedding), stddev=0.1))
softmax_b = tf.Variable(tf.zeros(n_vocab))
# Calculate the loss using negative sampling
loss = tf.nn.sampled_softmax_loss(softmax_w, softmax_b,
labels, embed,
n_sampled, n_vocab)
cost = tf.reduce_mean(loss)
optimizer = tf.train.AdamOptimizer().minimize(cost)
# ## Validation
#
# This code is from <NAME>'s implementation. Here we're going to choose a few common words and a few uncommon words. Then, we'll print out the closest words to them. It's a nice way to check that our embedding table is grouping together words with similar semantic meanings.
with train_graph.as_default():
## From <NAME>'s implementation
valid_size = 16 # Random set of words to evaluate similarity on.
valid_window = 100
# pick 8 samples from (0,100) and (1000,1100) each ranges. lower id implies more frequent
valid_examples = np.array(random.sample(range(valid_window), valid_size//2))
valid_examples = np.append(valid_examples,
random.sample(range(1000,1000+valid_window), valid_size//2))
valid_dataset = tf.constant(valid_examples, dtype=tf.int32)
# We use the cosine distance:
norm = tf.sqrt(tf.reduce_sum(tf.square(embedding), 1, keep_dims=True))
normalized_embedding = embedding / norm
valid_embedding = tf.nn.embedding_lookup(normalized_embedding, valid_dataset)
similarity = tf.matmul(valid_embedding, tf.transpose(normalized_embedding))
# If the checkpoints directory doesn't exist:
# !mkdir checkpoints
# +
epochs = 10
batch_size = 1000
window_size = 10
with train_graph.as_default():
saver = tf.train.Saver()
with tf.Session(graph=train_graph) as sess:
iteration = 1
loss = 0
sess.run(tf.global_variables_initializer())
for e in range(1, epochs+1):
batches = get_batches(train_words, batch_size, window_size)
start = time.time()
for x, y in batches:
feed = {inputs: x,
labels: np.array(y)[:, None]}
train_loss, _ = sess.run([cost, optimizer], feed_dict=feed)
loss += train_loss
if iteration % 100 == 0:
end = time.time()
print("Epoch {}/{}".format(e, epochs),
"Iteration: {}".format(iteration),
"Avg. Training loss: {:.4f}".format(loss/100),
"{:.4f} sec/batch".format((end-start)/100))
loss = 0
start = time.time()
if iteration % 1000 == 0:
# note that this is expensive (~20% slowdown if computed every 500 steps)
sim = similarity.eval()
for i in range(valid_size):
valid_word = int_to_vocab[valid_examples[i]]
top_k = 8 # number of nearest neighbors
nearest = (-sim[i, :]).argsort()[1:top_k+1]
log = 'Nearest to %s:' % valid_word
for k in range(top_k):
close_word = int_to_vocab[nearest[k]]
log = '%s %s,' % (log, close_word)
print(log)
iteration += 1
save_path = saver.save(sess, "checkpoints/text8.ckpt")
embed_mat = sess.run(normalized_embedding)
# -
# Restore the trained network if you need to:
# +
with train_graph.as_default():
saver = tf.train.Saver()
with tf.Session(graph=train_graph) as sess:
saver.restore(sess, tf.train.latest_checkpoint('checkpoints'))
embed_mat = sess.run(embedding)
# -
# ## Visualizing the word vectors
#
# Below we'll use T-SNE to visualize how our high-dimensional word vectors cluster together. T-SNE is used to project these vectors into two dimensions while preserving local structure. Check out [this post from <NAME>](http://colah.github.io/posts/2014-10-Visualizing-MNIST/) to learn more about T-SNE and other ways to visualize high-dimensional data.
# +
# %matplotlib inline
# %config InlineBackend.figure_format = 'retina'
import matplotlib.pyplot as plt
from sklearn.manifold import TSNE
# -
viz_words = 500
tsne = TSNE()
embed_tsne = tsne.fit_transform(embed_mat[:viz_words, :])
fig, ax = plt.subplots(figsize=(14, 14))
for idx in range(viz_words):
plt.scatter(*embed_tsne[idx, :], color='steelblue')
plt.annotate(int_to_vocab[idx], (embed_tsne[idx, 0], embed_tsne[idx, 1]), alpha=0.7)
| embeddings/Skip-Grams-Solution.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [Root]
# language: python
# name: Python [Root]
# ---
id = [1,2,3,4]; friend = [5,6,7,8]  # note: `id` shadows the built-in id() function
id_list = zip(id, friend)  # zip() pairs up the elements and returns a lazy iterator
list(id_list)  # materializing the iterator; a second list(id_list) would be empty
id_list_2 = [(1, 5), (2, 6), (3, 7), (4, 8)]
sorted(id_list_2, reverse=True)
for i, j in zip(id, friend):
print(i, j)
# +
alist = ['a1', 'a2', 'a3']
blist = ['b1', 'b2', 'b3']
for a, b in zip(alist, blist):
print( a, b)
# -
c = [1,6,2,7,8,3,2]
sorted(c)
from collections import Counter, defaultdict
# ?defaultdict
def sum_prod(x,y):
return (x+y), (x*y)
sum_prod(4,8)
d = Counter('abcdeabcdabcaba')
d.most_common(3)
c
| data-science-from-scratch/Ch 1 introduction.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [default]
# language: python
# name: python3
# ---
# # 2.0: Reproducible Data Sources
# "In God we trust. All others must bring data.” – <NAME>"
# %load_ext autoreload
# %autoreload 2
import logging
from src.logging import logger
logger.setLevel(logging.INFO)
# # Introducing the `DataSource`
# The `DataSource` object handles downloading, unpacking, and processing raw data files, and serves as a container for some basic metadata about the raw data, including **documentation** and **license** information.
#
# Raw data files are downloaded to `paths.raw_data_path`.
# Cache files and unpacked raw files are saved to `paths.interim_data_path`.
#
# ## Example: LVQ-Pak, a Finnish phonetic dataset
# The Learning Vector Quantization (lvq-pak) project includes a simple Finnish phonetic dataset
# consisting of 20-dimensional Mel Frequency Cepstrum Coefficients (MFCCs) labelled with target phoneme information. Our goal is to explore this dataset, process it into a useful form, and make it a part of a reproducible data science workflow. The project can be found at: http://www.cis.hut.fi/research/lvq_pak/
#
#
#
# For this example, we are going to create a `DataSource` for the LVQ-Pak dataset. The process will consist of
# 1. Downloading and unpacking the raw data files.
# 2. Generating (and recording) hash values for these files.
# 3. Adding LICENSE and DESCR (description) metadata to this DataSource
# 4. Adding the complete `DataSource` to the Catalog
#
# ### Downloading Raw Data Source Files
from src.data import DataSource
from src.utils import list_dir
from src import paths
# Create a data source object
datasource_name = 'lvq-pak'
dsrc = DataSource(datasource_name)
# Add URL(s) for raw data files
dsrc.add_url("http://www.cis.hut.fi/research/lvq_pak/lvq_pak-3.1.tar")
# Fetch the files
logger.setLevel(logging.DEBUG)
dsrc.fetch()
# By default, data files are downloaded to the `paths.raw_data_path` directory:
# !ls -la $paths.raw_data_path
# Since we did not specify a hash, or target filename, these are inferred from the downloaded file:
dsrc.file_list
# ### Remove a file from the file_list
# Note that if we add a url again, we end up with more of the same file in the file list
dsrc.add_url("http://www.cis.hut.fi/research/lvq_pak/lvq_pak-3.1.tar")
dsrc.file_list
dsrc.fetch()
# Fetch is smart enough to not redownload the same file in this case. Still, this is messy and cumbersome. We can remove entries by removing them from the `file_list`.
dsrc.file_list.pop(1)
dsrc.file_list
dsrc.fetch(force=True)
# ### Sometimes we make mistakes when entering information
dsrc.add_url("http://www.cis.hut.fi/research/lvq_pak/lvq_pak-3.1.tar", name='cat', file_name='dog')
dsrc.file_list
dsrc.fetch()
# !ls -la $paths.raw_data_path
# We now have a copy of `lvq_pak-3.1.tar` called `dog`. Every time we fetch, we will fetch twice unless we get rid of the entry for `dog`.
#
# First, we will want to remove `dog` from our raw data.
#
# Let's take the "Nuke it from orbit. It's the only way to be sure" approach and clean our entire raw data directory.
# !cd .. && make clean_raw
# !ls -la $paths.raw_data_path
# The other option would have been to manually remove the `dog` file and then forced a refetch.
# ### Exercise: Remove the entry for dog and refetch
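# One possible solution, reusing the `pop()` / `fetch(force=True)` pattern from above (check
# `dsrc.file_list` first - the index of the `dog` entry may differ):
dsrc.file_list.pop(1)
dsrc.fetch(force=True)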
# You should now only see the lvq_pak-3.1.tar file
# !ls -la $paths.raw_data_path
# #### Cached Downloads
# The DataSource object keeps track of whether the fetch has been performed successfully. Subsequent downloads will be skipped by default:
dsrc.fetch()
# We can override this, which will check if the downloaded file exists, redownloading if necessary
dsrc.fetch(force=True)
# In the previous case, the raw data file existed on the filesystem, and had the correct hash. If the local file has a checksum that doesn't match the saved hash, it will be re-downloaded automatically. Let's corrupt the file and see what happens.
# !echo "XXX" >> $paths.raw_data_path/lvq_pak-3.1.tar
dsrc.fetch(force=True)
# ## Exercise: Creating an F-MNIST `DataSource`
# For this exercise, you are going to build a `DataSource` out of the Fashion-MNIST dataset.
#
# [Fashion-MNIST][FMNIST] is available from GitHub. Looking at their [README], we see that the raw data is distributed as a set of 4 files with the following checksums:
#
# [FMNIST]: https://github.com/zalandoresearch/fashion-mnist
# [README]: https://github.com/zalandoresearch/fashion-mnist/blob/master/README.md
#
# | Name | Content | Examples | Size | Link | MD5 Checksum|
# | --- | --- |--- | --- |--- |--- |
# | `train-images-idx3-ubyte.gz` | training set images | 60,000|26 MBytes | [Download](http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/train-images-idx3-ubyte.gz)|`8d4fb7e6c68d591d4c3dfef9ec88bf0d`|
# | `train-labels-idx1-ubyte.gz` | training set labels |60,000|29 KBytes | [Download](http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/train-labels-idx1-ubyte.gz)|`25c81989df183df01b3e8a0aad5dffbe`|
# | `t10k-images-idx3-ubyte.gz` | test set images | 10,000|4.3 MBytes | [Download](http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/t10k-images-idx3-ubyte.gz)|`bef4ecab320f06d8554ea6380940ec79`|
# | `t10k-labels-idx1-ubyte.gz` | test set labels | 10,000| 5.1 KBytes | [Download](http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/t10k-labels-idx1-ubyte.gz)|`bb300cfdad3c16e7a12a480ee83cd310`|
#
# By the end of this running example, you will build a `DataSource` that downloads these raw files and verifies that the hash values are as expected. You should make sure to include **Description** and **License** metadata in this `DataSource`. When you are finished, save the `DataSource` to the Catalog.
# ### Exercise: Download Raw Data Source Files for F-MNIST
# Create an fmnist data source object
# Add URL(s) for raw data files
# Note that you will be adding four files to the DataSource object
# and that the hash values have already been provided above!
# Fetch the files
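# A minimal sketch of one possible solution, using only the `DataSource` calls demonstrated above
# (the variable name `fmnist` is our choice; without explicit hashes, the checksums are inferred on
# download - check the `add_url` signature if you want to supply the published MD5 values directly):
fmnist = DataSource('fmnist')
fmnist_base = 'http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/'
for fname in ['train-images-idx3-ubyte.gz', 'train-labels-idx1-ubyte.gz',
              't10k-images-idx3-ubyte.gz', 't10k-labels-idx1-ubyte.gz']:
    fmnist.add_url(fmnist_base + fname)
fmnist.fetch()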
# Check for your new files
# !ls -la $paths.raw_data_path
# ### Unpacking Raw Data Files
unpack_dir = dsrc.unpack()
# By default, files are decompressed/unpacked to the `paths.interim_data_path`/`datasource_name` directory:
# !ls -la $paths.interim_data_path
# +
# We unpack everything into interim_data_path/datasource_name, which is returned by `unpack()`
# -
# !ls -la $unpack_dir
# !ls -la $unpack_dir/lvq_pak-3.1
# ### Exercise: Unpack raw data files for F-MNIST
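# A sketch using the same `unpack()` call shown for lvq-pak (assumes the `fmnist` DataSource from the previous exercise sketch):
fmnist_unpack_dir = fmnist.unpack()
# !ls -la $fmnist_unpack_dir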
# Check for your files in the unpacked dirs
# ### Adding Metadata to Raw Data
# Wait, what have we actually downloaded, and are we actually allowed to **use** this data? We keep track of two key pieces of metadata along with a raw dataset:
# * Description (`DESCR`) Text: Human-readable text describing the dataset, its source, and what it represents
# * License (`LICENSE`) Text: Terms of use for this dataset, often in the form of a license agreement
# Often, a dataset comes complete with its own README and LICENSE files. If these are available via URL, we can add these like we add any other data file, tagging them as metadata using the `name` field:
dsrc.add_url("http://www.cis.hut.fi/research/lvq_pak/README",
file_name='lvq-pak.readme', name='DESCR')
dsrc.fetch()
dsrc.unpack()
# We now fetch 2 files. Note the metadata has been tagged accordingly in the `name` field
dsrc.file_list
# We need to dig a little deeper to find the license. We find it at the beginning of the README file contained within that distribution:
# !head -35 $paths.interim_data_path/lvq-pak/lvq_pak-3.1/README
# Rather than trying to be clever, let's just add the license metadata from a python string that we cut and paste from the above.
license_txt = '''
************************************************************************
* *
* LVQ_PAK *
* *
* The *
* *
* Learning Vector Quantization *
* *
* Program Package *
* *
* Version 3.1 (April 7, 1995) *
* *
* Prepared by the *
* LVQ Programming Team of the *
* Helsinki University of Technology *
* Laboratory of Computer and Information Science *
* Rakentajanaukio 2 C, SF-02150 Espoo *
* FINLAND *
* *
* Copyright (c) 1991-1995 *
* *
************************************************************************
* *
* NOTE: This program package is copyrighted in the sense that it *
* may be used for scientific purposes. The package as a whole, or *
* parts thereof, cannot be included or used in any commercial *
* application without written permission granted by its producents. *
* No programs contained in this package may be copied for commercial *
* distribution. *
* *
* All comments concerning this program package may be sent to the *
* e-mail address '<EMAIL>'. *
* *
************************************************************************
'''
dsrc.add_metadata(contents=license_txt, kind='LICENSE')
# Under the hood, this will create a file, storing the creation instructions in the same `file_list` we use to store the URLs we wish to download:
dsrc.file_list
# Now when we fetch, the license file is created from this information:
logger.setLevel(logging.DEBUG)
dsrc.fetch(force=True)
dsrc.unpack()
# !ls -la $paths.raw_data_path
# ### Exercise: Add metadata to F-MNIST
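# A sketch following the `add_metadata()` pattern above (assumes the `fmnist` DataSource from the
# earlier sketch). The strings here are placeholders - fill them in from the Fashion-MNIST README
# and license linked in the table above:
fmnist.add_metadata(contents='Fashion-MNIST: a drop-in replacement for MNIST built from Zalando article images.', kind='DESCR')
fmnist.add_metadata(contents='See the Fashion-MNIST repository for the full license text.', kind='LICENSE')
fmnist.fetch(force=True)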
# ### Adding Raw Data to the Catalog
from src import workflow
workflow.available_datasources()
workflow.add_datasource(dsrc)
workflow.available_datasources()
# We will make use of this raw dataset catalog later in this tutorial. We can now load our `DataSource` by name:
ds = DataSource.from_name('lvq-pak')
ds.file_list
# ### Exercise: Add F-MNIST to the Raw Dataset Catalog
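# One possible solution, mirroring the `workflow.add_datasource()` call above (assumes the `fmnist` DataSource from the earlier sketch):
workflow.add_datasource(fmnist)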
# Your fmnist dataset should now show up here:
workflow.available_datasources()
# ### Nuke it from Orbit
#
# Now we can blow away all the data that we've downloaded and set up so far, and recreate it from the workflow datasource. Or, use some of our `make` commands!
# !cd .. && make clean_raw
# !ls -la $paths.raw_data_path
# !cd .. && make fetch_sources
# !ls -la $paths.raw_data_path
# What about fetch and unpack?
# !cd .. && make clean_raw && make clean_interim
# !ls -la $paths.raw_data_path
# !cd .. && make unpack_sources
# !ls -la $paths.raw_data_path
# !ls -la $paths.interim_data_path
# ### Your data is now reproducible!
| notebooks/20-creating-datasets.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Center for Continuing Education
#
# # Program «Python for Automation and Data Analysis»
#
# ## String Methods
#
# Week 2 - 2
#
# *Author: <NAME>, NRU HSE*\
# *Additions: <NAME>, Yandex.Market*
# ## Strings: slices and methods
# Strings in Python are used to store and display text information.
# A string is a sequence of characters with a fixed order,
# which means Python keeps track of the position of every element in the string. This lets us, for example,
# index strings conveniently. To create a string, use single or double quotes.
# Let's create a string.
# +
s = 'Welcome to Brasil!' # single quotes
print(s)
s = "Welcome to Brasil!" # double quotes
print(s)
# -
# If we want to use quotation marks inside a string, for example for some title,
# we should use different quotes for the text inside the string and for the string itself.
s = 'Welcome to "Brasil!"' # This is the correct way (or the other way round - double quotes outside and single quotes inside)
print(s)
# You can also count the number of characters in a string (its length) with the len() function
f = 'gfhdsjk. '
len(f)
# A string is a sequence, which means we can access any of its elements by index.
# In Python this is done with square brackets [] after the object.
# The desired index goes inside the square brackets. Indexing starts at 0.
# +
print(s[0]) # first element
print(s[1]) # second
print(s[2]) # third
print(s[-1]) # last
print(s[-2]) # second from the end
# The previous operations did not change the string in any way
print(s)
# -
# Besides selecting a single element by index, we can also get a substring.
# To do that, specify the boundary indices of the substring separated by a colon.
#
# The first number is the index the slice starts from; if omitted, the slice starts at the beginning. This index is included in the slice. The second number (after the first colon) is the index where the slice ends; if omitted, Python goes up to the last character. This bound is exclusive - if you take the slice [0:2], Python returns only the two characters at indices 0 and 1. The third number (optional, after the second colon) is the step, which defaults to 1 (every character is taken).
#
# So a single number without colons gives one character. Two numbers separated by a colon give a slice that includes the first index but not the second (the first number must be smaller than the second). Three numbers separated by two colons give a slice with the step set by the third number.
s
s = 'asd'
s[0::len(s)-1]
print(s[1:])
print(s[:4]) # the first four characters, up to index 4
print(s[:]) # a copy of the string (slice from the first to the last character)
print(s[:-1]) # the whole string except the last character
print(s[::2]) # you can also select characters from the string with a step
print(s[::-1]) # for example, a step of -1 gives the string reversed
# Note that a string is an immutable object. If you try to replace the first letter of a string, it will not work.
# ## String Methods
# Now let's move on to more interesting tasks. Suppose we need to find some word in a string. We can check whether a substring occurs in a string using the special keyword in.
# +
lyrics = '''The unlocked variant of the Galaxy Z Flip was quickly sold in stores as well
as on Samsung’s website in the US after the launch. The company had also claimed
that it was overwhelmed with the response it received from consumers for its second
foldable phone. Now, the South Korean smartphone giant says that the Galaxy Z Flip
will be back in stock in the US tomorrow.'''
print('smartphone' in lyrics)
# -
# If we had many news items, we could write code that goes into each one and adds the item to the number of mentions whenever the company name occurs in it. We don't yet know how to work with data types that can hold several strings, so let's practice writing a while loop once more. Suppose five news items arrived for review this morning.
# +
mentions = 0 # variable in which we will count the mentions
news_number = 1 # news counter, start with the first item
while news_number <= 5:
news = input()
if 'Samsung' in news:
mentions += 1
news_number += 1
print(mentions)
# -
# Note that the news item where samsung is written in lowercase was not counted. Remember, Python is case-sensitive. We will learn how to deal with this problem a bit further down.
#
# Working with strings, we will use a lot of methods. Methods differ from functions in that they are called on a variable via a dot. For example, news.upper() - the upper() method is called on the string variable news. Essentially, methods are functions that apply only to a particular data type. The print() function, for example, will print whatever we pass to it, while converting to uppercase (which is what the upper() method does) will not work on any data type other than a string.
# +
news = '''The unlocked variant of the Galaxy Z Flip was quickly sold in stores as well
as on Samsung’s website in the US after the launch. The company had also claimed
that it was overwhelmed with the response it received from consumers for its second
foldable phone. Now, the South Korean smartphone giant says that the Galaxy Z Flip
will be back in stock in the US tomorrow.'''
print(news.upper()) # converts the string to uppercase
print(news.lower()) # converts the string to lowercase
# -
# Note that a method usually does not change the object itself - our string is still the same as it was.
news
# We will look at the most basic string methods, which will later let us solve more interesting problems.
#
# **find()**
# The find() method returns the index where a character occurs in a string (or the index of the first character of a substring, if we are searching for a substring). Knowing the index of the first element, we can extract the information we are interested in.
#
# For example, we downloaded the price of a new tablet from a website and want to extract the price itself. We know that the price comes after the substring "ЦЕНА:" ("PRICE:") and that the price itself is followed by the suffix "руб." (rubles).
info = 'iPad 64 GB ЦЕНА: 39 990 руб. Скидка: 5% ЦЕНА:'
print(info.find('ЦЕНА:')) # found the index of Ц - the start of the substring "ЦЕНА:"
print(info.find('руб.')) # found the index of р
price = info[info.find('ЦЕНА:')+6:info.find('руб.')-1] # took the slice from the start to the end of the price (the offsets
# +6 and -1 adjust the indices to the start and end of the price)
print(price)
# By the way, if the substring does not occur in the string, find() returns -1. This does not mean that the first character is in the last position; it means that the substring is not found in the string.
d = 'fgh ooo'
# And if the substring occurs in the string several times, find() returns the index of the first occurrence only.
info.find('9')
# There is a variant of find(): rfind(substring) returns the position of the rightmost occurrence of substring in the string, or -1 if the substring is not found.
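# For example, the info string defined above contains "ЦЕНА:" twice, so find() and rfind() return different indices:
print(info.find('ЦЕНА:')) # index of the first occurrence
print(info.rfind('ЦЕНА:')) # index of the last occurrence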
type(price)
price
price = price.replace(' ', '')
int(price)
# Let's continue with our mini-parsing. We found the price and can almost work with it (say we want to compute something). But the space in the middle prevents us from turning it into an integer. This is where the **replace()** method helps. It lets us replace a character or a substring within a string. The method takes two arguments - what to replace and what to replace it with.
print(price.replace(' ', '')) # replace the space with "nothing"
int(price.replace(' ', '')) * 2 # hooray! now we can convert the price to int and find out how much two tablets cost
# The replace() method replaces all occurrences unless we set a limit with an optional third argument, which caps the number of replacements.
print(price.replace('9', '1')) # replace all the nines with 1
print(price.replace('9', '1', 2)) # replaces only the first two nines with 1
# Consider the following task: we have a string that collects the answers to a question
# about the quality of a service. Each answer is separated from the next by a semicolon.
# We need to print each answer on a new line.
# +
s = 'Хорошо;Удовлетворительно;Можно было и лучше;И так сойдет;Восхитительно!'
while len(s) > 0: # keep cutting pieces off the string until its length drops to zero
    idx = s.find(';') # find the index of the first occurrence of ;
    if idx == -1: # if there is no ;, print the string
        print(s)
        #s = '' # replace the string with an empty string of length 0 so the loop does not run another iteration
        break
    else:
        print(s[:idx]) # if we found a ;, print the string from the start up to the ;
        s = s[idx+1:] # now cut off the piece we just printed, together with the ; itself (hence the +1)
        # we do this so that the next iteration of the loop receives the string without that piece
# -
# Of course, that's not all. Strings have many more methods that let you search for patterns and edit them. The **startswith()** and **endswith()** methods check whether the string starts or ends with a given substring.
s = '''We the People of the United States, in Order to form a more perfect Union,
establish Justice, insure domestic Tranquility, provide for the common defence,
promote the general Welfare, and secure the Blessings of Liberty to ourselves and
our Posterity, do ordain and establish this Constitution for the United States of America.'''
print(s)
print(s.startswith('We')) # this method checks whether the string starts with the given substring
# Suppose we have a visitor's review of a cafe. We know in advance that the user
# ends the review with one of only two suggested words: 'good' or 'bad'.
# Let's try to work out whether the client was satisfied.
# +
feedback = 'This place was bad.' # the review itself
if feedback.endswith('bad.'): # if the string ends with 'bad'
    print('Client was disappointed') # then the client is disappointed
else:
    print('Client was satisfied') # otherwise the client liked everything
# -
# Now let's make the task harder. What if the word is not at the end of the sentence? Let's try to find it!
#
# +
feedback2 = 'This place was bad enough'
if feedback2.find('bad') != -1:
    print('Client was disappointed') # then the client is disappointed
else:
    print('Client was satisfied') # otherwise the client liked everything
# -
# The **strip()** method (and its siblings **lstrip()** and **rstrip()**, which work on one side of the string only) removes insignificant characters (spaces, tabs, etc.) from the edges of a string. It is very useful when collecting information from the web. If you pass these methods an argument, they strip the characters listed in that argument from the ends instead.
print(' 135133 '.strip()) # removed the whitespace on the left and right
print('ruhse.ru'.strip('ru')) # removed the characters r and u from both sides
print('ruhse.ru'.lstrip('ru')) # removed them on the left
print('ruhse.ru'.rstrip('ru')) # removed them on the right
# In practice strip is often used to normalize emails or other identifiers
print(' <EMAIL> '.strip())
# Now let's learn to count the number of occurrences of a substring in a string with the **count()** method
#
s = "Mushroooom soup" # исходная строка
print(s.count("O")) # ищем заглавную букву О, не находим
print(s.count("o")) # ищем строчную букву о, находим 5 штук
print(s.count("oo")) # ищем две буквы о подряд, находим две таких подстроки
print(s.count("ooo")) # ищем три букв о подряд, находим одно такое вхождение
print(s.count("push")) # ищем подстроку 'push', не находим
print(s.count("o", 4, 7)) # ищем букву о в s[4:7]
print(s.count("o", 7)) # ищем букву о в s[7:]
# Отдельное семейство методов строк отвечает за проверку на соответствие условиям.
print('ask me a question!'.islower())
# +
# isalpha - checks that all characters in the string are letters.
print('Ask me a question!'.isalpha())
print('Ask'.isalpha())
# isdigit - checks that all characters in the string are digits.
print('13242'.isdigit())
# isalnum - checks that all characters in the string are letters or digits.
print('Ask me a question!'.isalnum())
print('Ask232'.isalnum())
# islower - checks that all letters in the string are lowercase.
print('ask me a question!'.islower())
# isupper - checks that all letters in the string are uppercase.
print('Ask me a question!'.isupper())
# -
# The string methods don't end there. For example, you can change the case of letters or capitalize the first letter of every word
# +
# title - converts the first letter of every word to uppercase
print('ask, hgfjdk? me a question!'.title())
# swapcase - swaps the case of every letter
print('ask me a question!'.swapcase())
# -
info = 'iPad 64 GB ЦЕНА: 39 990 руб. Скидка: 5%'
# capitalize - converts the first letter of the string to uppercase and the rest to lowercase
info.capitalize()
# You may ask: why do we need to check which characters a string consists of? After all, even if a string consists of digits, it does not automatically become a number. Let's look at two situations where it is very useful to know which characters a string contains.
#
# **Situation**
#
# A user has to create a password for their account. The password must consist only of digits and letters.
# +
password = input("Введите пароль: ")
if password.isalnum() == False:
print("Пароль должен состоять только из букв и цифр!")
# -
# Now let's make it harder and add a check that at least one letter must be uppercase.
# +
password = input("Введите пароль: ")
if password.isalnum() == False:
print("Пароль должен состоять только из букв и цифр!")
if password.islower() == True:
print("Пароль должен содержать как минимум одну заглавную букву!")
# -
| 01 python/lect 3 materials/2020_DPO_3_0_strings_methods.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
org_list = [1, 2, 3, 4, 5]
org_list.reverse()  # reverse() reverses the list in place
print(org_list)
print(org_list.reverse())  # reverse() returns None, so this prints None
org_list = [1, 2, 3, 4, 5]
reverse_iterator = reversed(org_list)  # reversed() returns an iterator without modifying the list
print(org_list)
print(type(reverse_iterator))
for i in reversed(org_list):
print(i)
new_list = list(reversed(org_list))
print(org_list)
print(new_list)
org_list = [1, 2, 3, 4, 5]
new_list = org_list[::-1]
print(org_list)
print(new_list)
org_str = 'abcde'
new_str_list = list(reversed(org_str))
print(new_str_list)
new_str = ''.join(list(reversed(org_str)))
print(new_str)
new_str = org_str[::-1]
print(new_str)
org_tuple = (1, 2, 3, 4, 5)
new_tuple = tuple(reversed(org_tuple))
print(new_tuple)
new_tuple = org_tuple[::-1]
print(new_tuple)
| notebook/reverse_reversed.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:anaconda]
# language: python
# name: conda-env-anaconda-py
# ---
# ### *IPCC SR15 scenario assessment*
#
# <img style="float: right; height: 80px; padding-left: 20px;" src="../_static/IIASA_logo.png">
# <img style="float: right; height: 80px;" src="../_static/IAMC_logo.jpg">
#
# # Analysis of short-lived non-CO2 emissions
#
# This notebook plots emissions of CH4, F-gases, BC and SO2 as shown in **Figure 2.7**
# of the IPCC's _"Special Report on Global Warming of 1.5°C"_.
#
# The scenario data used in this analysis can be accessed and downloaded at [https://data.ene.iiasa.ac.at/iamc-1.5c-explorer](https://data.ene.iiasa.ac.at/iamc-1.5c-explorer).
# ## Load `pyam` package and other dependencies
# +
import pandas as pd
import numpy as np
import warnings
import io
import itertools
import yaml
import math
import matplotlib.pyplot as plt
plt.style.use('style_sr15.mplstyle')
# %matplotlib inline
import pyam
from utils import boxplot_by_cat
# -
# ## Import scenario data, categorization and specifications files
#
# The metadata file must be generated from the notebook `sr15_2.0_categories_indicators` included in this repository.
# If the snapshot file has been updated, make sure that you rerun the categorization notebook.
#
# The last cell of this section loads and assigns a number of auxiliary lists as defined in the categorization notebook.
sr1p5 = pyam.IamDataFrame(data='../data/iamc15_scenario_data_world_r1.1.xlsx')
sr1p5.load_metadata('sr15_metadata_indicators.xlsx')
# +
with open("sr15_specs.yaml", 'r') as stream:
specs = yaml.load(stream, Loader=yaml.FullLoader)
rc = pyam.run_control()
for item in specs.pop('run_control').items():
rc.update({item[0]: item[1]})
cats = specs.pop('cats')
all_cats = specs.pop('all_cats')
subcats = specs.pop('subcats')
all_subcats = specs.pop('all_subcats')
plotting_args = specs.pop('plotting_args')
marker = specs.pop('marker')
# -
# ## Downselect scenario ensemble to categories of interest for this assessment
years = [2010, 2030, 2050]
cats.remove('Above 2C')
sr1p5.meta.rename(columns={'Kyoto-GHG|2010 (SAR)': 'kyoto_ghg_2010'}, inplace=True)
df = sr1p5.filter(kyoto_ghg_2010='in range', category=cats, year=years)
# ## Set specifications for filter and plotting
save_name = 'output/fig2.7{}.{}'
filter_args = dict(df=sr1p5, category=cats, marker=None, join_meta=True)
compare_years = [2030, 2050]
base_year = 2010
def plotting_args(name, filetype='png'):
return {'categories': cats, 'column': 'category', 'years': years,
'add_marker': marker,
'save': save_name.format(name, filetype)}
data = []
# ## Plot different emissions pathways by category
ch4 = df.filter(variable='Emissions|CH4').timeseries()
name = 'ch4'
label = 'Global CH4 emissions'
unit = 'MtCH4/yr'
_data = pyam.filter_by_meta(ch4, **filter_args)
fig = boxplot_by_cat(_data, ylabel='{} ({})'.format(label, unit),
**plotting_args('a_{}'.format(name)))
_data['species'] = name
data.append(_data)
fgases = df.filter(variable='Emissions|F-Gases').timeseries()
# +
name = 'f-gases'
label = 'Global F-gas emissions'
unit = 'GtCO2e/yr'
_data = pyam.filter_by_meta(fgases, **filter_args)
fig = boxplot_by_cat(_data, ylabel='{} ({})'.format(label, unit),
**plotting_args('b_{}'.format(name)), legend=False)
_data['species'] = name
data.append(_data)
# -
bc = df.filter(variable='Emissions|BC').timeseries()
# +
name = 'bc'
label = 'Global BC emissions'
unit = 'MtBC'
_data = pyam.filter_by_meta(bc, **filter_args)
fig = boxplot_by_cat(_data, ylabel='{} ({})'.format(label, unit),
**plotting_args('c_{}'.format(name)), legend=False)
_data['species'] = name
data.append(_data)
# -
so2 = df.filter(variable='Emissions|Sulfur').timeseries()
name = 'so2'
label = 'Global SO2 emissions'
unit = 'MtSO2'
_data = pyam.filter_by_meta(so2, **filter_args)
fig = boxplot_by_cat(_data, ylabel='{} ({})'.format(label, unit),
**plotting_args('d_{}'.format(name)), legend=False)
_data['species'] = name
data.append(_data)
# ## Export timeseries data to `xlsx`
data = pd.concat(data).set_index(['species', 'category', 'marker'], append=True)
data.head()
data.reset_index().to_excel('output/fig2.7_data_table.xlsx')
| assessment/sr15_2.3.3_short-lived_climate_forcers.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Example: Compare CZT to FFT
# %load_ext autoreload
# %autoreload 2
# +
import numpy as np
import matplotlib.pyplot as plt
# CZT package
import czt
# https://github.com/garrettj403/SciencePlots
plt.style.use(['science', 'notebook'])
# -
# # Generate Time-Domain Signal for Example
# +
# Time data
t = np.arange(0, 20, 0.1) * 1e-3
dt = t[1] - t[0]
Fs = 1 / dt
N = len(t)
print("Sampling period: {:5.2f} ms".format(dt * 1e3))
print("Sampling frequency: {:5.2f} kHz".format(Fs / 1e3))
print("Nyquist frequency: {:5.2f} kHz".format(Fs / 2 / 1e3))
print("Number of points: {:5d}".format(N))
# +
# Signal data
def model1(t):
"""Exponentially decaying sine wave with higher-order distortion."""
output = (1.0 * np.sin(2 * np.pi * 1e3 * t) +
0.3 * np.sin(2 * np.pi * 2.5e3 * t) +
0.1 * np.sin(2 * np.pi * 3.5e3 * t)) * np.exp(-1e3 * t)
return output
def model2(t):
"""Exponentially decaying sine wave without higher-order distortion."""
output = (1.0 * np.sin(2 * np.pi * 1e3 * t)) * np.exp(-1e3 * t)
return output
sig = model1(t)
# -
# Plot time-domain data
plt.figure()
t_tmp = np.linspace(0, 6, 601) / 1e3
plt.plot(t_tmp*1e3, model1(t_tmp), 'k', lw=0.5, label='Data')
plt.plot(t*1e3, sig, 'ro--', label='Samples')
plt.xlabel("Time (ms)")
plt.ylabel("Signal")
plt.xlim([0, 6])
plt.legend()
plt.title("Time-domain signal");
# # Frequency-domain
# +
sig_fft = np.fft.fftshift(np.fft.fft(sig))
f_fft = np.fft.fftshift(np.fft.fftfreq(N, d=dt))
freq, sig_f = czt.time2freq(t, sig)
plt.figure()
plt.plot(f_fft / 1e3, np.abs(sig_fft), 'k', label='FFT')
plt.plot(freq / 1e3, np.abs(sig_f), 'ro--', label='CZT')
plt.xlabel("Frequency (kHz)")
plt.ylabel("Signal magnitude")
plt.xlim([f_fft.min()/1e3, f_fft.max()/1e3])
plt.legend()
plt.title("Frequency-domain")
plt.savefig("results/freq-domain.png", dpi=600)
plt.figure()
plt.plot(f_fft / 1e3, np.angle(sig_fft), 'k', label='FFT')
plt.plot(freq / 1e3, np.angle(sig_f), 'ro--', label='CZT')
plt.xlabel("Frequency (kHz)")
plt.ylabel("Signal phase")
plt.xlim([f_fft.min()/1e3, f_fft.max()/1e3])
plt.legend()
plt.title("Frequency-domain");
| examples/compare-czt-fft.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# #### Create the following DataFrame and print the values of the 4th row.
import numpy as np
import pandas as pd

df = pd.DataFrame(np.random.randint(10, size = (4, 2)), index = list("ABCD"), columns = ["first", "second"])
df
df.iloc[3]
# ## sWAP cASE
# If the explanation is hard to follow, visit the site link for reference!
# https://www.hackerrank.com/challenges/swap-case/problem
# Print the given sentence with uppercase letters converted to lowercase and lowercase letters converted to uppercase
# #### Example
# - Www.HackerRank.com → wWW.hACKERrANK.COM
# - Pythonist 2 → pYTHONIST 2
# Input values (the solution passes if both of them run correctly)
# - Www.HackerRank.com → wWW.hACKERrANK.COM
# - HackerRank.com presents "Pythonist 2".
problem1 = "Www.HackerRank.com"
problem2 = "Pythonist 2"
problem3 = "HackerRank.com presents Pythonist 2"
def swap_case(text) :
result = []
for i in text :
if i.upper() == i :
result.append(i.lower())
else :
result.append(i.upper())
return "".join(result)
print(swap_case(problem1))
print(swap_case(problem2))
print(swap_case(problem3))
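# For reference (added note, not part of the original exercise): Python strings already have a built-in
# `swapcase` method that does the same thing.
# +
print(problem1.swapcase())
print(problem2.swapcase())
print(problem3.swapcase())
# -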
# Number guessing game
# - Display the prompt "Guess a number between 1 and 100."
# - and have the user enter a number;
# - if the number is greater than the randomly generated number, print "Your number is too big";
# - if it is smaller, print "Your number is too small"; when the guess is correct, print "Congratulations:) You got it!". Write code that keeps repeating until the number is guessed.
# +
import random
number = random.randint(1, 100)
while True :
    guess = int(input("Guess a number between 1 and 100. "))
    if guess > number :
        print("Your number is too big")
    elif guess < number :
        print("Your number is too small")
elif guess == number :
print("Congratulations:) You got it!")
break
# -
# # Using the given string (comma-separated, with no spaces), write programs for the problems below.
#
# names = "이유덕,이재영,권종표,이재영,박민호,강상희,이재영,김지완,최승혁,이성연,박영서,박민호,전경헌,송정환,김재성,이유덕,전경헌"
#
# #1. How many people have the surname Kim and the surname Lee, respectively?
# #2. Print the names with duplicates removed.
names = "이유덕,이재영,권종표,이재영,박민호,강상희,이재영,김지완,최승혁,이성연,박영서,박민호,전경헌,송정환,김재성,이유덕,전경헌"
# +
name_ls = names.split(',')
Lee = []
Kim = []
for i in range(len(name_ls)) :
if name_ls[i][0] == "이" :
Lee.append(name_ls[i])
elif name_ls[i][0] == "김" :
Kim.append(name_ls[i])
print("김 씨는 {}명 입니다.".format(len(Kim)))
print("이 씨는 {}명 입니다.".format(len(Lee)))
# -
name_list = names.split(',')
name_list = list(set(name_list))
", ".join(name_list)
# ### pandas data analysis
#
# tips = sns.load_dataset("tips")
# - download the tips dataset and
# - build a data frame with the sex, tip and total_bill columns, then add a tip_rate column holding the tip-ratio data
import seaborn as sns
import matplotlib.pyplot as plt
tips = sns.load_dataset("tips")
tips.tail()
# +
df = pd.DataFrame(tips, columns = ["sex","tip", "total_bill"])
df["tip_rate"] = round((df.total_bill / (df.tip + df.total_bill)) * 100, 2)
df.tail()
# -
df.groupby("sex").size().reset_index(name = 'count')
index = pd.date_range("20180101", periods = 5)
index
df = pd.DataFrame(index = index, columns = ["Apples", "Oranges"])
df.Apples = [32, 10, 42, 27, 36]
df.Oranges = [10, 40, 19, 16, 18]
df["Total fruit"] = df.Apples + df.Oranges
df
df.plot()
plt.title("Fruit sales title")
plt.xlabel("Date")
plt.ylabel("Number of fruit")
plt.show()
import seaborn as sns
sns.set()
plt.xticks(rotation = 17)
plt.plot(df.Apples, label = "Apple")
plt.plot(df.Oranges, label = "Orange")
ax = plt.plot(df["Total fruit"], label = "Total fruit")
plt.title("Fruit sales title")
plt.xlabel("Date")
plt.ylabel("Number of fruit")
plt.legend(loc = 2)
plt.show()
| Hacker_rank/Easy/180201_Study Quiz.ipynb |
# # Many-body perturbation theory
#
# We assume here that we are only interested in the ground state of the system and
# expand the exact wave function in term of a series of Slater determinants
# $$
# \vert \Psi_0\rangle = \vert \Phi_0\rangle + \sum_{m=1}^{\infty}C_m\vert \Phi_m\rangle,
# $$
# where we have assumed that the true ground state is dominated by the
# solution of the unperturbed problem, that is
# $$
# \hat{H}_0\vert \Phi_0\rangle= W_0\vert \Phi_0\rangle.
# $$
# The state $\vert \Psi_0\rangle$ is not normalized, rather we have used an intermediate
# normalization $\langle \Phi_0 \vert \Psi_0\rangle=1$ since we have $\langle \Phi_0\vert \Phi_0\rangle=1$.
#
#
#
# The Schroedinger equation is
# $$
# \hat{H}\vert \Psi_0\rangle = E\vert \Psi_0\rangle,
# $$
# and multiplying the latter from the left with $\langle \Phi_0\vert $ gives
# $$
# \langle \Phi_0\vert \hat{H}\vert \Psi_0\rangle = E\langle \Phi_0\vert \Psi_0\rangle=E,
# $$
# and subtracting from this equation
# $$
# \langle \Psi_0\vert \hat{H}_0\vert \Phi_0\rangle= W_0\langle \Psi_0\vert \Phi_0\rangle=W_0,
# $$
# and using the fact that the both operators $\hat{H}$ and $\hat{H}_0$ are hermitian
# results in
# $$
# \Delta E=E-W_0=\langle \Phi_0\vert \hat{H}_I\vert \Psi_0\rangle,
# $$
# which is an exact result. We call this quantity the correlation energy.
#
#
#
# This equation forms the starting point for all perturbative derivations. However,
# as it stands it represents nothing but a mere formal rewriting of Schroedinger's equation and is not of much practical use. The exact wave function $\vert \Psi_0\rangle$ is unknown. In order to obtain a perturbative expansion, we need to expand the exact wave function in terms of the interaction $\hat{H}_I$.
#
# Here we have assumed that our model space defined by the operator $\hat{P}$ is one-dimensional, meaning that
# $$
# \hat{P}= \vert \Phi_0\rangle \langle \Phi_0\vert ,
# $$
# and
# $$
# \hat{Q}=\sum_{m=1}^{\infty}\vert \Phi_m\rangle \langle \Phi_m\vert .
# $$
# We can thus rewrite the exact wave function as
# $$
# \vert \Psi_0\rangle= (\hat{P}+\hat{Q})\vert \Psi_0\rangle=\vert \Phi_0\rangle+\hat{Q}\vert \Psi_0\rangle.
# $$
# Going back to the Schroedinger equation, we can rewrite it, by adding and subtracting a term $\omega \vert \Psi_0\rangle$, as
# $$
# \left(\omega-\hat{H}_0\right)\vert \Psi_0\rangle=\left(\omega-E+\hat{H}_I\right)\vert \Psi_0\rangle,
# $$
# where $\omega$ is an energy variable to be specified later.
#
#
# We assume also that the resolvent of $\left(\omega-\hat{H}_0\right)$ exists, that is
# it has an inverse which defined the unperturbed Green's function as
# $$
# \left(\omega-\hat{H}_0\right)^{-1}=\frac{1}{\left(\omega-\hat{H}_0\right)}.
# $$
# We can rewrite Schroedinger's equation as
# $$
# \vert \Psi_0\rangle=\frac{1}{\omega-\hat{H}_0}\left(\omega-E+\hat{H}_I\right)\vert \Psi_0\rangle,
# $$
# and multiplying from the left with $\hat{Q}$ results in
# $$
# \hat{Q}\vert \Psi_0\rangle=\frac{\hat{Q}}{\omega-\hat{H}_0}\left(\omega-E+\hat{H}_I\right)\vert \Psi_0\rangle,
# $$
# which is possible since we have defined the operator $\hat{Q}$ in terms of the eigenfunctions of $\hat{H}_0$.
#
#
#
#
# These operators commute meaning that
# $$
# \hat{Q}\frac{1}{\left(\omega-\hat{H}_0\right)}\hat{Q}=\hat{Q}\frac{1}{\left(\omega-\hat{H}_0\right)}=\frac{\hat{Q}}{\left(\omega-\hat{H}_0\right)}.
# $$
# With these definitions we can in turn define the wave function as
# $$
# \vert \Psi_0\rangle=\vert \Phi_0\rangle+\frac{\hat{Q}}{\omega-\hat{H}_0}\left(\omega-E+\hat{H}_I\right)\vert \Psi_0\rangle.
# $$
# This equation is again nothing but a formal rewrite of Schroedinger's equation
# and does not represent a practical calculational scheme.
# It is a non-linear equation in two unknown quantities, the energy $E$ and the exact
# wave function $\vert \Psi_0\rangle$. We can however start with a guess for $\vert \Psi_0\rangle$ on the right hand side of the last equation.
#
#
#
# The most common choice is to start with the function which is expected to exhibit the largest overlap with the wave function we are searching after, namely $\vert \Phi_0\rangle$. This can again be inserted in the solution for $\vert \Psi_0\rangle$ in an iterative fashion and if we continue along these lines we end up with
# $$
# \vert \Psi_0\rangle=\sum_{i=0}^{\infty}\left\{\frac{\hat{Q}}{\omega-\hat{H}_0}\left(\omega-E+\hat{H}_I\right)\right\}^i\vert \Phi_0\rangle,
# $$
# for the wave function and
# $$
# \Delta E=\sum_{i=0}^{\infty}\langle \Phi_0\vert \hat{H}_I\left\{\frac{\hat{Q}}{\omega-\hat{H}_0}\left(\omega-E+\hat{H}_I\right)\right\}^i\vert \Phi_0\rangle,
# $$
# which is now a perturbative expansion of the exact energy in terms of the interaction
# $\hat{H}_I$ and the unperturbed wave function $\vert \Psi_0\rangle$.
#
#
#
# In our equations for $\vert \Psi_0\rangle$ and $\Delta E$ in terms of the unperturbed
# solutions $\vert \Phi_i\rangle$ we have still an undetermined parameter $\omega$
# and a dependency on the exact energy $E$. Not much has thus been gained from a practical computational point of view.
#
# In Brillouin-Wigner perturbation theory it is customary to set $\omega=E$. This results in the following perturbative expansion for the energy $\Delta E$
# $$
# \langle \Phi_0\vert \left(\hat{H}_I+\hat{H}_I\frac{\hat{Q}}{E-\hat{H}_0}\hat{H}_I+
# \hat{H}_I\frac{\hat{Q}}{E-\hat{H}_0}\hat{H}_I\frac{\hat{Q}}{E-\hat{H}_0}\hat{H}_I+\dots\right)\vert \Phi_0\rangle.
# $$
# This expression depends however on the exact energy $E$ and is again not very convenient from a practical point of view. It can obviously be solved iteratively, by starting with a guess for $E$ and then solve till some kind of self-consistency criterion has been reached.
#
# Actually, the above expression is nothing but a rewrite again of the full Schroedinger equation.
#
# Define $\hat{e}=E-\hat{H}_0$ and recall that $\hat{H}_0$ commutes with
# $\hat{Q}$ by construction and that $\hat{Q}$ is an idempotent operator,
# $\hat{Q}^2=\hat{Q}$.
# Using this equation in the above expansion for $\Delta E$ we can write the denominator
# $$
# \hat{Q}\left[\frac{1}{\hat{e}}+\frac{1}{\hat{e}}\hat{Q}\hat{H}_I\hat{Q}
# \frac{1}{\hat{e}}+\frac{1}{\hat{e}}\hat{Q}\hat{H}_I\hat{Q}
# \frac{1}{\hat{e}}\hat{Q}\hat{H}_I\hat{Q}\frac{1}{\hat{e}}+\dots\right]\hat{Q}.
# $$
# Inserted in the expression for $\Delta E$ leads to
# $$
# \Delta E=
# \langle \Phi_0\vert \hat{H}_I+\hat{H}_I\hat{Q}\frac{1}{E-\hat{H}_0-\hat{Q}\hat{H}_I\hat{Q}}\hat{Q}\hat{H}_I\vert \Phi_0\rangle.
# $$
# In Rayleigh-Schroedinger (RS) perturbation theory we set $\omega = W_0$ and obtain the following expression for the energy difference
# $$
# \langle \Phi_0\vert \left(\hat{H}_I+\hat{H}_I\frac{\hat{Q}}{W_0-\hat{H}_0}(\hat{H}_I-\Delta E)+
# \hat{H}_I\frac{\hat{Q}}{W_0-\hat{H}_0}(\hat{H}_I-\Delta E)\frac{\hat{Q}}{W_0-\hat{H}_0}(\hat{H}_I-\Delta E)+\dots\right)\vert \Phi_0\rangle.
# $$
# Recalling that $\hat{Q}$ commutes with $\hat{H_0}$ and since $\Delta E$ is a constant we obtain that
# $$
# \hat{Q}\Delta E\vert \Phi_0\rangle = \hat{Q}\Delta E\vert \hat{Q}\Phi_0\rangle = 0.
# $$
# Inserting this results in the expression for the energy results in
# $$
# \Delta E=\langle \Phi_0\vert \left(\hat{H}_I+\hat{H}_I\frac{\hat{Q}}{W_0-\hat{H}_0}\hat{H}_I+
# \hat{H}_I\frac{\hat{Q}}{W_0-\hat{H}_0}(\hat{H}_I-\Delta E)\frac{\hat{Q}}{W_0-\hat{H}_0}\hat{H}_I+\dots\right)\vert \Phi_0\rangle.
# $$
# We can now rewrite this expression as a perturbative expansion in terms
# of $\hat{H}_I$, where we iterate the last expression in terms of $\Delta E$
# $$
# \Delta E=\sum_{i=1}^{\infty}\Delta E^{(i)}.
# $$
# We get the following expression for $\Delta E^{(i)}$
# $$
# \Delta E^{(1)}=\langle \Phi_0\vert \hat{H}_I\vert \Phi_0\rangle,
# $$
# which is just the contribution to first order in perturbation theory,
# $$
# \Delta E^{(2)}=\langle\Phi_0\vert \hat{H}_I\frac{\hat{Q}}{W_0-\hat{H}_0}\hat{H}_I\vert \Phi_0\rangle,
# $$
# which is the contribution to second order.
# $$
# \Delta E^{(3)}=\langle \Phi_0\vert \hat{H}_I\frac{\hat{Q}}{W_0-\hat{H}_0}\hat{H}_I\frac{\hat{Q}}{W_0-\hat{H}_0}\hat{H}_I\Phi_0\rangle-
# \langle\Phi_0\vert \hat{H}_I\frac{\hat{Q}}{W_0-\hat{H}_0}\langle \Phi_0\vert \hat{H}_I\vert \Phi_0\rangle\frac{\hat{Q}}{W_0-\hat{H}_0}\hat{H}_I\vert \Phi_0\rangle,
# $$
# being the third-order contribution.
#
#
# ## Interpreting the correlation energy and the wave operator
#
# In the shell-model lectures we showed that we could rewrite the exact state function for say the ground state, as a linear expansion in terms of all possible Slater determinants. That is, we
# define the ansatz for the ground state as
# $$
# |\Phi_0\rangle = \left(\prod_{i\le F}\hat{a}_{i}^{\dagger}\right)|0\rangle,
# $$
# where the index $i$ defines different single-particle states up to the Fermi level. We have assumed that we have $N$ fermions.
# A given one-particle-one-hole ($1p1h$) state can be written as
# $$
# |\Phi_i^a\rangle = \hat{a}_{a}^{\dagger}\hat{a}_i|\Phi_0\rangle,
# $$
# while a $2p2h$ state can be written as
# $$
# |\Phi_{ij}^{ab}\rangle = \hat{a}_{a}^{\dagger}\hat{a}_{b}^{\dagger}\hat{a}_j\hat{a}_i|\Phi_0\rangle,
# $$
# and a general $ApAh$ state as
# $$
# |\Phi_{ijk\dots}^{abc\dots}\rangle = \hat{a}_{a}^{\dagger}\hat{a}_{b}^{\dagger}\hat{a}_{c}^{\dagger}\dots\hat{a}_k\hat{a}_j\hat{a}_i|\Phi_0\rangle.
# $$
# We use letters $ijkl\dots$ for states below the Fermi level and $abcd\dots$ for states above the Fermi level. A general single-particle state is given by letters $pqrs\dots$.
#
# We can then expand our exact state function for the ground state
# as
# $$
# |\Psi_0\rangle=C_0|\Phi_0\rangle+\sum_{ai}C_i^a|\Phi_i^a\rangle+\sum_{abij}C_{ij}^{ab}|\Phi_{ij}^{ab}\rangle+\dots
# =(C_0+\hat{C})|\Phi_0\rangle,
# $$
# where we have introduced the so-called correlation operator
# $$
# \hat{C}=\sum_{ai}C_i^a\hat{a}_{a}^{\dagger}\hat{a}_i +\sum_{abij}C_{ij}^{ab}\hat{a}_{a}^{\dagger}\hat{a}_{b}^{\dagger}\hat{a}_j\hat{a}_i+\dots
# $$
# Since the normalization of $\Psi_0$ is at our disposal and since $C_0$ is by hypothesis non-zero, we may arbitrarily set $C_0=1$ with
# corresponding proportional changes in all other coefficients. Using this so-called intermediate normalization we have
# $$
# \langle \Psi_0 | \Phi_0 \rangle = \langle \Phi_0 | \Phi_0 \rangle = 1,
# $$
# resulting in
# $$
# |\Psi_0\rangle=(1+\hat{C})|\Phi_0\rangle.
# $$
# In a shell-model calculation, the unknown coefficients in $\hat{C}$ are the
# eigenvectors which result from the diagonalization of the Hamiltonian matrix.
#
# How can we use perturbation theory to determine the same coefficients? Let us study the contributions to second order in the interaction, namely
# $$
# \Delta E^{(2)}=\langle\Phi_0\vert \hat{H}_I\frac{\hat{Q}}{W_0-\hat{H}_0}\hat{H}_I\vert \Phi_0\rangle.
# $$
# The intermediate states given by $\hat{Q}$ can at most be of a $2p-2h$ nature if we have a two-body Hamiltonian. This means that second order in the perturbation theory can have $1p-1h$ and $2p-2h$ at most as intermediate states. When we diagonalize, these contributions are included to infinite order. This means that higher-orders in perturbation theory bring in more complicated correlations.
#
# If we limit the attention to a Hartree-Fock basis, then we have that
# $\langle\Phi_0\vert \hat{H}_I \vert 2p-2h\rangle$ is the only contribution and the contribution to the energy reduces to
# $$
# \Delta E^{(2)}=\frac{1}{4}\sum_{abij}\langle ij\vert \hat{v}\vert ab\rangle \frac{\langle ab\vert \hat{v}\vert ij\rangle}{\epsilon_i+\epsilon_j-\epsilon_a-\epsilon_b}.
# $$
# If we compare this to the correlation energy obtained from full configuration interaction theory with a Hartree-Fock basis, we found that
# $$
# E-E_0 =\Delta E=
# \sum_{abij}\langle ij | \hat{v}| ab \rangle C_{ij}^{ab},
# $$
# where the energy $E_0$ is the reference energy and $\Delta E$ defines the so-called correlation energy.
#
# We see that if we set
# $$
# C_{ij}^{ab} =\frac{1}{4}\frac{\langle ab \vert \hat{v} \vert ij \rangle}{\epsilon_i+\epsilon_j-\epsilon_a-\epsilon_b},
# $$
# we have a perfect agreement between FCI and MBPT. However, FCI includes such $2p-2h$ correlations to infinite order. In order to make a meaningful comparison we would at least need to sum such correlations to infinite order in perturbation theory.
#
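# To make the second-order expression concrete, the cell below sketches how $\Delta E^{(2)}$ could be
# evaluated numerically. This is an added illustration, not part of the original notes: the
# antisymmetrized matrix elements `v[i,j,a,b]` and the single-particle energies `eps` are filled with
# random numbers purely to show the bookkeeping of hole and particle indices.
# +
import numpy as np

n_holes, n_particles = 4, 6          # states below / above the Fermi level
rng = np.random.default_rng(2019)

# Single-particle (Hartree-Fock-like) energies, sorted so holes lie below particles
eps = np.sort(rng.uniform(-5.0, 5.0, n_holes + n_particles))

# Toy antisymmetrized matrix elements <ij|v|ab>; assumed real, so <ab|v|ij> = <ij|v|ab>
v = rng.normal(scale=0.1, size=(n_holes, n_holes, n_particles, n_particles))
v = v - v.transpose(1, 0, 2, 3)      # antisymmetry in the hole indices
v = v - v.transpose(0, 1, 3, 2)      # antisymmetry in the particle indices

delta_e2 = 0.0
for i in range(n_holes):
    for j in range(n_holes):
        for a in range(n_particles):
            for b in range(n_particles):
                denom = eps[i] + eps[j] - eps[n_holes + a] - eps[n_holes + b]
                delta_e2 += 0.25 * v[i, j, a, b]**2 / denom

print(f"Toy second-order correction: {delta_e2:.6f}")
# -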
# Summing up, we can see that
# * MBPT introduces order-by-order specific correlations and we make comparisons with exact calculations like FCI
#
# * At every order, we can calculate all contributions since they are well-known and either tabulated or calculated on the fly.
#
# * MBPT is a non-variational theory and there is no guarantee that higher orders will improve the convergence.
#
# * However, since FCI calculations are limited by the size of the Hamiltonian matrices to diagonalize (today's most efficient codes can handle dimensionalities of about ten billion basis states), MBPT can function as an approximative method which gives a straightforward (but tedious) calculation recipe.
#
# * MBPT has been widely used to compute effective interactions for the nuclear shell-model.
#
# * But there are better methods which sum to infinite order important correlations. Coupled cluster theory is one of these methods.
| doc/LectureNotes/mbpt.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/aTom-Pie/dw_matrix/blob/master/Matrix2_day3_FI_simple_model.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + id="ezam9LCQi5e3" colab_type="code" outputId="d3870c54-a25d-459c-aa1c-f119e76cd293" colab={"base_uri": "https://localhost:8080/", "height": 255}
# !pip install --upgrade tables
# !pip install eli5
# + id="kK9ulby1jU4Q" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 168} outputId="8d002818-fc8f-4926-ffe7-639d0e4e4ed3"
import pandas as pd
import numpy as np
from sklearn.dummy import DummyRegressor
from sklearn.tree import DecisionTreeRegressor
from sklearn.metrics import mean_absolute_error as mae
from sklearn.model_selection import cross_val_score
import eli5
from eli5.sklearn import PermutationImportance
# + id="3kvmakQQqE3m" colab_type="code" outputId="08d48631-a437-471a-e648-16489737b893" colab={"base_uri": "https://localhost:8080/", "height": 34}
# cd /content/drive/My Drive/Colab Notebooks/Matrix2/data
# + id="kQ9G0m0YhYfR" colab_type="code" outputId="014e9e93-c80c-402e-e4ec-e7075f3aa7c2" colab={"base_uri": "https://localhost:8080/", "height": 34}
# !pwd
# + id="Rfb3zbFFhqqV" colab_type="code" colab={}
# # !curl -L http://bit.ly/dw_car_data -o car.h5
# + id="JdS-eZyxiRDG" colab_type="code" outputId="ee417b58-1eb6-4248-e1f8-7dd16612bd6b" colab={"base_uri": "https://localhost:8080/", "height": 51}
# !ls -lh
# + id="5DGYEcl1iipL" colab_type="code" outputId="8e8c789b-b7f1-4282-d87f-eb4051257699" colab={"base_uri": "https://localhost:8080/", "height": 34}
df = pd.read_hdf('/content/drive/My Drive/Colab Notebooks/Matrix2/data/car.h5')
df.shape
# + id="_ydm1a2K33dR" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 187} outputId="43137ec6-5440-4c55-84ef-b2e0ee0ef0ec"
df.columns
# + [markdown] id="SrJLEK_A4kBL" colab_type="text"
# ## DUMMY MODEL
#
# + id="BOUNO51z4HuM" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="4ce6c6a8-c251-4e50-d183-11387e258527"
df.select_dtypes(np.number).columns
# + id="Wpi8C-FJ4dzu" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="51248bf8-63e9-4dab-e69b-94e9e27cab03"
feats = ['car_id']
X = df[ feats ].values
y = df['price_value'].values
model = DummyRegressor()
model.fit(X,y)
y_pred = model.predict(X)
mae (y, y_pred)
# + id="gwUt6GWr5xsu" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="205877a3-4a04-4735-dafe-50784b9d2743"
[x for x in df.columns if 'price' in x]
# + id="5wMY0JgI6tM0" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 68} outputId="df22677a-8b4f-435d-fae5-1ac14fb27fee"
df['price_currency'].value_counts()
# + id="OVjvI8rr7BPO" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 68} outputId="6cfaebec-a573-41ab-a79a-5e3e87590347"
df['price_currency'].value_counts(normalize = True)*100
# + id="YC9ZwbAB7Wnh" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 68} outputId="076a4ed5-1665-4542-dcdb-c08d09e9461b"
df = df[df['price_currency'] != 'EUR']
print (df.shape)
df['price_currency'].value_counts()
# + [markdown] id="Y6cDZq-V8NN8" colab_type="text"
# ## FEATURES
# + id="Fezm0S8P8O5M" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 678} outputId="90200ed9-f86f-4e93-8d1d-08ba6d25813e"
df.head()
# + id="yZ8H40Ts7_Ga" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 429} outputId="d63e8360-d285-4737-d46d-07e6965d76f0"
df.sample(3)
# + id="AOrptCJ19Jox" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="542e2f29-5f11-440d-a9b2-7a99c2c0bd22"
# df['param_color'].factorize()
df['param_color'].factorize()[0]
# + id="lhPGnUMB8dlb" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="24748e0a-2008-4ec7-e8ff-2a1722d33a2e"
for feat in df.columns:
print(feat)
# + id="3SslPr-y83Ed" colab_type="code" colab={}
SUFFIX_CAT = '__cat'
for feat in df.columns:
if isinstance(df[feat][0], list): continue
factorized_values = df[feat].factorize()[0]
if SUFFIX_CAT in feat:
df[feat] = factorized_values
else:
df[feat + SUFFIX_CAT] = factorized_values
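# Added aside (not in the original notebook): `factorize` simply maps each distinct value to an integer
# code, which is exactly what the loop above relies on.
# +
demo = pd.Series(['red', 'blue', 'red', 'green'])
codes, uniques = demo.factorize()
print(codes)    # [0 1 0 2]
print(uniques)  # Index(['red', 'blue', 'green'], dtype='object')
# -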
# + id="Mq45Mx9J-pvq" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="bdc4dd2e-0c7f-4bff-ec04-965a9dbcb321"
cat_feats = [x for x in df.columns if SUFFIX_CAT in x]
cat_feats = [x for x in cat_feats if 'price' not in x]
print (len(cat_feats))
# cat_feats
# + id="b6DRvA17CZ6X" colab_type="code" colab={}
X = df[cat_feats].values
y = df['price_value'].values
model = DecisionTreeRegressor(max_depth=6)
scores = cross_val_score(model, X, y, cv=3, scoring='neg_mean_absolute_error')
# + id="q0odT0rrFzYF" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="6255fe32-55da-445a-cfe8-b98ee7a963f2"
np.mean(scores)
# + id="lBqT0Is-FC8T" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="926aea03-38fe-4500-e81b-4eb83f5ada31"
m = DecisionTreeRegressor(max_depth=6)
m.fit(X, y)
# scores1 = cross_val_score(model, X, y, cv=3, scoring='neg_mean_absolute_error')
# print (scores1)
imp = PermutationImportance(m, random_state=2020).fit(X,y)
# Show the permutation importances computed above (rather than the raw tree weights)
eli5.show_weights(imp, feature_names=cat_feats)
# + id="3OtMPtKwHGyF" colab_type="code" colab={}
| Matrix2_day3_FI_simple_model.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %load_ext autoreload
# %autoreload 2
import os
os.chdir('..')
import pandas as pd
import altair as alt
alt.renderers.enable('notebook')
from src import day1, day2, day4, computer, day6, day7, day10, day11, day12, day13, day14
# # Day 1
# ## Part 1
fuel = day1.compute_fuel_requirements('mass.csv')
print('Sum of the fuel requirements: {}'.format(fuel))
# ## Part 2
fuel = day1.compute_all_fuel_requirements('mass.csv')
print('Sum of the fuel requirements, taking fuel mass into account: {}'.format(fuel))
# # Day 2
# ## Part 1
program = [1,0,0,3,1,1,2,3,1,3,4,3,1,5,0,3,2,1,6,19,1,9,19,23,2,23,10,27,1,27,5,31,1,31,6,35,1,6,35,39,2,39,13,43,1,9,43,47,2,9,47,51,1,51,6,55,2,55,10,59,1,59,5,63,2,10,63,67,2,9,67,71,1,71,5,75,2,10,75,79,1,79,6,83,2,10,83,87,1,5,87,91,2,9,91,95,1,95,5,99,1,99,2,103,1,103,13,0,99,2,14,0,0]
# Restore the gravity assist program to the "1202 program alarm" state: replace position 1 with the value 12 and replace position 2 with the value 2.
program[1] = 12
program[2] = 2
result = day2.intcode_computer(program)
print('Value left at position 0: {}'.format(result[0]))
# ## Part 2
program = [1,0,0,3,1,1,2,3,1,3,4,3,1,5,0,3,2,1,6,19,1,9,19,23,2,23,10,27,1,27,5,31,1,31,6,35,1,6,35,39,2,39,13,43,1,9,43,47,2,9,47,51,1,51,6,55,2,55,10,59,1,59,5,63,2,10,63,67,2,9,67,71,1,71,5,75,2,10,75,79,1,79,6,83,2,10,83,87,1,5,87,91,2,9,91,95,1,95,5,99,1,99,2,103,1,103,13,0,99,2,14,0,0]
output = 19690720
code = day2.get_error_code(output=output, program=program)
print('Code error: {}'.format(code))
# # Day 3
# This day can be run using "streamlit run app_day3", which will launch a streamlit application to get the solution.
# # Day 4
# ## Part 1
# Although it seems that there should be an explicit formula to solve this problem, it can be obtained by brute force, since the interval of the numbers is not very big.
n = day4.count_passwords((254032,789860))
print('Number of possible passwords: {}'.format(n))
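# The `day4` module itself is not shown in this notebook. Purely as an added illustration, a stand-alone
# brute-force check consistent with the usual 2019 Day 4 rules (six digits, digits never decrease, at
# least one pair of equal adjacent digits) could look like the sketch below; the inclusive range is an
# assumption.
# +
def is_valid_password(n):
    digits = str(n)
    non_decreasing = all(a <= b for a, b in zip(digits, digits[1:]))
    has_pair = any(a == b for a, b in zip(digits, digits[1:]))
    return non_decreasing and has_pair

print(sum(is_valid_password(n) for n in range(254032, 789861)))
# -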
# The idea to get the formula would be: _Starting with 52 and ending in 100. For the second digit there are 5 possibilities (from 5 to 9). And if the second digit is fixed, for the last digit there is only one possible value (5 now). So we will have 5 valid values (55, 66, 77, 88, 99)._
#
# It might be possible to extend this reasoning to a general case, but it is not obvious how to know that the possible values are still under the upper threshold in an automatic way.
# ## Part 2
n = day4.count_passwords_part2((254032,789860))
print('Number of possible passwords: {}'.format(n))
# # Day 5
# ## Part 1
# The computer code has been modified to adapt it to day 7 part 2. The computer run the program in a dedicated thread, which waits for an input to be set. After the execution is finished, the output can be retrieved.
#
# In order to store the inputs and outputs two synchronized queues are used (which make it possible to connect several computers). These queues can be passed as argument when the computer is created.
f = open('data/raw/day5/program.txt', 'r')
program = f.read()
program = list(map(int, program.split(',')))
# Launch the program execution, which keeps waiting until an input is send.
pc = computer.intcode_computer(program)
pc.run_program()
# Add a new input.
pc.set_input(1)
# Get the output.
pc.get_output()
# ## Part 2
test = [3,21,1008,21,8,20,1005,20,22,107,8,21,20,1006,20,31,1106,0,36,98,0,0,1002,21,125,20,4,20,1105,1,46,104,999,1105,1,46,1101,1000,1,20,4,20,1105,1,46,98,99]
pc = computer.intcode_computer(test)
pc.set_input(2)
pc.run_program()
pc.get_output()
print('Diagnostic code for system ID 5:')
pc = computer.intcode_computer(program)
pc.set_input(5)
pc.run_program()
pc.get_output()
# # Day 6
# ## Part 1
# This implementation is quite inefficient, since it uses Depth First Search on the tree, starting always with "COM". Then, the same path is computed a lot of different times for big graphs. It could be optimized moving also the starting point.
#
# Test tree:
test_orbits = ['COM)B', 'B)C', 'C)D', 'D)E', 'E)F', 'B)G', 'G)H', 'D)I', 'E)J', 'J)K', 'K)L']
g = day6.create_graph(orbits=test_orbits)
print('Number of orbits: {}'.format(day6.count_orbits(g)))
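# Added cross-check (independent of the `day6` module): the total orbit count equals the sum of every
# node's depth below COM, which networkx computes directly; the example tree should give 42.
# +
import networkx as nx
G_check = nx.DiGraph([o.split(')') for o in test_orbits])
print(sum(nx.shortest_path_length(G_check, 'COM').values()))
# -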
# Challenge input:
g = day6.create_graph(orbits_path='orbits.txt',
root_path='data/raw/day6')
print('Number of orbits: {}'.format(day6.count_orbits(g)))
# ## Part 2
# Now we have to create an undirected graph to represent the orbits, in order to be able to go back in the orbital path.
#
# Test graph:
test_orbits = ['COM)B', 'B)C', 'C)D', 'D)E', 'E)F', 'B)G', 'G)H', 'D)I', 'E)J', 'J)K', 'K)L', 'K)YOU', 'I)SAN']
g = day6.create_graph(orbits=test_orbits, directed=False)
print('Number of transfers: {}'.format(day6.count_transfers(g)))
# Challenge input:
g = day6.create_graph(orbits_path='orbits.txt',
root_path='data/raw/day6',
directed=False
)
print('Number of transfers: {}'.format(day6.count_transfers(g)))
# # Day 7
# ## Part 1
# Test programs:
# +
test_program = '3,15,3,16,1002,16,10,16,1,16,15,15,4,15,99,0,0' #Solution: 43210 - 4,3,2,1,0
# test_program = '3,23,3,24,1002,24,10,24,1002,23,-1,23,101,5,23,23,1,24,23,23,4,23,99,0,0' #Solution: 54321 - 0,1,2,3,4
# test_program = '3,31,3,32,1002,32,10,32,1001,31,-2,31,1007,31,0,33,1002,33,7,33,1,33,31,31,1,32,31,31,4,31,99,0,0,0' #Solution: 65210 - 1,0,4,3,2
max_phase = day7.highest_amplification_signal(program=test_program,
sequence_signals=[0,1,2,3,4])
print('Max thruster signal {m} (from phase setting sequence {p})'.format(m=max_phase[1],
p=max_phase[0]))
# -
# Challenge input:
max_phase = day7.highest_amplification_signal(program_path='program.txt',
root_path='data/raw/day7',
sequence_signals=[0,1,2,3,4])
print('Max thruster signal {m} (from phase setting sequence {p})'.format(m=max_phase[1],
p=max_phase[0]))
# ## Part 2
# Test programs:
# +
# test_program = '3,26,1001,26,-4,26,3,27,1002,27,2,27,1,27,26,27,4,27,1001,28,-1,28,1005,28,6,99,0,0,5' #Solution: 139629729 - 9,8,7,6,5
test_program = '3,52,1001,52,-5,52,3,53,1,52,56,54,1007,54,5,55,1005,55,26,1001,54,-5,54,1105,1,12,1,53,54,53,1008,54,0,55,1001,55,1,55,2,53,55,53,4,53,1001,56,-1,56,1005,56,6,99,0,0,0,0,10' #Solution: 18216 - 9,7,8,5,6
max_phase = day7.highest_amplification_signal(program=test_program,
sequence_signals=[5,6,7,8,9],
feedback_loop=True
)
print('Max thruster signal {m} (from phase setting sequence {p})'.format(m=max_phase[1],
p=max_phase[0]))
# -
# Challenge input:
max_phase = day7.highest_amplification_signal(program_path='program.txt',
root_path='data/raw/day7',
sequence_signals=[5,6,7,8,9],
feedback_loop=True
)
print('Max thruster signal {m} (from phase setting sequence {p})'.format(m=max_phase[1],
p=max_phase[0]))
# # Day 9
# ## Part 1
# Test programs:
# +
test_program = [109,1,204,-1,1001,100,1,100,1008,100,16,101,1006,101,0,99] #Solution: Itself
# test_program = [1102,34915192,34915192,7,4,7,99] #Solution: 16 digits int
# test_program = [104,1125899906842624,99] #Solution: Integer in the middle
pc = computer.intcode_computer(test_program)
pc.run_program()
pc.get_output()
# -
# Challenge input:
f = open('data/raw/day1/program.txt', 'r')
program = f.read()
program = list(map(int, program.split(',')))
pc = computer.intcode_computer(program, test_mode=False)
pc.set_input(1)
pc.run_program()
pc.get_output()
# ### Part 2
# It is the same as part 1 but just changing the input value.
pc.start_computer()
pc.set_input(2)
pc.run_program()
pc.get_output()
# ## Day 10
# ### Part 1
# +
# Test maps
# map_file = 'map_test1.txt' # Solution: 8, (3,4)
# map_file = 'map_test2.txt' # Solution: 33, (5,8)
# map_file = 'map_test3.txt' # Solution: 35, (1,2)
# map_file = 'map_test4.txt' # Solution: 41, (6,3)
# map_file = 'map_test5.txt' # Solution: 210, (11,13)
# Challenge input
map_file = 'map.txt'
asteroids = day10.read_asteroid_map(map_path=map_file, root_path = 'data/raw/day10')
detected = day10.count_optimal_detected(asteroids)
print('{n} detected asteroids from optimal position {p}'.format(n=detected[1], p=detected[0]))
# -
# ### Part 2
# +
# Test maps
# map_file = 'map_test6.txt'; station = (8,3); n = None
# map_file = 'map_test5.txt'; station = (11,13); n = 200
# Challenge input
map_file = 'map.txt'; station = (22, 19); n = 200
asteroids = day10.read_asteroid_map(map_path=map_file, root_path = 'data/raw/day10')
vaporized = day10.vaporize_asteroids(asteroids, location=station, n_asteroids=n)
print('{n}th asteroid to be vaporized: {p}'.format(n=n, p=vaporized[-1]))
# -
vap200 = vaporized[-1]
print('Multiply X coordinate by 100 and then add Y coordinate: {}'.format(100*vap200[0]+vap200[1]))
# Animation of vaporized asteroids:
import matplotlib
# %matplotlib inline
matplotlib.rcParams['animation.writer'] = 'ffmpeg' # Needs to be installed in the system!
matplotlib.rc('animation', html='html5')
anim = day10.animated_vaporization(asteroids, station, vaporized)
anim
# # Day 11
# ## Part 1
# Get painting robot positions.
f = open('data/raw/day11/program.txt', 'r')
program = f.read()
program = list(map(int, program.split(',')))
paintings = day11.get_painted_positions(program)
print('Panels painted at least once: {}'.format(len(paintings.keys())))
# ## Part 2
day11.paint_registration_id(program)
# # Day 12
# ## Part 1
# Initial positions:
# - x=-2, y=9, z=-5
# - x=16, y=19, z=9
# - x=0, y=3, z=6
# - x=11, y=0, z=11
positions = pd.DataFrame({'x': [-2, 16, 0, 11],
'y': [9, 19, 3, 0],
'z': [-5, 9, 6, 11]})
n = 1000
energy = day12.compute_system_energy(positions, steps=n)
print(f'Total energy in the system after {n} steps: {energy}')
# Let's see how the moons move around using an interactive plot.
import matplotlib
# %matplotlib inline
matplotlib.rcParams['animation.writer'] = 'ffmpeg' # Needs to be installed in the system!
matplotlib.rc('animation', html='html5')
day12.animate_system(positions, steps=100)
# ## Part 2
# Test example:
positions = pd.DataFrame({'x': [-1, 2, 4, 3],
'y': [0, -10, -8, 5],
'z': [2, -7, 8, -1]})
cicle = day12.cicle_length(positions)
print(f'Number of steps until the first repetition: {cicle}')
# Second example:
positions = pd.DataFrame({'x': [-8, 5, 2, 9],
'y': [-10, 5, -7, -8],
'z': [0, 10, 3, -3]})
cicle = day12.cicle_length(positions)
print(f'Number of steps until the first repetition: {cicle}')
# Challenge input:
positions = pd.DataFrame({'x': [-2, 16, 0, 11],
'y': [9, 19, 3, 0],
'z': [-5, 9, 6, 11]})
cicle = day12.cicle_length(positions)
print(f'Number of steps until the first repetition: {cicle}')
# # Day 13
# ## Part 1
arcade = day13.ArcadeCabinet(program_path='program.txt',
root_path='data/raw/day13')
arcade.start_game()
n_blocks = arcade.count_tiles('block')
print(f'{n_blocks} block tiles')
# ## Part 2
# This part should be run from the Ubuntu terminal, using: <br>
# _python_ <br>
# _>>> from src import day13_ <br>
# _>>> day13.play_arcade()_
#
# The game has an auto-play mode, in which the machine plays automatically: <br>
# _>>> day13.play_arcade(autoplay=True)_
#
# If you want to see just the final score without showing the game screen, you can use:<br>
# _>>> day13.play_arcade(autoplay=True, print_screen=False)_
#
# # Day 14
# ## Part 1
import networkx as nx
def read_reactions(file: str):
f = open(f'data/raw/day14/{file}', 'r')
reactions = f.readlines()
return [" ".join(r.split()) for r in reactions]
# file = 'test1.txt' #31 ORE
# file = 'test2.txt' #165 ORE
# file = 'test3.txt' #13312 ORE
# file = 'test4.txt' #180697 ORE
# file = 'test5.txt' #2210736 ORE
file = 'input.txt'
reactions = read_reactions(file)
G = day14.construct_tree(reactions)
nx.draw_networkx(G)
ore = day14.DFS_ORE(G)
print(f'{ore} ORE required to produce 1 FUEL')
# ## Part 2
# file = 'test4.txt' #5586022 fuel
# file = 'test5.txt' #460664 fuel
file = 'input.txt'
reactions = read_reactions(file)
G = day14.construct_tree(reactions)
n = 1000000000000
fuel = day14.DFS_n_fuel(G, n)
print(f'{fuel} units of FUEL created with {n} ORE')
| notebooks/Challenge_executions.ipynb |
// -*- coding: utf-8 -*-
// ---
// jupyter:
// jupytext:
// text_representation:
// extension: .scala
// format_name: light
// format_version: '1.5'
// jupytext_version: 1.14.4
// ---
// # Using Azure Open Datasets in Synapse - Enrich NYC Green Taxi Data with Holiday and Weather
//
// Synapse has [Azure Open Datasets](https://azure.microsoft.com/en-us/services/open-datasets/) package pre-installed. This notebook provides examples of how to enrich NYC Green Taxi Data with Holiday and Weather with focusing on :
// - read Azure Open Dataset
// - manipulate the data to prepare for further analysis, including column projection, filtering, grouping and joins etc.
// - create a Spark table to be used in other notebooks for modeling training
// ## Data loading
// Let's first load the [NYC green taxi trip records](https://azure.microsoft.com/en-us/services/open-datasets/catalog/nyc-taxi-limousine-commission-green-taxi-trip-records/). The Open Datasets package contains a class representing each data source (NycTlcGreen for example) to easily filter date parameters before downloading.
// +
// Load nyc green taxi trip records from azure open dataset
val blob_account_name = "azureopendatastorage"
val nyc_blob_container_name = "nyctlc"
val nyc_blob_relative_path = "green"
val nyc_blob_sas_token = ""
val nyc_wasbs_path = f"wasbs://$nyc_blob_container_name@$blob_account_name.blob.core.windows.net/$nyc_blob_relative_path"
spark.conf.set(f"fs.azure.sas.$nyc_blob_container_name.$blob_account_name.blob.core.windows.net",nyc_blob_sas_token)
val nyc_tlc = spark.read.parquet(nyc_wasbs_path)
//nyc_tlc.show(5, truncate = false)
// +
// Filter data by time range
import java.sql.Timestamp
import org.joda.time.DateTime
val end_date = new Timestamp(DateTime.parse("2018-06-06").getMillis)
val start_date = new Timestamp(DateTime.parse("2018-05-01").getMillis)
val nyc_tlc_df = nyc_tlc.filter((nyc_tlc("lpepPickupDatetime") >= start_date) && (nyc_tlc("lpepPickupDatetime") <= end_date))
nyc_tlc_df.show(5, truncate = false)
// -
// Now that the initial data is loaded. Let's do some projection on the data to
// - create new columns for the month number, day of month, day of week, and hour of day. These info is going to be used in the training model to factor in time-based seasonality.
// - add a static feature for the country code to join holiday data.
// +
// Extract month, day of month, and day of week from pickup datetime and add a static column for the country code to join holiday data.
import org.apache.spark.sql.functions._
val nyc_tlc_df_expand = (
nyc_tlc_df.withColumn("datetime", to_date(col("lpepPickupDatetime")))
.withColumn("month_num",month(col("lpepPickupDatetime")))
.withColumn("day_of_month",dayofmonth(col("lpepPickupDatetime")))
.withColumn("day_of_week",dayofweek(col("lpepPickupDatetime")))
.withColumn("hour_of_day",hour(col("lpepPickupDatetime")))
.withColumn("country_code",lit("US"))
)
// +
// Display 5 rows
// nyc_tlc_df_expand.show(5, truncate = false)
// -
// Remove some of the columns that won't need for modeling or additional feature building.
//
//
//
// Remove unused columns from nyc green taxi data
val nyc_tlc_df_clean = nyc_tlc_df_expand.drop(
"lpepDropoffDatetime", "puLocationId", "doLocationId", "pickupLongitude",
"pickupLatitude", "dropoffLongitude","dropoffLatitude" ,"rateCodeID",
"storeAndFwdFlag","paymentType", "fareAmount", "extra", "mtaTax",
"improvementSurcharge", "tollsAmount", "ehailFee", "tripType" )
// Display 5 rows
nyc_tlc_df_clean.show(5, truncate = false)
// ## Enrich with holiday data
// Now that we have taxi data downloaded and roughly prepared, add in holiday data as additional features. Holiday-specific features will assist model accuracy, as major holidays are times where taxi demand increases dramatically and supply becomes limited.
//
// Let's load the [public holidays](https://azure.microsoft.com/en-us/services/open-datasets/catalog/public-holidays/) from Azure Open datasets.
//
// +
// Load public holidays data from azure open dataset
val hol_blob_container_name = "holidaydatacontainer"
val hol_blob_relative_path = "Processed"
val hol_blob_sas_token = ""
val hol_wasbs_path = f"wasbs://$hol_blob_container_name@$blob_account_name.blob.core.windows.net/$hol_blob_relative_path"
spark.conf.set(f"fs.azure.sas.$hol_blob_container_name.$blob_account_name.blob.core.windows.net",hol_blob_sas_token)
val hol_raw = spark.read.parquet(hol_wasbs_path)
// Filter data by time range
val hol_df = hol_raw.filter((hol_raw("date") >= start_date) && (hol_raw("date") <= end_date))
// Display 5 rows
// hol_df.show(5, truncate = false)
// -
// Rename the countryRegionCode and date columns to match the respective field names from the taxi data, and also normalize the time so it can be used as a key.
// +
val hol_df_clean = (
hol_df.withColumnRenamed("countryRegionCode","country_code")
.withColumn("datetime",to_date(col("date")))
)
hol_df_clean.show(5, truncate = false)
// -
// Next, join the holiday data with the taxi data by performing a left-join. This will preserve all records from taxi data, but add in holiday data where it exists for the corresponding datetime and country_code, which in this case is always "US". Preview the data to verify that they were merged correctly.
// +
// enrich taxi data with holiday data
val nyc_taxi_holiday_df = nyc_tlc_df_clean.join(hol_df_clean, Seq("datetime", "country_code") , "left")
nyc_taxi_holiday_df.show(5,truncate = false)
// +
// Create a temp table and filter out non empty holiday rows
nyc_taxi_holiday_df.createOrReplaceTempView("nyc_taxi_holiday_df")
val result = spark.sql("SELECT * from nyc_taxi_holiday_df WHERE holidayName is NOT NULL ")
result.show(5, truncate = false)
// -
// ## Enrich with weather data
//
// Now we append NOAA surface weather data to the taxi and holiday data. Use a similar approach to fetch the [NOAA weather history data](https://azure.microsoft.com/en-us/services/open-datasets/catalog/noaa-integrated-surface-data/) from Azure Open Datasets.
// +
// Load weather data from azure open dataset
val weather_blob_container_name = "isdweatherdatacontainer"
val weather_blob_relative_path = "ISDWeather/"
val weather_blob_sas_token = ""
val weather_wasbs_path = f"wasbs://$weather_blob_container_name@$blob_account_name.blob.core.windows.net/$weather_blob_relative_path"
spark.conf.set(f"fs.azure.sas.$weather_blob_container_name.$blob_account_name.blob.core.windows.net",weather_blob_sas_token)
val isd = spark.read.parquet(weather_wasbs_path)
// Display 5 rows
// isd.show(5, truncate = false)
// +
// Filter data by time range
val isd_df = isd.filter((isd("datetime") >= start_date) && (isd("datetime") <= end_date))
// Display 5 rows
isd_df.show(5, truncate = false)
// +
// Filter out weather info for new york city, remove the recording with null temperature
val weather_df = (
isd_df.filter(isd_df("latitude") >= "40.53")
.filter(isd_df("latitude") <= "40.88")
.filter(isd_df("longitude") >= "-74.09")
.filter(isd_df("longitude") <= "-73.72")
.filter(isd_df("temperature").isNotNull)
.withColumnRenamed("datetime","datetime_full")
)
// +
// Remove unused columns
val weather_df_clean = weather_df.drop("usaf", "wban", "longitude", "latitude").withColumn("datetime", to_date(col("datetime_full")))
//weather_df_clean.show(5, truncate = false)
// -
// Next group the weather data so that you have daily aggregated weather values.
//
// +
// Enrich weather data with aggregation statistics
val weather_df_grouped = (
weather_df_clean.groupBy('datetime).
agg(
mean('snowDepth) as "avg_snowDepth",
max('precipTime) as "max_precipTime",
mean('temperature) as "avg_temperature",
max('precipDepth) as "max_precipDepth"
)
)
weather_df_grouped.show(5, truncate = false)
// -
// Merge the taxi and holiday data you prepared with the new weather data. This time you only need the datetime key, and again perform a left-join of the data. Run the describe() function on the new dataframe to see summary statistics for each field.
// Enrich taxi data with weather
val nyc_taxi_holiday_weather_df = nyc_taxi_holiday_df.join(weather_df_grouped, Seq("datetime") ,"left")
nyc_taxi_holiday_weather_df.cache()
nyc_taxi_holiday_weather_df.show(5,truncate = false)
// Run the describe() function on the new dataframe to see summary statistics for each field.
display(nyc_taxi_holiday_weather_df.describe())
// The summary statistics show that the totalAmount field has negative values, which don't make sense in the context.
//
//
// Remove invalid rows with less than 0 taxi fare or tip
val final_df = (
nyc_taxi_holiday_weather_df.
filter(nyc_taxi_holiday_weather_df("tipAmount") > 0).
filter(nyc_taxi_holiday_weather_df("totalAmount") > 0)
)
// ## Cleaning up the existing Database
//
// First we need to drop the tables since Spark requires that a database is empty before we can drop the Database.
//
// Then we recreate the database and set the default database context to it.
spark.sql("DROP TABLE IF EXISTS NYCTaxi.nyc_taxi_holiday_weather");
spark.sql("DROP DATABASE IF EXISTS NYCTaxi");
spark.sql("CREATE DATABASE NYCTaxi");
spark.sql("USE NYCTaxi");
// ## Creating a new table
// We create a nyc_taxi_holiday_weather table from the nyc_taxi_holiday_weather dataframe.
//
final_df.write.saveAsTable("nyc_taxi_holiday_weather");
val final_results = spark.sql("SELECT COUNT(*) FROM nyc_taxi_holiday_weather");
final_results.show(5, truncate = false)
| Notebooks/Scala/05 Using Azure Open Datasets in Synapse.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.8.3 64-bit (''e38'': conda)'
# name: python_defaultSpec_1596792317413
# ---
# # Spark Funds Analysis
# Spark Funds wants to make investments in a few companies. The CEO of Spark Funds wants to understand the global trends in investments so that she can make investment decisions effectively.
# ## Constraints for investments
# - 5 to 15 million USD
# - English-speaking countries
# - Invest where most other investors are investing. This pattern is often observed among early stage startup investors
# ## The objective is to identify the best(where most investors are investing):
# - sectors : eight 'main sectors'
# - countries : most heavily invested
# - a suitable investment type for making investments : investment amounts in the venture, seed, angel, private equity etc
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
# %matplotlib inline
# +
companies = pd.read_csv("companies.csv")
# -
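# The funding-round data is not loaded in this fragment, so the cell below is only an added, illustrative
# sketch of the 5-15 million USD constraint. The `rounds` DataFrame and its column names are hypothetical
# stand-ins, not columns from companies.csv.
# +
rounds = pd.DataFrame({
    "funding_round_type": ["venture", "seed", "angel", "venture", "private_equity"],
    "raised_amount_usd": [10_000_000, 700_000, 950_000, 6_000_000, 73_000_000],
})
by_type = rounds.groupby("funding_round_type")["raised_amount_usd"].mean()
# Keep only investment types whose average round falls inside the 5-15 million USD window
print(by_type[(by_type >= 5e6) & (by_type <= 15e6)])
# -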
| 02Statistics Essentials/01Exploratory Data Analysis/assignment/Assignment1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import shap
import sklearn
import numpy as np
import xgboost
import pandas
with open("/data/ctu/5/features.txt") as infile:
data = np.loadtxt(infile, delimiter=",")
y = data[:, 0]
X = data[:, 1:]
X = pandas.DataFrame(X, columns=[
"Src_Ave_SrcTotalBytes", "Src_Var_SrcTotalBytes",
"Src_Ave_DestTotalBytes", "Src_Var_DestTotalBytes",
"Src_Ave_Duration", "Src_Var_Duration",
"Src_Ave_SrcPayloadBytes", "Src_Var_SrcPayloadBytes",
"Src_Ave_DestPayloadBytes", "Src_Var_DestPayloadBytes",
"Src_Ave_FirstSeenSrcPacketCount","Src_Var_FirstSeenSrcPacketCount",
"Src_Ave_FirstSeenDestPacketCount","Src_Var_FirstSeenDestPacketCount",
"Dest_Ave_SrcTotalBytes", "Dest_Var_SrcTotalBytes",
"Dest_Ave_DestTotalBytes", "Dest_Var_DestTotalBytes",
"Dest_Ave_Duration", "Dest_Var_Duration",
"Dest_Ave_SrcPayloadBytes", "Dest_Var_SrcPayloadBytes",
"Dest_Ave_DestPayloadBytes", "Dest_Var_DestPayloadBytes",
"Dest_Ave_FirstSeenSrcPacketCount","Dest_Var_FirstSeenSrcPacketCount",
"Dest_Ave_FirstSeenDestPacketCount","Dest_Var_FirstSeenDestPacketCount"])
model = xgboost.XGBRegressor().fit(X,y)
explainer = shap.Explainer(model)
shap_values = explainer(X)
shap.plots.waterfall(shap_values[0]) # Benign example
shap.plots.waterfall(shap_values[45650]) # Malicious example
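# Added note: besides the per-row waterfalls above, the same Explanation object supports dataset-level
# summaries (these calls are illustrative and were not in the original notebook).
shap.plots.beeswarm(shap_values)  # distribution of SHAP values per feature
shap.plots.bar(shap_values)       # mean absolute SHAP value per feature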
| scripts/learning-shap.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import itertools
import collections
'''
These are global variables only for the coding interview; normally they would be passed as arguments via a CLI,
for example with argparse: https://realpython.com/command-line-interfaces-python-argparse/
'''
illnesses = ['person1, melonoma', 'person2, sarcoma', 'person3, sarcoma', 'person4, sarcoma']
drugs = ['person1, drug1', 'person2, drug2', 'person3, drug3', 'person3, drug2', 'person4, drug3']
domains = ['melonoma, drug1', 'drug2, drug3, sarcoma', 'sarcoma, drug3']
'''
Not a very efficient solution: it is an O(n^2) algorithm, since there are doubly nested loops.
'''
def colocation(illnesses, drugs, domains):
    '''
    person_spec stores data in this way: {'person1': ['melonoma', 'drug1']}
    domains_list uses nested lists instead of nested strings: instead of
    ['melonoma, drug1', 'drug2, sarcoma'] it uses the structure
    [['melonoma', 'drug1'], ['drug2', 'sarcoma']]
    result is the expected output of the function: [['person1'], ['person2', 'person3', 'person4'], ['person4']]
    '''
person_spec = collections.defaultdict(list)
domains_list = []
result = []
for _ in range(len(domains)):
result.append([])
for i in illnesses:
ill_with = i.split(', ')
for v in ill_with:
if v != ill_with[0]:
person_spec[ill_with[0]].append(v)
for i in drugs:
drugs_with = i.split(', ')
for v in drugs_with:
if v != drugs_with[0]:
person_spec[drugs_with[0]].append(v)
for domain in domains:
domains_list.append(list(domain.split(', ')))
    for k, v in person_spec.items():
        # counter must track the domain index, so iterate with enumerate instead of
        # incrementing only when a match is found
        for counter, d in enumerate(domains_list):
            check = all(item in d for item in v)
            if check:
                result[counter].append(k)
print(result)
# -
colocation(illnesses, drugs, domains)
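# Expected result: [['person1'], ['person2', 'person3', 'person4'], ['person4']]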
| flatiron_task.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Fictional Army - Filtering and Sorting
# ### Introduction:
#
# This exercise was inspired by this [page](http://chrisalbon.com/python/)
#
# Special thanks to: https://github.com/chrisalbon for sharing the dataset and materials.
#
# ### Step 1. Import the necessary libraries
import pandas as pd
# ### Step 2. This is the data given as a dictionary
# Create an example dataframe about a fictional army
raw_data = {'regiment': ['Nighthawks', 'Nighthawks', 'Nighthawks', 'Nighthawks', 'Dragoons', 'Dragoons', 'Dragoons', 'Dragoons', 'Scouts', 'Scouts', 'Scouts', 'Scouts'],
'company': ['1st', '1st', '2nd', '2nd', '1st', '1st', '2nd', '2nd','1st', '1st', '2nd', '2nd'],
'deaths': [523, 52, 25, 616, 43, 234, 523, 62, 62, 73, 37, 35],
'battles': [5, 42, 2, 2, 4, 7, 8, 3, 4, 7, 8, 9],
'size': [1045, 957, 1099, 1400, 1592, 1006, 987, 849, 973, 1005, 1099, 1523],
'veterans': [1, 5, 62, 26, 73, 37, 949, 48, 48, 435, 63, 345],
'readiness': [1, 2, 3, 3, 2, 1, 2, 3, 2, 1, 2, 3],
'armored': [1, 0, 1, 1, 0, 1, 0, 1, 0, 0, 1, 1],
'deserters': [4, 24, 31, 2, 3, 4, 24, 31, 2, 3, 2, 3],
'origin': ['Arizona', 'California', 'Texas', 'Florida', 'Maine', 'Iowa', 'Alaska', 'Washington', 'Oregon', 'Wyoming', 'Louisana', 'Georgia']}
# ### Step 3. Create a dataframe and assign it to a variable called army.
#
# #### Don't forget to include the column names in the order presented in the dictionary ('regiment', 'company', 'deaths'...) so that the column index order is consistent with the solutions. If omitted, pandas will order the columns alphabetically.
army = pd.DataFrame(raw_data, columns=["regiment", "company", "deaths", "battles", "size", "veterans", "readiness", "armored", "deserters", "origin"])
# ### Step 4. Set the 'origin' column as the index of the dataframe
army.set_index(keys="origin", inplace=True)
# ### Step 5. Print only the column veterans
army["veterans"]
# ### Step 6. Print the columns 'veterans' and 'deaths'
army[["veterans", "deaths"]]
# ### Step 7. Print the name of all the columns.
army.columns
# ### Step 8. Select the 'deaths', 'size' and 'deserters' columns from Maine and Alaska
army.loc[["Maine", "Alaska"], ["deaths", "size", "deserters"]]
# ### Step 9. Select the rows 3 to 7 and the columns 3 to 6
army.iloc[2:7, 2:6]
# ### Step 10. Select every row after the fourth row and all columns
army.iloc[4:, :]
# ### Step 11. Select every row up to the 4th row and all columns
army.iloc[:4, :]
# ### Step 12. Select the 3rd column up to the 7th column
army.iloc[:, 2:7]
# ### Step 13. Select rows where df.deaths is greater than 50
army[army["deaths"] > 50]
# ### Step 14. Select rows where df.deaths is greater than 500 or less than 50
army[(army["deaths"] > 500) | (army["deaths"] < 50)]
# ### Step 15. Select all the regiments not named "Dragoons"
army[army["regiment"] != "Dragoons"]
# ### Step 16. Select the rows called Texas and Arizona
army.loc[["Texas", "Arizona"]]
# ### Step 17. Select the third cell in the row named Arizona
army.loc[["Arizona"]].iloc[:, 2]
# ### Step 18. Select the third cell down in the column named deaths
army.loc[:, ["deaths"]].iloc[2]
| 02_Filtering_&_Sorting/Fictional Army/Exercise.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] pycharm={}
# # Recurrent Neural Networks
# + [markdown] pycharm={}
# ## Univariate Time Series Regression
# + [markdown] pycharm={}
# This notebook demonstrates how to forecast the S&P 500 index using a Recurrent Neural Network.
# + [markdown] pycharm={}
# ## Imports & Settings
# -
import warnings
warnings.filterwarnings('ignore')
# + pycharm={}
# %matplotlib inline
from pathlib import Path
import numpy as np
import pandas as pd
import pandas_datareader.data as web
from scipy.stats import spearmanr
from sklearn.metrics import mean_squared_error
from sklearn.preprocessing import MinMaxScaler
import tensorflow as tf
from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, LSTM
from tensorflow import keras
import matplotlib.pyplot as plt
import seaborn as sns
# -
gpu_devices = tf.config.experimental.list_physical_devices('GPU')
if gpu_devices:
print('Using GPU')
tf.config.experimental.set_memory_growth(gpu_devices[0], True)
else:
print('Using CPU')
# + pycharm={}
sns.set_style('whitegrid')
np.random.seed(42)
# -
results_path = Path('results', 'univariate_time_series')
if not results_path.exists():
results_path.mkdir(parents=True)
# + [markdown] pycharm={}
# ## Get Data
# + [markdown] pycharm={}
# We obtain data for 2010-2019 from the Federal Reserve Bank’s Data Service [FRED](https://fred.stlouisfed.org/) using the [pandas_datareader](https://pandas-datareader.readthedocs.io/) library introduced in [Chapter 2 on Market and Fundamental Data](../02_market_and_fundamental_data).
# + pycharm={}
sp500 = web.DataReader('SP500', 'fred', start='2010', end='2020').dropna()
ax = sp500.plot(title='S&P 500',
legend=False,
figsize=(14, 4),
rot=0)
ax.set_xlabel('')
sns.despine()
# + [markdown] pycharm={}
# ## Preprocessing
# -
scaler = MinMaxScaler()
# + pycharm={}
sp500_scaled = pd.Series(scaler.fit_transform(sp500).squeeze(),
index=sp500.index)
sp500_scaled.describe()
# + [markdown] pycharm={}
# ## Generating recurrent sequences from our time series
#
# Our time series is a sequence of numbers indexed by time:
#
# $$x_{0},x_{1},x_{2},...,x_{T}$$
#
# where $\{x_t\}$ is the numerical value in period $t$ and $T$ is the total length of the series.
#
# To apply an RNN for regression or classification, we use a sliding window to construct a rolling set of input/output pairs for our model to learn from, as animated below.
#
# <img src="../assets/timeseries_windowing.gif" width=600 height=600/>
# + [markdown] pycharm={}
# We will generate sequences of 63 trading days, approximately three months, and use a single LSTM layer with 20 hidden units to predict the index value one timestep ahead.
# The input to every LSTM layer must have three dimensions, namely:
# - **Samples**: One sequence is one sample. A batch contains one or more samples.
# - **Time Steps**: One time step is one point of observation in the sample.
# - **Features**: One feature is one observation at a time step.
#
# Our S&P 500 sample has 2,264 observations or time steps. We will create overlapping sequences using a window of 63 observations each.
# For a simpler window of size T = 5, we obtain input-output pairs as shown in the following table:
# + [markdown] pycharm={}
# $$\begin{array}{c|c}
# \text{Input} & \text{Output}\\
# \hline {\langle x_1,x_2,x_3,x_4,x_5\rangle} & { x_6} \\
# \ {\langle x_{2},x_{3},x_{4},x_{5},x_{6} \rangle } & {x_{7} } \\
# {\vdots} & {\vdots}\\
# { \langle x_{T-5},x_{T-4},x_{T-3},x_{T-2},x_{T-1} \rangle } & {x_{T}}
# \end{array}$$
# + [markdown] pycharm={}
# Generally speaking, for window size S, the relationship takes the form
#
# $$x_t = f( x_{t-1}, x_{t-2}, ..., x_{t-S}) \quad\forall t=S, S+1, ..., T$$
#
# Each of the $T-S$ lagged input sequences (vectors) is of length $S$ and has a corresponding scalar output.
# + [markdown] pycharm={}
# We can use the function create_univariate_rnn_data() to stack sequences selected using a rolling windows:
# + pycharm={}
def create_univariate_rnn_data(data, window_size):
n = len(data)
y = data[window_size:]
data = data.values.reshape(-1, 1) # make 2D
X = np.hstack(tuple([data[i: n-j, :] for i, j in enumerate(range(window_size, 0, -1))]))
return pd.DataFrame(X, index=y.index), y
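# + [markdown] pycharm={}
# As a quick optional check (not in the original notebook), applying the function to a toy series with a window of size 3 reproduces the pattern of the table above:
# + pycharm={}
toy = pd.Series(np.arange(10))
toy_X, toy_y = create_univariate_rnn_data(toy, window_size=3)
toy_X.assign(target=toy_y.values)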
# + [markdown] pycharm={}
# We apply this function to the rescaled stock index for a window_size=63 to obtain a two-dimensional dataset of shape number of samples x number of timesteps:
# + pycharm={}
window_size = 63
# + pycharm={}
X, y = create_univariate_rnn_data(sp500_scaled, window_size=window_size)
# + pycharm={}
X.head()
# + pycharm={}
y.head()
# + pycharm={}
X.shape
# + [markdown] pycharm={}
# ## Train-test split
#
# To respect the time series nature of the data, we set aside the data at the end of the sample as a hold-out or test set. More specifically, we'll use the data for 2019.
# + pycharm={}
ax = sp500_scaled.plot(lw=2, figsize=(14, 4), rot=0)
ax.set_xlabel('')
sns.despine()
# + pycharm={}
X_train = X[:'2018'].values.reshape(-1, window_size, 1)
y_train = y[:'2018']
# keep the last year for testing
X_test = X['2019'].values.reshape(-1, window_size, 1)
y_test = y['2019']
# + pycharm={}
n_obs, window_size, n_features = X_train.shape
# + pycharm={}
y_train.shape
# + [markdown] pycharm={}
# ## Keras LSTM Layer
# + [markdown] pycharm={}
# Keras has several built-in RNN layers with various configuration options described in detail in the [documentation](https://keras.io/layers/recurrent/).
# + [markdown] pycharm={}
# ```
# LSTM(units,
# activation='tanh',
# recurrent_activation='hard_sigmoid',
# use_bias=True,
# kernel_initializer='glorot_uniform',
# recurrent_initializer='orthogonal',
# bias_initializer='zeros',
# unit_forget_bias=True,
# kernel_regularizer=None,
# recurrent_regularizer=None,
# bias_regularizer=None,
# activity_regularizer=None,
# kernel_constraint=None,
# recurrent_constraint=None,
# bias_constraint=None,
# dropout=0.0,
# recurrent_dropout=0.0,
# implementation=1,
# return_sequences=False,
# return_state=False,
# go_backwards=False,
# stateful=False,
# unroll=False)
# ```
# + [markdown] pycharm={}
# ## Define the Model Architecture
#
# Having created input/output pairs out of our time series and cut this into training/testing sets, we can now begin setting up our RNN. We use Keras to quickly build a two hidden layer RNN of the following specifications
#
# - layer 1 uses an LSTM module with 20 hidden units (note here the input_shape = (window_size,1))
# - layer 2 uses a fully connected module with one unit
# - the 'mean_squared_error' loss should be used (remember: we are performing regression here)
#
# This can be constructed using just a few lines - see e.g., the [general Keras documentation](https://keras.io/getting-started/sequential-model-guide/) and the [LSTM documentation in particular](https://keras.io/layers/recurrent/) for examples of how to quickly use Keras to build neural network models. Make sure you are initializing your optimizer given the [keras-recommended approach for RNNs](https://keras.io/optimizers/)
# + pycharm={}
rnn = Sequential([
    LSTM(units=20,
input_shape=(window_size, n_features), name='LSTM'),
Dense(1, name='Output')
])
# + [markdown] pycharm={}
# The summary shows that the model has 1,781 parameters:
# + pycharm={}
rnn.summary()
# + [markdown] pycharm={}
# ## Train the Model
# + [markdown] pycharm={}
# We train the model using the RMSProp optimizer, recommended for RNNs, with default settings and compile the model with mean squared error for this regression problem:
# + pycharm={}
optimizer = keras.optimizers.RMSprop(lr=0.001,
rho=0.9,
epsilon=1e-08,
decay=0.0)
# + pycharm={}
rnn.compile(loss='mean_squared_error',
optimizer=optimizer)
# + [markdown] pycharm={}
# We define an EarlyStopping callback and train the model for up to 150 epochs.
# -
rnn_path = (results_path / 'rnn.h5').as_posix()
checkpointer = ModelCheckpoint(filepath=rnn_path,
verbose=1,
monitor='val_loss',
save_best_only=True)
# + pycharm={}
early_stopping = EarlyStopping(monitor='val_loss',
patience=20,
restore_best_weights=True)
# + pycharm={}
lstm_training = rnn.fit(X_train,
y_train,
epochs=150,
batch_size=20,
shuffle=True,
validation_data=(X_test, y_test),
callbacks=[early_stopping, checkpointer],
verbose=1)
# + [markdown] pycharm={}
# Training stops after 51 epochs; the `early_stopping` callback restores the weights for the best model (after 41 epochs)
# + [markdown] pycharm={}
# ## Evaluate model performance
# + pycharm={}
fig, ax = plt.subplots(figsize=(12, 4))
loss_history = pd.DataFrame(lstm_training.history).pow(.5)
loss_history.index += 1
best_rmse = loss_history.val_loss.min()
best_epoch = loss_history.val_loss.idxmin()
title = f'5-Epoch Rolling RMSE (Best Validation RMSE: {best_rmse:.4%})'
loss_history.columns=['Training RMSE', 'Validation RMSE']
loss_history.rolling(5).mean().plot(logy=True, lw=2, title=title, ax=ax)
ax.axvline(best_epoch, ls='--', lw=1, c='k')
sns.despine()
fig.tight_layout()
fig.savefig(results_path / 'rnn_sp500_error', dpi=300);
# + pycharm={}
train_rmse_scaled = np.sqrt(rnn.evaluate(X_train, y_train, verbose=0))
test_rmse_scaled = np.sqrt(rnn.evaluate(X_test, y_test, verbose=0))
print(f'Train RMSE: {train_rmse_scaled:.4f} | Test RMSE: {test_rmse_scaled:.4f}')
# -
train_predict_scaled = rnn.predict(X_train)
test_predict_scaled = rnn.predict(X_test)
train_ic = spearmanr(y_train, train_predict_scaled)[0]
test_ic = spearmanr(y_test, test_predict_scaled)[0]
print(f'Train IC: {train_ic:.4f} | Test IC: {test_ic:.4f}')
# ### Rescale predictions
train_predict = pd.Series(scaler.inverse_transform(train_predict_scaled).squeeze(), index=y_train.index)
test_predict = (pd.Series(scaler.inverse_transform(test_predict_scaled)
.squeeze(),
index=y_test.index))
y_train_rescaled = scaler.inverse_transform(y_train.to_frame()).squeeze()
y_test_rescaled = scaler.inverse_transform(y_test.to_frame()).squeeze()
train_rmse = np.sqrt(mean_squared_error(train_predict, y_train_rescaled))
test_rmse = np.sqrt(mean_squared_error(test_predict, y_test_rescaled))
f'Train RMSE: {train_rmse:.2f} | Test RMSE: {test_rmse:.2f}'
sp500['Train Predictions'] = train_predict
sp500['Test Predictions'] = test_predict
sp500 = sp500.join(train_predict.to_frame('predictions').assign(data='Train')
.append(test_predict.to_frame('predictions').assign(data='Test')))
# ### Plot Results
# + pycharm={}
fig=plt.figure(figsize=(14,7))
ax1 = plt.subplot(221)
sp500.loc['2015':, 'SP500'].plot(lw=4, ax=ax1, c='k')
sp500.loc['2015':, ['Test Predictions', 'Train Predictions']].plot(lw=1, ax=ax1, ls='--')
ax1.set_title('In- and Out-of-sample Predictions')
with sns.axes_style("white"):
ax3 = plt.subplot(223)
sns.scatterplot(x='SP500', y='predictions', data=sp500, hue='data', ax=ax3)
ax3.text(x=.02, y=.95, s=f'Test IC ={test_ic:.2%}', transform=ax3.transAxes)
ax3.text(x=.02, y=.87, s=f'Train IC={train_ic:.2%}', transform=ax3.transAxes)
ax3.set_title('Correlation')
ax3.legend(loc='lower right')
ax2 = plt.subplot(222)
ax4 = plt.subplot(224, sharex = ax2, sharey=ax2)
sns.distplot(train_predict.squeeze()- y_train_rescaled, ax=ax2)
ax2.set_title('Train Error')
ax2.text(x=.03, y=.92, s=f'Train RMSE ={train_rmse:.4f}', transform=ax2.transAxes)
sns.distplot(test_predict.squeeze()-y_test_rescaled, ax=ax4)
ax4.set_title('Test Error')
ax4.text(x=.03, y=.92, s=f'Test RMSE ={test_rmse:.4f}', transform=ax4.transAxes)
sns.despine()
fig.tight_layout()
fig.savefig(results_path / 'rnn_sp500_regression', dpi=300);
# -
| ml4trading-2ed/19_recurrent_neural_nets/01_univariate_time_series_regression.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# # The 3ML workflow
#
# Generally, an analysis in 3ML is performed in 3 steps:
#
# 1. Load the data: one or more datasets are loaded and then listed in a DataList object
# 2. Define the model: a model for the data is defined by including one or more PointSource, ExtendedSource or ParticleSource instances
# 3. Perform a likelihood or a Bayesian analysis: the data and the model are used together to perform either a Maximum Likelihood analysis, or a Bayesian analysis
# ## Loading data
#
# 3ML is built around the concept of _plugins_. A plugin is used to load a particular type of data, or the data from a particular instrument. There is a plugin for optical data, one for X-ray data, one for Fermi/LAT data and so on. Plugin instances can be added and removed at the loading stage without changing any other stage of the analysis (but of course, you need to rerun all stages to update the results).
#
# First, let's import 3ML:
from threeML import *
import matplotlib.pyplot as plt
# %matplotlib notebook
# + nbsphinx="hidden"
plt.style.use('mike')
import warnings
warnings.filterwarnings('ignore')
# -
# Let's start by loading one dataset, which in the 3ML workflow means creating an instance of the appropriate plugin:
# +
# Get some example data
from threeML.io.package_data import get_path_of_data_file
data_path = get_path_of_data_file("datasets/xy_powerlaw.txt")
# Create an instance of the XYLike plugin, which allows us to analyze simple x,y points
# with error bars
xyl = XYLike.from_text_file("xyl", data_path)
# Let's plot it just to see what we have loaded
fig = xyl.plot(x_scale='log', y_scale='log')
# -
# Now we need to create a DataList object, which in this case contains only one instance:
data = DataList(xyl)
# The DataList object can receive one or more plugin instances on initialization. So for example, to use two datasets we can simply do:
# +
# Create the second instance, this time of a different type
pha = get_path_of_data_file("datasets/ogip_powerlaw.pha")
bak = get_path_of_data_file("datasets/ogip_powerlaw.bak")
rsp = get_path_of_data_file("datasets/ogip_powerlaw.rsp")
ogip = OGIPLike("ogip", pha, bak, rsp)
# Now use both plugins
data = DataList(xyl, ogip)
# -
# The DataList object can accept any number of plugins as input.
#
# You can also create a list of plugins, and then create a DataList using the "expansion" feature of the python language ('*'), like this:
# +
# This is equivalent to write data = DataList(xyl, ogip)
my_plugins = [xyl, ogip]
data = DataList(*my_plugins)
# -
# This is useful if you need to create the list of plugins at runtime, for example looping over many files.
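# For instance, a sketch of that pattern (here we simply reuse the example file from above twice for illustration; in practice the file list might come from e.g. glob):
# +
xy_files = [data_path, data_path]
loop_plugins = [XYLike.from_text_file("xyl_%i" % i, f) for i, f in enumerate(xy_files)]
data_from_loop = DataList(*loop_plugins)
# -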
# ## Define the model
#
# After you have loaded your data, you need to define a model for them. A model is a collection of one or more sources. A source represents an astrophysical reality, like a star, a galaxy, a molecular cloud... There are 3 kinds of sources: PointSource, ExtendedSource and ParticleSource. The latter is used only in special situations. The models are defined using the package astromodels. Here we will only go through the basics. You can find a lot more information here: [astromodels.readthedocs.org](https://astromodels.readthedocs.org)
#
# ### Point sources
# A point source is characterized by a name, a position, and a spectrum. These are some examples:
# +
# A point source with a power law spectrum
source1_sp = Powerlaw()
source1 = PointSource("source1", ra=23.5, dec=-22.7, spectral_shape=source1_sp)
# Another source with a log-parabolic spectrum plus a power law
source2_sp = Log_parabola() + Powerlaw()
source2 = PointSource("source2", ra=30.5, dec=-27.1, spectral_shape=source2_sp)
# A third source defined in terms of its Galactic latitude and longitude
source3_sp = Cutoff_powerlaw()
source3 = PointSource("source3", l=216.1, b=-74.56, spectral_shape=source3_sp)
# -
# ### Extended sources
#
# An extended source is characterized by its spatial shape and its spectral shape:
# +
# An extended source with a Gaussian shape centered on R.A., Dec = (30.5, -27.1)
# and a sigma of 3.0 degrees
ext1_spatial = Gaussian_on_sphere(lon0=30.5, lat0=-27.1, sigma=3.0)
ext1_spectral = Powerlaw()
ext1 = ExtendedSource("ext1", ext1_spatial, ext1_spectral)
# An extended source with a 3D function
# (i.e., the function defines both the spatial and the spectral shape)
ext2_spatial = Continuous_injection_diffusion()
ext2 = ExtendedSource("ext2", ext2_spatial)
# -
# **NOTE**: not all plugins support extended sources. For example, the XYLike plugin we used above does not, as it is meant for data without spatial resolution.
# ### Create the likelihood model
# Now that we have defined our sources, we can create a model simply as:
# +
model = Model(source1, source2, source3, ext1, ext2)
# We can see a summary of the model like this:
model.display(complete=True)
# -
# You can easily interact with the model. For example:
# +
# Fix a parameter
model.source1.spectrum.main.Powerlaw.K.fix = True
# or
model.source1.spectrum.main.Powerlaw.K.free = False
# Free it again
model.source1.spectrum.main.Powerlaw.K.free = True
# or
model.source1.spectrum.main.Powerlaw.K.fix = False
# Change the value
model.source1.spectrum.main.Powerlaw.K = 2.3
# or using physical units (need to be compatible with what shown
# in the table above)
model.source1.spectrum.main.Powerlaw.K = 2.3 * 1 / (u.cm**2 * u.s * u.TeV)
# Change the boundaries for the parameter
model.source1.spectrum.main.Powerlaw.K.bounds = (1e-10, 1.0)
# you can use units here as well, like:
model.source1.spectrum.main.Powerlaw.K.bounds = (1e-5 * 1 / (u.cm**2 * u.s * u.TeV),
10.0 * 1 / (u.cm**2 * u.s * u.TeV))
# Link two parameters so that they are forced to have the same value
model.link(model.source2.spectrum.main.composite.K_1,
model.source1.spectrum.main.Powerlaw.K)
# Link two parameters with a law. The parameters of the law become free
# parameters in the fit. In this case we impose a linear relationship
# between the index of the log-parabolic spectrum and the index of the
# powerlaw in source2: index_2 = a * alpha_1 + b.
law = Line()
model.link(model.source2.spectrum.main.composite.index_2,
model.source2.spectrum.main.composite.alpha_1,
law)
# If you want to force them to be in a specific relationship,
# say index_2 = alpha_1 + 1, just fix a and b to the corresponding values,
# after the linking, like:
# model.source2.spectrum.main.composite.index_2.Line.a = 1.0
# model.source2.spectrum.main.composite.index_2.Line.a.fix = True
# model.source2.spectrum.main.composite.index_2.Line.b = 0.0
# model.source2.spectrum.main.composite.index_2.Line.b.fix = True
# Now display() will show the links
model.display(complete=True)
# -
# Now, for the following steps, let's keep it simple and let's use a single point source:
# +
new_model = Model(source1)
source1_sp.K.bounds = (0.01, 100)
# -
# A model can be saved to disk, and reloaded from disk, as:
# +
new_model.save("new_model.yml", overwrite=True)
new_model_reloaded = load_model("new_model.yml")
# -
# The output is in [YAML format](http://www.yaml.org/start.html), a human-readable text-based format.
# ## Perform the analysis
#
# ### Maximum likelihood analysis
#
# Now that we have the data and the model, we can perform an analysis very easily:
# +
data = DataList(ogip)
jl = JointLikelihood(new_model, data)
best_fit_parameters, likelihood_values = jl.fit()
# -
# The output of the fit() method of the JointLikelihood object consists of two pandas DataFrame objects, which can be queried, saved to disk, reloaded and so on. Refer to the [pandas manual](http://pandas.pydata.org/pandas-docs/stable/dsintro.html#dataframe) for details.
#
# After the fit the JointLikelihood instance will have a .results attribute which contains the results of the fit.
jl.results.display()
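# As an optional aside, the two DataFrames returned by fit() are ordinary pandas objects, so they can be persisted with the usual pandas methods (the file names below are just examples):
# +
best_fit_parameters.to_csv("best_fit_parameters.csv")
likelihood_values.to_csv("likelihood_values.csv")
# -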
# The `jl.results` object can also be saved to disk, in a FITS file:
jl.results.write_to("my_results.fits", overwrite=True)
# The produced FITS file contains the complete definition of the model and of the results, so it can be reloaded in a separate session as:
# +
results_reloaded = load_analysis_results("my_results.fits")
results_reloaded.display()
# -
# The flux of the source can be computed from the 'results' object (even in another session by reloading the FITS file), as:
# +
fluxes = jl.results.get_flux(100 * u.keV, 1 * u.MeV)
# Same results would be obtained with
# fluxes = results_reloaded.get_point_source_flux(100 * u.keV, 1 * u.MeV)
# -
# We can also plot the spectrum with its error region, as:
fig = plot_spectra(jl.results, ene_min=0.1, ene_max=1e6, num_ene=500,
flux_unit='erg / (cm2 s)')
# ### Bayesian analysis
# In a very similar way, we can also perform a Bayesian analysis. As a first step, we need to define the priors for all parameters:
# +
# It can be set using the currently defined boundaries
new_model.source1.spectrum.main.Powerlaw.index.set_uninformative_prior(Uniform_prior)
# or uniform prior can be defined directly, like:
new_model.source1.spectrum.main.Powerlaw.index.prior = Uniform_prior(lower_bound=-3,
upper_bound=0)
# The same for the Log_uniform prior
new_model.source1.spectrum.main.Powerlaw.K.prior = Log_uniform_prior(lower_bound=1e-3,
upper_bound=100)
# or
new_model.source1.spectrum.main.Powerlaw.K.set_uninformative_prior(Log_uniform_prior)
new_model.display(complete=True)
# -
# Then, we can perform our Bayesian analysis like:
bs = BayesianAnalysis(new_model, data)
bs.set_sampler('ultranest')
bs.sampler.setup()
# Run the sampler (ultranest, as set above)
samples = bs.sample(quiet=True)
# The BayesianAnalysis object will now have a "results" member which will work exactly the same as explained for the Maximum Likelihood analysis (see above):
bs.results.display()
fluxes_bs = bs.results.get_flux(100 * u.keV, 1 * u.MeV)
fig = plot_spectra(bs.results, ene_min=0.1, ene_max=1e6, num_ene=500,
flux_unit='erg / (cm2 s)')
# We can also produce easily a "corner plot", like:
bs.results.corner_plot();
| docs/notebooks/The_3ML_workflow.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # LAT Gamma-Ray Burst Analysis
#
# This procedure provides a step-by-step example of extracting a LAT Gamma-Ray Burst observation and modeling the prompt and temporally extended emission using the X-Ray Spectral Fitting Package (**Xspec**) and **gtlike**, respectively. It should be noted that the LAT Low Energy (LLE) data products can also be used for LAT-detected GRBs (see [GRB Analysis Using GTBurst](https://fermidev.gsfc.nasa.gov/ssc/data/analysis/scitools/gtburst.html)).
# ## Prerequisites
#
# * [gtbin](https://fermi.gsfc.nasa.gov/ssc/data/analysis/scitools/help/gtbin.txt)
# * [gtdiffrsp](https://fermi.gsfc.nasa.gov/ssc/data/analysis/scitools/help/gtdiffrsp.txt)
# * [gtexpmap](https://fermi.gsfc.nasa.gov/ssc/data/analysis/scitools/help/gtexpmap.txt)
# * [gtfindsrc](https://fermi.gsfc.nasa.gov/ssc/data/analysis/scitools/help/gtfindsrc.txt)
# * [gtltcube](https://fermi.gsfc.nasa.gov/ssc/data/analysis/scitools/help/gtltcube.txt)
# * [gtmktime](https://fermi.gsfc.nasa.gov/ssc/data/analysis/scitools/help/gtmktime.txt)
# * [gtrspgen](https://fermi.gsfc.nasa.gov/ssc/data/analysis/scitools/help/gtrspgen.txt)
# * [gtselect](https://fermi.gsfc.nasa.gov/ssc/data/analysis/scitools/help/gtselect.txt)
# * [gtvcut](https://fermi.gsfc.nasa.gov/ssc/data/analysis/scitools/help/gtvcut.txt)
# * XSPEC, used as a spectral analysis tool in Step 3 of this procedure (See [Xanadu Data Analysis for X-Ray Astronomy](http://heasarc.gsfc.nasa.gov/docs/xanadu/).)
# * The FITS viewer [*fv*](https://fermi.gsfc.nasa.gov/ssc/data/analysis/scitools/heasarc.gsfc.nasa.gov/ftools/fv.html)
# * The astronomical imaging and data visualization application [*ds9*](http://hea-www.harvard.edu/RD/ds9/)
# ## Assumptions
#
# It is assumed that:
#
# * The referenced files reside in your working directory.
# * You know the time and location of the burst you wish to analyze.
#
# Note: For this thread, we will analyze GRB080916C, one of the brightest LAT GRBs on record. The relevant burst properties are:
#
# * T0 = 00:12:45.614 UT, 16 September 2008, corresponding to 243216766.614 seconds (MET)
# * Trigger # 243216766
# * RA = 121.8 degrees
# * Dec = -61.3 degrees
#
# * You have extracted the files used in this tutorial. You can download them in the code cell below, or you can extract them yourself in the [LAT Data Server](http://fermi.gsfc.nasa.gov/cgi-bin/ssc/LAT/LATDataQuery.cgi) with the following selections:
# ```
# GRB080916C
# Search Center (RA,Dec) = (121.8,-61.3)
# Radius = 40 degrees
# Start Time (MET) = 243216266.6 seconds
# Stop Time (MET) = 243218766.6 seconds
# Minimum Energy = 100 MeV
# Maximum Energy = 300000 MeV
# ```
# In this case, the GRB in question is of a sufficiently short duration, e.g. ~10's of seconds, so that the accumulation of LAT background counts is negligible. In order to study delayed emission, e.g. 10's of minutes to ~hour timescales, a likelihood analysis will be required.
# !wget https://fermi.gsfc.nasa.gov/ssc/data/analysis/scitools/data/latGrbAnalysis/LAT_GRB_analysis.tgz
# !mkdir data
# !mv LAT_GRB_analysis.tgz ./data
# !tar -xzvf ./data/LAT_GRB_analysis.tgz -C ./data
# # Steps:
#
# 1. [Localize the GRB.](https://fermi.gsfc.nasa.gov/ssc/data/analysis/scitools/lat_grb_analysis.html#TS)
# 2. [Generating the analysis files.](https://fermi.gsfc.nasa.gov/ssc/data/analysis/scitools/lat_grb_analysis.html#FILESGEN)
# 3. [Binned analysis with XSPEC (prompt emission).](https://fermi.gsfc.nasa.gov/ssc/data/analysis/scitools/lat_grb_analysis.html#XSPEC)
# 4. [Unbinned analysis using gtlike (extended emission).](https://fermi.gsfc.nasa.gov/ssc/data/analysis/scitools/lat_grb_analysis.html#GTLIKE)
#
# **NOTE**: During the analysis of the prompt emission (Steps 1 to 3) we will make use of the `P8R3_TRANSIENT020_V3` [response function](http://fermi.gsfc.nasa.gov/ssc/data/analysis/documentation/Cicerone/Cicerone_LAT_IRFs/IRF_overview.html), while in the analysis of the extended emission (Step 4) the `P8R3_SOURCE_V3` response function will be used.
# # 1. Localize the GRB
#
# **a) Select LAT data during prompt burst phase**
#
# This can either be done using a time interval ascertained from data from other instruments (e.g., using the GBM trigger time and T90 values reported in the [Fermi/GBM circular](http://gcn.gsfc.nasa.gov/gcn3/8245.gcn3)), or it can be estimated directly from the LAT light curve. Open the light curve `lc_zmax100.fits` with [*fv*](http://heasarc.nasa.gov/ftools/fv/):
# !fv ./data/LAT_GRB_analysis/lc_zmax100.fits
# You should get something that looks like this:
#
# <img src='https://fermi.gsfc.nasa.gov/ssc/data/analysis/scitools/images/latGrbAnalysis/LAT1_fvzmax100LCerrors.png'>
# Here, we have plotted TIME-243216766 on the x-axis (with TIMEDEL as error) and COUNTS on the y-axis (with ERROR as error). Hovering the cursor over the plot will yield its x-y coordinates; in this case, a plausible estimate of the LAT emission interval is (T0, T0+40s).
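# As a quick sanity check (simple arithmetic, not part of the original thread), the MET bounds used in the selection below follow directly from the trigger time:
t0 = 243216766            # trigger time in MET seconds, rounded down from 243216766.614
tstart, tstop = t0, t0 + 40
print(tstart, tstop)      # 243216766 243216806, the values passed to gtselect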
#
# We run **gtselect** to extract the data for this time interval.
#
# Remember to set `evclass=16` on the command line to ensure that we retain the transient class events:
# + language="bash"
# gtselect evclass=16
# ./data/LAT_GRB_analysis/filtered_zmax100.fits
# ./data/LAT_GRB_analysis/localize_zmax100.fits
# INDEF
# INDEF
# 15
# 243216766
# 243216806
# 100
# 300000
# 100
# -
# Note that we have also reduced the acceptance cone to 15 degrees to filter out non-burst photons.
# **b) Run the localization tools, gtfindsrc and gtbin**
#
# If the data are essentially background-free as is the case here with a burst duration of ~50 sec, one can run the localization tools **gtfindsrc** and **gtbin** directly on the FT1 file (obtained when downloading the data file from the FSSC LAT Data server).
#
# **gtfindsrc** is necessary to centroid the GRB. For longer intervals where the background is significant, we can model the instrumental and celestial backgrounds using diffuse model components. For these data, the integration time is only about 40 seconds, so the diffuse and instrumental background components make a negligible contribution to the total counts and we proceed without modeling them.
#
# We run **gtfindsrc** first to find the local maximum of the log-likelihood of a point source model as well as an estimate of the error radius. We will use this information to specify the size of the TS map in order to ensure that it contains the error circles we desire.
# + language="bash"
# gtfindsrc
# ./data/LAT_GRB_analysis/localize_zmax100.fits
# ./data/LAT_GRB_analysis/L1506171634094365357F22_SC00.fits
# ./data/LAT_GRB_analysis/GRB080916C_gtfindsrc.txt
# P8R3_TRANSIENT020_V3
# none
# none
# none
#
# CEL
# 121.8
# -61.3
# MINUIT
# 1e-2
# 0.01
# -
# In this example of running **gtfindsrc**, the `FT2.fits` file was the renamed spacecraft data file downloaded from the FSSC LAT Data server.
#
# Since our source model comprises only a point source to represent the signal from the GRB, we do not provide a source model file or a target source name.
#
# Similarly, since the exposure map is used for diffuse components, we do not need to provide an unbinned exposure map. Use of a livetime cube will make the point source exposure calculation faster, but for integrations less than 1000 s, it is generally not needed.
# We have now obtained a position of maximum likelihood; we will use (119.861, -56.581) as our burst location from now on. It should be noted that GRB080916C is an exceptionally bright event in the LAT, and centroiding it with **gtfindsrc** is fast and adequate. In many other cases, a GRB may have far fewer LAT counts and the creation of a counts map using **gtbin** will be useful in localizing it:
# + language="bash"
# gtbin
# CMAP
# ./data/LAT_GRB_analysis/localize_zmax100.fits
# ./data/LAT_GRB_analysis/GRB080916C_counts_map.fits
# NONE
# 30
# 30
# 0.2
# CEL
# 119.861
# -56.581
# 0
# AIT
# -
# We can now view the counts map in *ds9*:
# !ds9 ./data/LAT_GRB_analysis/GRB080916C_counts_map.fits
# The counts map should look something like this:
#
# <img src='https://fermi.gsfc.nasa.gov/ssc/data/analysis/scitools/images/latGrbAnalysis/LAT2_ds9countsmap.png'>
# # 2. Generating the analysis files
#
# In this subsection, we'll use the same data we extracted as for the localization analysis above.
#
# The purpose is to illustrate the steps necessary to model a GRB that is significantly fainter than GRB080916C; i.e., one for which the residual and diffuse backgrounds need to be modeled. This means that we will include diffuse components in the model definition and that will necessitate the exposure map calculation in order for the code to compute the predicted number of events. We'll see from the fit to the data that these diffuse components do indeed provide a negligible contribution to the overall counts for this burst.
# **a) Data subselection**
#
# Rerun **gtselect** with (119.861, -56.581) as the new search center:
# + language="bash"
# gtselect evclass=16
# ./data/LAT_GRB_analysis/filtered_zmax100.fits
# ./data/LAT_GRB_analysis/prompt_select.fits
# 119.861
# -56.581
# 15
# 243216766
# 243216806
# 100
# 300000
# 100
# -
# **b) Model definition**
#
# The model will include a point source at the GRB location, an isotropic component (to represent the extragalactic diffuse and/or the residual background), and a Galactic diffuse component that uses the recommended Galactic diffuse model, `gll_iem_v07.fits`. This file is available at the [LAT background models](http://fermi.gsfc.nasa.gov/ssc/data/access/lat/BackgroundModels.html) page via the [FSSC Data Access](http://fermi.gsfc.nasa.gov/ssc/data/access/) page.
#
# The easiest way to generate a simple 3 component model like this would be to use the [modeleditor](http://www.slac.stanford.edu/exp/glast/wb/prod/pages/sciTools_modeleditor/modelEditor.html) program (included in the [Fermitools](http://fermi.gsfc.nasa.gov/ssc/data/analysis/software/)) by typing `ModelEditor` at the prompt.
# <img src='https://fermi.gsfc.nasa.gov/ssc/data/analysis/scitools/images/latGrbAnalysis/LAT3_modEd.png'>
#
# Here, we have added three sources to our model:
#
# 1. GRB_080916C (you can rename the source by typing into the "Source Name:" text input box), with a PowerLaw2 spectrum. (The [Model Selection](http://fermi.gsfc.nasa.gov/ssc/data/analysis/documentation/Cicerone/Cicerone_Likelihood/Model_Selection.html) page of the Cicerone lists the possible spectral models.)
#
# We have adjusted the Lower Limit of its spectrum to be 100.0. We have also inputted the RA and Dec (calculated from gtfindsrc) into its spatial model. We have kept all other default values.
#
#
# 2. GALPROP Diffuse (there is a specific option for this in the "Source" menu). Edit the `File:` entry of the spatial model to point to your local copy of `gll_iem_v07.fits`. We have kept all other defaults.
#
#
# 3. Extragalactic Diffuse (there is a specific option for this). We have kept all the default values.
#
# If our analysis region had been close to any known LAT sources, we would have had to include them in our model (see this [tutorial](http://fermi.gsfc.nasa.gov/ssc/data/analysis/scitools/likelihood_tutorial.html#createSourceModel)).
# The xml file `GRB080916C_model.xml` should look like this:
#
# ```xml
# <?xml version="1.0" ?>
# <source_library title="Source Library" xmlns="http://fermi.gsfc.nasa.gov/source_library">
# <source name="GRB_080916C" type="PointSource">
# <spectrum type="PowerLaw2">
# <parameter free="true" max="1000.0" min="1e-05" name="Integral" scale="1e-06" value="1.0"/>
# <parameter free="true" max="-1.0" min="-5.0" name="Index" scale="1.0" value="-2.0"/>
# <parameter free="false" max="200000.0" min="20.0" name="LowerLimit" scale="1.0" value="20.0"/>
# <parameter free="false" max="200000.0" min="20.0" name="UpperLimit" scale="1.0" value="200000.0"/>
# </spectrum>
# <spatialModel type="SkyDirFunction">
# <parameter free="false" max="360.0" min="0.0" name="RA" scale="1.0" value="119.861"/>
# <parameter free="false" max="90.0" min="-90.0" name="DEC" scale="1.0" value="-56.581"/>
# </spatialModel>
# </source>
# <source name="GALPROP Diffuse Source" type="DiffuseSource">
# <spectrum type="ConstantValue">
# <parameter free="true" max="10.0" min="0.0" name="Value" scale="1.0" value="1.0"/>
# </spectrum>
# <spatialModel file="$(FERMI_DIR)/refdata/fermi/galdiffuse/gll_iem_v07.fits" type="MapCubeFunction">
# <parameter free="false" max="1000.0" min="0.001" name="Normalization" scale="1.0" value="1.0"/>
# </spatialModel>
# </source>
# <source name="Extragalactic Diffuse Source" type="DiffuseSource">
# <spectrum type="PowerLaw">
# <parameter free="true" max="100.0" min="1e-05" name="Prefactor" scale="1e-07" value="1.6"/>
# <parameter free="false" max="-1.0" min="-3.5" name="Index" scale="1.0" value="-2.1"/>
# <parameter free="false" max="200.0" min="50.0" name="Scale" scale="1.0" value="100.0"/>
# </spectrum>
# <spatialModel type="ConstantValue">
# <parameter free="false" max="10.0" min="0.0" name="Value" scale="1.0" value="1.0"/>
# </spatialModel>
# </source>
# </source_library>
# ```
# You can also create and edit model files by hand rather than use the modeleditor so long as the sources have the correct formats.
# For your convenience, you can create a local copy of the xml by running the python script below.
with open('./data/LAT_GRB_analysis/GRB080916C_model.xml', 'w') as file:
file.write("""<?xml version="1.0" ?>
<source_library title="Source Library" xmlns="http://fermi.gsfc.nasa.gov/source_library">
<source name="GRB_080916C" type="PointSource">
<spectrum type="PowerLaw2">
<parameter free="true" max="1000.0" min="1e-05" name="Integral" scale="1e-06" value="1.0"/>
<parameter free="true" max="-1.0" min="-5.0" name="Index" scale="1.0" value="-2.0"/>
<parameter free="false" max="200000.0" min="20.0" name="LowerLimit" scale="1.0" value="20.0"/>
<parameter free="false" max="200000.0" min="20.0" name="UpperLimit" scale="1.0" value="200000.0"/>
</spectrum>
<spatialModel type="SkyDirFunction">
<parameter free="false" max="360.0" min="0.0" name="RA" scale="1.0" value="119.861"/>
<parameter free="false" max="90.0" min="-90.0" name="DEC" scale="1.0" value="-56.581"/>
</spatialModel>
</source>
<source name="GALPROP Diffuse Source" type="DiffuseSource">
<spectrum type="ConstantValue">
<parameter free="true" max="10.0" min="0.0" name="Value" scale="1.0" value="1.0"/>
</spectrum>
<spatialModel file="$(FERMI_DIR)/refdata/fermi/galdiffuse/gll_iem_v07.fits" type="MapCubeFunction">
<parameter free="false" max="1000.0" min="0.001" name="Normalization" scale="1.0" value="1.0"/>
</spatialModel>
</source>
<source name="Extragalactic Diffuse Source" type="DiffuseSource">
<spectrum type="PowerLaw">
<parameter free="true" max="100.0" min="1e-05" name="Prefactor" scale="1e-07" value="1.6"/>
<parameter free="false" max="-1.0" min="-3.5" name="Index" scale="1.0" value="-2.1"/>
<parameter free="false" max="200.0" min="50.0" name="Scale" scale="1.0" value="100.0"/>
</spectrum>
<spatialModel type="ConstantValue">
<parameter free="false" max="10.0" min="0.0" name="Value" scale="1.0" value="1.0"/>
</spatialModel>
</source>
</source_library>""")
# **c) Refining the good time intervals (GTIs)**
#
# In general, our next step would be to run **gtmktime** to remove the time intervals whose events fell outside of our zenith angle cut and apply temporal cuts to the data based on the spacecraft file (`FT2.fits`). However, as our data encompasses a short period of time, this step is inappropriate in this case (**gtmktime** will report errors).
#
# It would be necessary if we were analyzing a longer period of time, such as a longer burst or the extended emission at the end of this thread (see the [Likelihood Tutorial](http://fermi.gsfc.nasa.gov/ssc/data/analysis/scitools/likelihood_tutorial.html) for more information).
#
# Also, if we use **gtvcut** to review the file `prompt_select.fits`, we can see that the GTIs span the entire time selection we have made.
# **d) Diffuse response calculation**
#
# Since we are dealing with `evclass=16` (transient class) events, we need to run the **gtdiffrsp** tool.
#
# For each diffuse component in the model, the **gtdiffrsp** tool populates the `DIFRSP0` and `DIFRSP1` columns. They contain the integral over the source extent (for the Galactic and isotropic components this is essentially the entire sky) of the source intensity spatial distribution times the PSF and effective area. It computes the counts model density of the various diffuse components at each measured photon location, arrival time, and energy, and this information is used in maximizing the likelihood computation. This integral is also computed for the point sources in the model, but since those sources are delta-functions in sky position, the spatial part of the integral is trivial.
#
# Note that the large size of the [new Galactic diffuse background model](http://fermi.gsfc.nasa.gov/ssc/data/access/lat/BackgroundModels.html) makes this a very resource-intensive process.
# !wget https://fermi.gsfc.nasa.gov/ssc/data/analysis/software/aux/gll_iem_v07.fits
# !mv gll_iem_v07.fits $FERMI_DIR/refdata/fermi/galdiffuse
# + language="bash"
# gtdiffrsp
# ./data/LAT_GRB_analysis/prompt_select.fits
# ./data/LAT_GRB_analysis/FT2.fits
# ./data/LAT_GRB_analysis/GRB080916C_model.xml
# P8R3_TRANSIENT020_V3
# -
# As mentioned before, **gtdiffrsp** modifies the input file by adding values to the `DIFRSP0` and `DIFRSP1` columns. In the tar file, for comparison purposes, the user can find two copies of the input file, one used as input of **gtdiffrsp** (named `prompt_select_pre_gtdiffrsp.fits`) and one obtained after running with **gtdiffrsp** and with the columns modified (named `prompt_select.fits`).
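# A quick optional check with astropy (not part of the original thread; it assumes the standard `EVENTS` extension name of LAT FT1 files) that the diffuse response columns were indeed filled:
from astropy.io import fits
with fits.open('./data/LAT_GRB_analysis/prompt_select.fits') as hdul:
    events = hdul['EVENTS'].data
    print(events.columns.names)                            # should now include DIFRSP0 and DIFRSP1
    print(events['DIFRSP0'][:5], events['DIFRSP1'][:5])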
# **e) Livetime cube generation**
#
# For analysis of longer time intervals, we would need to run **gtltcube** to calculate a livetime cube. For this analysis, this step is unnecessary due to the short timescales involved.
# **f) Exposure map generation**
#
# We now use **gtexpmap** to generate the [exposure map](http://fermi.gsfc.nasa.gov/ssc/data/analysis/documentation/Cicerone/Cicerone_Data_Exploration/livetime_and_exposure.html). Note that the exposure maps from this tool are intended for use with **unbinned likelihood analysis only**:
# + language="bash"
# gtexpmap
# ./data/LAT_GRB_analysis/prompt_select.fits
# ./data/LAT_GRB_analysis/FT2.fits
# none
# ./data/LAT_GRB_analysis/prompt_expmap.fits
# P8R3_TRANSIENT020_V3
# 25
# 100
# 100
# 20
# -
# The radius of the source region should be larger than the extraction region in the FT1 data in order to account for PSF tail contributions of sources just outside the extraction region.
#
# For energies down to 100 MeV, a 10 degree buffer is recommended (i.e., the total radius is the sum of the extraction radius and the buffer area, totaling 25 in our case); for higher energy lower bounds, e.g., 1 GeV, 5 degrees or less is acceptable.
# Again, note that we did not provide an "exposure hypercube" (the livetime cube) file.
#
# For data durations less than about 1ks, **gtexpmap** will execute faster doing the time integration over the livetimes in the FT2 file directly. For longer integrations, computing the livetime cube with **gtltcube** will be faster (more information can be found in the [Explore LAT Data section](http://fermi.gsfc.nasa.gov/ssc/data/analysis/scitools/explore_latdata.html)). At this step, the flux and spectral shape of the GRB prompt emission can be estimated using the **gtlike** tool (see section 4f).
# # 3. Binned analysis with XSPEC (prompt emission)
#
# We will now perform a spectral analysis on the prompt emission using XSPEC. (A basic knowledge of the use of XSPEC is assumed.)
#
# This requires a `PHA` (spectral) file and a `RSP` (response) file. It should be noted that as an alternative to XSPEC, the RMFIT software (available as a user contribution) can be used for spectral modeling; however, it is not distributed as part of the Fermitools.
# **a) Generating PHA and RSP files**
#
# We use **gtbin** to create the `PHA1` file (the choice of `PHA1` for `Type of output file` indicates that you want to create a `PHA` file — the standard FITS file containing a single binned spectrum — spanning the entire time range):
# + language="bash"
# gtbin
# PHA1
# ./data/LAT_GRB_analysis/prompt_select.fits
# ./data/LAT_GRB_analysis/080916C_LAT.pha
# ./data/LAT_GRB_analysis/FT2.fits
# LOG
# 100
# 300000
# 30
# -
# The **gtrspgen** tool is then run to generate an XSPEC-compatible response matrix from the LAT IRFs.
# + language="bash"
# gtrspgen
# PS
# ./data/LAT_GRB_analysis/080916C_LAT.pha
# ./data/LAT_GRB_analysis/FT2.fits
# ./data/LAT_GRB_analysis/080916C_LAT.rsp
# 90
# 0.5
# CALDB
# LOG
# 100
# 300000
# 100
# -
# **Notes**:
#
# * One should always use the `PS` response calculation method despite the option of using `GRB`. The latter was a method used in the early stages of the software but was never fully developed. Ultimately, the `PS` method should always be more accurate, in particular for longer bursts. For short bursts, the difference in results and execution time between `PS` and `GRB` is negligible.
#
#
# * In **gtrspgen** you choose the incident photon energy bins; i.e., the energy bins over which the incident photon model is computed. **gtrspgen** reads the output photon channel energy grid from the PHA file. The RSP created by **gtrspgen** is the mapping from the incident photon energy bins into the output photon channels. These incident photon energy bins need not be the same as the output channels and they should generally over-sample them:
# * If there are only a few channels then the calculation of the expected number of photons in each channel will be more accurate if there are more incident photon energy bins.
# * You might want to include some incident photon energy bins above and below the range of channels to account for the LAT's finite energy resolution. Incident energy bins above the highest channel energy are particularly important if some of the photon's energy leaks out of the detector.
# **b) Backgrounds**
#
# For the prompt emission of GRB 080916C (and most LAT bursts), there is minimal background contamination. For analyses of longer integrations, one can estimate the background using off-source regions as for more traditional X-ray analyses.
# **c) Running XSPEC**
#
# You now have the two files necessary to analyze the burst spectrum with XSPEC:
#
# * A PHA file with the spectrum.
# * A RSP file with the response function.
#
# Note that there is no background file. All non-burst sources are expected to produce less than 1 photon in the extraction region during the burst! Here we provide the simplest example of fitting a spectrum with XSPEC; for further details you should consult the [XSPEC manual](http://heasarc.gsfc.nasa.gov/docs/xanadu/xspec/).
# #### 1. Start XSPEC
#
# **Note**: The default version is now release 12 (XSPEC12).
# #### 2. Load in the data:
# ```%%bash
# >>xspec
# data ./data/080916C_LAT.pha```
# When you specify a data file, XSPEC will try to load the response file in the PHA file's header. Alternatively, you can specify the response file separately with the command `response 080916C_LAT.rsp`.
#
# We now load in a power law model for fitting the data. For more information on available models, see [this example](http://heasarc.gsfc.nasa.gov/docs/xanadu/xspec/xspec11/manual/node26.html).
# #### 3. Load the model:
# ```%%bash
# >>xspec
# model pow```
# #### 4. Set XSPEC to plot the data and to select the statistical method for fitting:
# ```bash
# >>xspec
# cpd /xs
# setplot energy
# plot ldata chi
# statistic cstat```
# The `cpd` command sets the current plotting device, which in this case is the `xserve` option (an xwindow that persists after XSPEC has been closed).
#
# The next two commands tell XSPEC to create a logarithmic (the "l" of `ldata`) plot of the energy (along the x-axis), using the data file specified before, with the fit statistic. (Consult the [manual](http://heasarc.gsfc.nasa.gov/docs/xanadu/xspec/xspec11/manual/node26.html) for another example.)
#
# It is important to note that, for LAT GRB analysis, we generally want to use the C-statistic instead of chi-squared due to the small number of counts. (However, the command for plotting is still `chi` or `chisq` regardless of the statistic used.) We have set this in the last step.
# #### 5. Perform a fit and plot the results:
# ```%%bash
# >>xspec
# fit
# plot ldata resid
# plot ldata ratio```
# They should all be invoked in the same xspec instance, so combining all of the steps above will yield:
# + language="bash"
# #For ldata resid
# xspec
# data ./data/LAT_GRB_analysis/080916C_LAT.pha
# model pow
#
#
# cpd /xs
# setplot energy
# plot ldata chi
# statistic cstat
# fit
# plot ldata resid
# -
# This will give you something that looks like: <img src="https://fermi.gsfc.nasa.gov/ssc/data/analysis/scitools/images/latGrbAnalysis/LAT4_xspec_ldata_res.png">
# + language="bash"
# # For ldata ratio
# xspec
# data ./data/LAT_GRB_analysis/080916C_LAT.pha
# model pow
#
#
# cpd /xs
# setplot energy
# plot ldata chi
# statistic cstat
# fit
# plot ldata ratio
# -
# And this will give you something that looks like: <img src="https://fermi.gsfc.nasa.gov/ssc/data/analysis/scitools/images/latGrbAnalysis/LAT5_xspec_ldata_ratio.png">
# # 4. Unbinned analysis using gtlike (extended emission)
#
# **a) Data subselection**
#
# Here, we will search for emission which may occur after the prompt GRB event; temporally extended high-energy emission has been detected in a large number of LAT bursts.
#
# We rerun **gtselect** on a time interval of ~40 to 400 seconds after the trigger on the file downloaded from the archive (i.e. the EV file) and renamed `FT1.fits`, choosing to [exclude "transient"](http://fermi.gsfc.nasa.gov/ssc/data/analysis/LAT_caveats.html) class photons for the analysis of extended emission. (A longer interval has been chosen to demonstrate **gtmktime**, **gtltcube**, etc.)
#
# Remember to set `evclass=128` on the command line to ensure that we use the source class events.
# Make a copy of the EV file and rename it to FT1.fits.
# !cp ./data/LAT_GRB_analysis/L1506171634094365357F22_EV00.fits ./data/LAT_GRB_analysis/FT1.fits
# + language="bash"
# gtselect evclass=128
# ./data/LAT_GRB_analysis/FT1.fits
# ./data/LAT_GRB_analysis/extended_select.fits
# 119.861
# -56.581
# 15
# 243216806
# 243217166
# 100
# 300000
# 100
# -
# **b) Refining the GTIs**
#
# Since our subselection encompasses a longer period of time, we run gtmktime to exclude bad time intervals with the filter expression suggested in the [Cicerone](https://fermi.gsfc.nasa.gov/ssc/data/analysis/documentation/Cicerone/):
# + language="bash"
# gtmktime
# ./data/LAT_GRB_analysis/FT2.fits
# (DATA_QUAL>0)&&(LAT_CONFIG==1)
# yes
# ./data/LAT_GRB_analysis/extended_select.fits
# ./data/LAT_GRB_analysis/extended_mktime.fits
# -
# Note: In an analysis of *transient* class events, we set the data quality portion of the filter expression to `DATA_QUAL>0` to retain these events.
# **c) Diffuse response calculation**
#
# We run now **gtdiffrsp**, making sure to use the correct response function.
#
# Again, note that the pass 8 Galactic diffuse background model causes this to be very resource-intensive. The tool modifies the input event data file, inserting values in the `DIFRSP0` and `DIFRSP1` columns.
#
# <!--- (A copy of the input file (named `extended_mktime_pre_gtdiffrsp.fits`), generated before running **gtdiffrsp**, can be found in the [tar file](https://fermi.gsfc.nasa.gov/ssc/data/analysis/scitools/data/latGrbAnalysis/LAT_GRB_analysis.tgz).) --->
# + language="bash"
# gtdiffrsp
# ./data/LAT_GRB_analysis/extended_mktime.fits
# ./data/LAT_GRB_analysis/FT2.fits
# ./data/LAT_GRB_analysis/GRB080916C_model.xml
# P8R3_SOURCE_V3
# -
# **d) Livetime cube generation**
#
# Now that our data file encompasses a longer period of time, it requires us to calculate the livetime cube using **gtltcube**:
# + language="bash"
# gtltcube
# ./data/LAT_GRB_analysis/extended_mktime.fits
# ./data/LAT_GRB_analysis/FT2.fits
# ./data/LAT_GRB_analysis/extended_ltcube.fits
# 0.025
# 0.5
# -
# **e) Exposure map generation**
#
# This time we will specify a livetime cube file:
# + language="bash"
# gtexpmap
# ./data/LAT_GRB_analysis/extended_mktime.fits
# ./data/LAT_GRB_analysis/FT2.fits
# ./data/LAT_GRB_analysis/extended_ltcube.fits
# ./data/LAT_GRB_analysis/extended_expmap.fits
# P8R3_SOURCE_V3
# 25
# 100
# 100
# 20
# -
# **f) Calculating the likelihood**
#
# We will use **gtlike** for this analysis. The `plot=yes` command brings up a plot of the fit results; `results=results.dat` saves a copy of the fit results to the file `results.dat`.
# + language="bash"
# gtlike plot=yes results=./data/LAT_GRB_analysis/results.dat
# UNBINNED
# ./data/LAT_GRB_analysis/FT2.fits
# ./data/LAT_GRB_analysis/extended_mktime.fits
# ./data/LAT_GRB_analysis/extended_expmap.fits
# ./data/LAT_GRB_analysis/extended_ltcube.fits
# ./data/LAT_GRB_analysis/GRB080916C_model.xml
# P8R3_SOURCE_V3
# MINUIT
# -
# The plot produced by **gtlike** shows the fitting results: The red, green, and blue lines correspond to the fit of the extragalactic diffuse background, the galactic diffuse background, and the GRB, respectively (as explained in the [Run gtlike](http://fermi.gsfc.nasa.gov/ssc/data/analysis/scitools/likelihood_tutorial.html#runLikelihood) section of the [Likelihood Tutorial](http://fermi.gsfc.nasa.gov/ssc/data/analysis/scitools/likelihood_tutorial.html)). The black line is the sum of the 3 components.
#
# In our model, we included the galactic and extragalactic diffuse backgrounds. In a real analysis, we would have also included any known sources within ~20 or so degrees of the burst.
#
# <img src='https://fermi.gsfc.nasa.gov/ssc/data/analysis/scitools/images/latGrbAnalysis/LAT6_gtlike.png'>
| GRBAnalysis/1.LATGRBAnalysis/1.LATGRBAnalysis.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ### Project
# For this project you have 4 files containing information about persons.
#
# The files are:
# * `personal_info.csv` - personal information such as name, gender, etc. (one row per person)
# * `vehicles.csv` - what vehicle people own (one row per person)
# * `employment.csv` - where a person is employed (one row per person)
# * `update_status.csv` - when the person's data was created and last updated
#
# Each file contains a key, `SSN`, which **uniquely** identifies a person.
#
# This key is present in **all** four files.
#
# You are guaranteed that the same SSN value is present in **every** file, and that it only appears **once per file**.
#
# In addition, the files are all sorted by SSN, i.e. the SSN values appear in the same order in each file.
# ##### Goal 1
#
# Your first task is to create an iterator for each of the four files that yields cleaned up data of the correct type (e.g. string, int, date, etc.), represented by a named tuple.
#
# For now these four iterators are just separate, independent iterators.
# ##### Goal 2
#
# Create a single iterable that combines all the columns from all the iterators.
#
# The iterable should yield named tuples containing all the columns.
# Make sure that the SSN's across the files match!
#
# All the files are guaranteed to be in SSN sort order, and every SSN is unique, and every SSN appears in every file.
#
# Make sure the SSN is not repeated 4 times - one time per row is enough!
# ##### Goal 3
#
# Next, you want to identify any stale records, where stale simply means the record has not been updated since 3/1/2017 (e.g. last update date < 3/1/2017). Create an iterator that only contains current records (i.e. not stale) based on the `last_updated` field from the `update_status` file.
# ##### Goal 4
#
# Find the largest group of car makes for each gender.
#
# Possibly more than one such group per gender exists (equal sizes).
# #### Hints
# You will not be able to use a simple split approach here, as I explain in the video.
#
# Instead you should use the `csv` module and the `reader` function.
#
# Here's a simple example of how to use it - you will need to expand on this for your project goals, but this is a good starting point.
# +
import csv
def read_file(file_name):
with open(file_name) as f:
rows = csv.reader(f, delimiter=',', quotechar='"')
yield from rows
# +
from itertools import islice
rows = read_file('personal_info.csv')
for row in islice(rows, 5):
print(row)
# -
# As you can see, the data is already separated into a list containing the individual fields - but of course they are all just strings.
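# As a hedged starting point for Goal 1, here is a minimal sketch of how one of these rows could be turned into a typed named tuple. The field names used here (`ssn`, `first_name`, `last_name`, `gender`, `language`) are assumptions for illustration only - use whatever the header row of `personal_info.csv` actually contains, and add `int`/`datetime` conversions for the files that need them.
# +
from collections import namedtuple

PersonalInfo = namedtuple('PersonalInfo', 'ssn first_name last_name gender language')

def personal_info_iter(file_name):
    rows = read_file(file_name)
    next(rows)  # skip the header row (assumed to be present)
    for row in rows:
        # all columns in this file are plain strings, so we only strip whitespace here
        yield PersonalInfo(*(field.strip() for field in row))
# -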
# ### Good luck!
| python-tuts/1-intermediate/04 - Iteration tools/Project /Project - Description.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/Pigaro-Petra06/Software_Engineering_Petra_Aria_Pendung/blob/main/pytest.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + id="mQX9YgTGHmo3"
# print("hai") ==> command
a = 10 # ==> integer
b = 3.1421332 # ==> float (Python has a single float type, covering both float and double)
c = "Pigaro" # ==> string, just add quotes
d = "<NAME>"
f = True # ==> boolean
g = False # ==> boolean
h = [] # ==> list
i = {} # ==> dictionary
j = () # ==> tuple
# + colab={"base_uri": "https://localhost:8080/"} id="efvT7QCoMNl-" outputId="695d3254-f1e1-48d5-cbdf-cc64454c9a71"
#Command
#Print
print("<NAME>")
print("Hello")
# + colab={"base_uri": "https://localhost:8080/"} id="FzMUW4FgMRWD" outputId="2df535e0-4daf-427a-8e65-1bf2fd6e12fc"
print(2)
print(3+10+15)
# + colab={"base_uri": "https://localhost:8080/"} id="cVWY604SMU_6" outputId="274706a1-1dd4-47c1-c496-1b9d29851d38"
print(d)
#concatenation
print("Hello My name " + d)
print("Hello My name {}".format(d))
# + colab={"base_uri": "https://localhost:8080/"} id="tpRA-67JMYC9" outputId="48d86766-998f-489e-e834-4b83ff3e6657"
print("nilai adalah " + str(a)) #ubah ke string dulu
print("nilai adalah {}".format(a))
# + [markdown] id="JJfvMFM6RLV4"
# List
# + colab={"base_uri": "https://localhost:8080/"} id="dsXPKaMHM6z_" outputId="0c93b375-bb16-4e03-ebda-009562f5c915"
print("My Name {}, My Final Score is {}, and i'm from Kalimantan {}".format(d,a,f))
# + id="-L5DkvNHNb7T"
Sekolah = ["Mobil", 20, True, 3.14, ["budi","Rani","dodo"], {"Mobil":"ferari"}]
# + colab={"base_uri": "https://localhost:8080/"} id="VbX5Oh4zN6De" outputId="67e1a4ca-0ace-415d-ae3f-cad34894e7d0"
Sekolah[4]
# + colab={"base_uri": "https://localhost:8080/"} id="M_JntQ0FPFu0" outputId="cf35b7d8-5608-4d64-e212-3d2a3a0b06a0"
Sekolah[4][0:2] # slice the list nested inside the list
# + colab={"base_uri": "https://localhost:8080/", "height": 35} id="h0VJp0qhPLIQ" outputId="0e5140b8-96b6-496e-beca-9885f6cf5400"
Sekolah[-1]["Mobil"]
# + id="Qo69JJGwPVGC"
Nama_Siswa = ["budi", "bubu","rara","riri","rana","jojo","juju"]
# + colab={"base_uri": "https://localhost:8080/"} id="-UuB53_YVOeh" outputId="b6cc7960-6b80-4a99-e5b6-d8a029d320ab"
Nama_Siswa[0:7:2] # slicing with a step of 2
# + colab={"base_uri": "https://localhost:8080/"} id="5hLxPSbOPhkq" outputId="1e2b1ffd-3bc6-4cc9-de11-033802e8308a"
Nama_Siswa[3:6] # take a range of items from the list
# + colab={"base_uri": "https://localhost:8080/"} id="hVSDixAyPr_m" outputId="67b59c6b-6b8f-4887-a3f6-36adfcb72a7a"
Nama_Siswa[-3:] # take the last 3 items from the end
# + id="XlxzcV3IQXzm"
Dict = {"Mobil":"ferari", 0:22, "Boolean":"Benar"}
# + colab={"base_uri": "https://localhost:8080/", "height": 35} id="uxIgBOu8QrYb" outputId="76df0154-390c-4b6d-a7d1-8f4681ba4151"
Dict["Mobil"]
# + colab={"base_uri": "https://localhost:8080/"} id="ax8AHopLQ194" outputId="9968449c-3c5f-4c7d-be37-ba15189123db"
Dict[0]
# + colab={"base_uri": "https://localhost:8080/", "height": 35} id="Zkfxx_R0REze" outputId="84aff6a8-1e36-4a44-b128-b6f333d8b87f"
Dict["Boolean"]
# + [markdown] id="EheOvWqUSCvK"
# Showing the whole list
# + colab={"base_uri": "https://localhost:8080/"} id="vrnydJ38Rjrs" outputId="a1cddbaf-c5d3-4f9d-879e-2152884e73c8"
Sekolah[:] # all list shown
# + colab={"base_uri": "https://localhost:8080/"} id="hKWYCuOrR8JV" outputId="d6fc944a-64b8-4c7f-ec87-579a8920e6db"
Dict
# + colab={"base_uri": "https://localhost:8080/"} id="7-QyYaXKR-q-" outputId="c195cae8-763d-4353-97b6-727a935fd950"
Sekolah[0:7]
| pytest.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
## Object detection
import cv2
import matplotlib.pyplot as plt
import numpy as np
# %matplotlib inline
shape=cv2.imread('castle.jpg',cv2.IMREAD_COLOR)
#waldosPic=cv2.imread('sunflowersPic.jpg',0)
gray_shape=cv2.cvtColor(shape,cv2.COLOR_BGR2GRAY)
fig=plt.figure()
# subplot 1
a=fig.add_subplot(1,2,1)
plt.imshow(shape,cmap='nipy_spectral')
a.set_title("castle")
#subplot 2
a=fig.add_subplot(1,2,2)
plt.imshow(gray_shape)
a.set_title("gray scale")
#fig=plt.figure(figsize=(100,100),dpi=80,facecolor='w',edgecolor='k')
plt.show()
# -
# Harris corners
gray_shape=np.float32(gray_shape)
har_cor=cv2.cornerHarris(gray_shape,3,3,0.01)
kernel=np.ones((3,3),np.uint8)
har_cor=cv2.dilate(har_cor,kernel,iterations=1)
shape[har_cor>0.03*har_cor.max()]=[255,0,0]
plt.imshow(shape)
gray=gray_shape
good_feature_corners=cv2.goodFeaturesToTrack(gray,500,0.02,10)
for i in good_feature_corners:
x,y=i[0]
cv2.rectangle(shape,(int(x-5),int(y-5)),(int(x+5),int(y+5)),(0,0,255),2)
plt.imshow(shape)
orb=cv2.ORB_create(5000)
gray_u8=np.uint8(gray_shape)  # ORB expects an 8-bit image (gray_shape was cast to float32 for cornerHarris above)
key=orb.detect(gray_u8,None)
key,desc=orb.compute(gray_u8,key)
image=cv2.drawKeypoints(shape,key,None,flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)  # outImage=None; the flag goes in the flags keyword
cv2.imshow("orb detection",image)
cv2.waitKey()
# +
## Object detection
import cv2
import matplotlib.pyplot as plt
import numpy as np
# %matplotlib inline
def orb_detector(image,template):
    '''
    Return the number of ORB feature matches between the input image and the template.
    '''
    image=cv2.cvtColor(image,cv2.COLOR_BGR2GRAY)
    orb=cv2.ORB_create(1000,1.2)
    (kp,des)=orb.detectAndCompute(image,None)
    (kp_temp,des_temp)=orb.detectAndCompute(template,None)
    # brute-force matcher with Hamming distance, suitable for binary ORB descriptors
    bf=cv2.BFMatcher(cv2.NORM_HAMMING,crossCheck=True)
    match=bf.match(des,des_temp)
    #matches=sorted(match,key=lambda val: val.distance)
    return len(match)
# -
img_template=cv2.imread('panasonic_template.jpeg',0)
plt.imshow(img_template)
cap=cv2.VideoCapture(0)
while True:
ret,frame=cap.read()
height,width=frame.shape[:2]
tlx=width//4
tly=(height//2)+(height//4)
btrx=(width-tlx)
btry=(height//2)-(height//4)
cv2.rectangle(frame,(tlx,tly),(btrx,btry),(255,0,0),3)
cropped=frame[btry:tly,tlx:btrx]
#frame=cv2.flip(frame,1)
matches=orb_detector(cropped,img_template)
output_string="matches "+str(matches)
cv2.putText(frame,output_string,(50,450),cv2.FONT_HERSHEY_COMPLEX,2,(250,0,0),2)
threshold=100
if matches>threshold:
cv2.rectangle(frame,(tlx,tly),(btrx,btry),(0,255,0),3)
cv2.putText(frame,output_string,(50,45),cv2.FONT_HERSHEY_COMPLEX,2,(250,25,0),2)
cv2.imshow("image detection using orb",frame)
if cv2.waitKey(1)==13:
break
cap.release()
cv2.destroyAllWindows()
# # HOG
gray=np.uint8(gray_shape)  # HOGDescriptor.compute expects an 8-bit image (gray_shape was cast to float32 for cornerHarris above)
cell_size=(2,2)            # cell size in pixels (height, width)
block_size=(10,10)         # block size in cells (height, width)
nbins=5                    # number of orientation histogram bins
hog=cv2.HOGDescriptor(_winSize=(gray.shape[1]//cell_size[1]*cell_size[1],\
gray.shape[0]//cell_size[0]*cell_size[0]),
_blockSize=(block_size[1]*cell_size[1],block_size[0]*cell_size[0])
,_blockStride=cell_size,_cellSize=cell_size,_nbins=nbins)
n_cells=(gray.shape[0]//cell_size[0],gray.shape[1]//cell_size[1])
hog_feats=hog.compute(gray).reshape(n_cells[1]-block_size[1]+1,n_cells[0]-block_size[0]+1,\
block_size[0],block_size[1],nbins).transpose((1,0,2,3,4))
# +
gradients=np.zeros((n_cells[0],n_cells[1],nbins))
cell_count=np.full((n_cells[0],n_cells[1],1),0,dtype=int)
for o in range(block_size[0]):
for p in range(block_size[1]):
gradients[o:n_cells[0]-block_size[0]+o+1,p:n_cells[1]-block_size[1]+p+1]+=\
hog_feats[:,:,o,p,:]
cell_count[o:n_cells[0]-block_size[0]+o+1,\
p:n_cells[1]-block_size[1]+p+1]+=1
gradients/=cell_count
# -
color_bins=4
plt.pcolor(gradients[:,:,color_bins])
plt.gca().invert_yaxis()
plt.gca().set_aspect('equal',adjustable='box')
plt.colorbar()
plt.ion()
plt.show()
#cv2.destroyAllWindows()
| openCV_tutorials/Objet detection.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# + [markdown] tags=[]
# # MiRAC-A
# The following example demonstrates the use of MiRAC-A data collected during ACLOUD, AFLUX and MOSAiC-ACA. The Microwave Radar/Radiometer for Arctic Clouds (MiRAC) consists of an active component, a 94 GHz Frequency Modulated Continuous Wave (FMCW) cloud radar, and a passive 89 GHz microwave radiometer. MiRAC-A is mounted on Polar 5 with a fixed viewing angle of 25° against flight direction.
#
# More information on the instrument can be found in [Mech et al. (2019)](https://amt.copernicus.org/articles/12/5019/2019/). If you have questions or if you would like to use the data for a publication, please don't hesitate to get in contact with the dataset authors as stated in the dataset attributes `contact` or `author`.
#
# ## Data access
# * To analyse the data they first have to be loaded by importing the (AC)³airborne meta data catalogue. To do so the ac3airborne package has to be installed. More information on how to do that and about the catalog can be found [here](https://github.com/igmk/ac3airborne-intake#ac3airborne-intake-catalogue).
# -
# ## Get data
import ac3airborne
cat = ac3airborne.get_intake_catalog()
list(cat.P5.MIRAC_A)
# ```{note}
# Have a look at the attributes of the xarray dataset `ds_mirac_a` for all relevant information on the dataset, such as author, contact, or citation information.
# ```
ds_mirac_a = cat['P5']['MIRAC_A']['ACLOUD_P5_RF05'].to_dask()
ds_mirac_a
# The dataset includes the radar reflectivity (`Ze`, `Ze_unfiltered`), the radar reflectivity filter mask (`Ze_flag`), the 89 GHz brightness temperature (`TB_89`) as well as information on the aircraft's flight altitude (`altitude`). The radar reflectivity is defined on a regular `time`-`height` grid with corresponding target positions (`lat`, `lon`). The full dataset is available on PANGAEA.
# + [markdown] tags=[]
# ## Load Polar 5 flight phase information
# Polar 5 flights are divided into segments to easily access start and end times of flight patterns. For more information have a look at the respective [github](https://github.com/igmk/flight-phase-separation) repository.
#
# At first we want to load the flight segments of (AC)³airborne
# -
meta = ac3airborne.get_flight_segments()
# The following command lists all flight segments into the dictionary `segments`
segments = {s.get("segment_id"): {**s, "flight_id": flight["flight_id"]}
for platform in meta.values()
for flight in platform.values()
for s in flight["segments"]
}
# In this example we want to look at a high-level segment during ACLOUD RF05
seg = segments["ACLOUD_P5_RF05_hl09"]
# Using the start and end times of the segment `ACLOUD_P5_RF05_hl09` stored in `seg`, we slice the MiRAC data to this flight section.
ds_mirac_a_sel = ds_mirac_a.sel(time=slice(seg["start"], seg["end"]))
# ## Plots
# The flight section during ACLOUD RF05 is flown at about 3 km altitude in west-east direction during a cold-air outbreak event perpendicular to the wind field. Clearly one can identify the roll-cloud structure in the radar reflectivity and the 89 GHz brightness temperature.
# %matplotlib inline
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
from numpy import log10
plt.style.use("../mplstyle/book")
# +
fig, (ax1, ax2) = plt.subplots(2, 1, sharex=True)
# 1st: plot flight altitude and radar reflectivity
ax1.plot(ds_mirac_a_sel.time, ds_mirac_a_sel.altitude*1e-3, label='Flight altitude', color='k')
im = ax1.pcolormesh(ds_mirac_a_sel.time, ds_mirac_a_sel.height*1e-3, 10*log10(ds_mirac_a_sel.Ze).T, vmin=-40, vmax=30, cmap='jet', shading='nearest')
fig.colorbar(im, ax=ax1, label='Radar reflectivity [dBz]')
ax1.set_ylim(-0.25, 3.5)
ax1.set_ylabel('Height [km]')
ax1.legend(frameon=False, loc='upper left')
# 2nd: plot 89 GHz TB
ax2.plot(ds_mirac_a_sel.time, ds_mirac_a_sel.TB_89, label='Tb(89 GHz)', color='k')
ax2.set_ylim(177, 195)
ax2.set_ylabel('$T_b$ [K]')
ax2.set_xlabel('Time (hh:mm) [UTC]')
ax2.legend(frameon=False, loc='upper left')
ax2.xaxis.set_major_formatter(mdates.DateFormatter('%H:%M'))
plt.show()
# -
| how_to_ac3airborne/datasets/mirac_a.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # 1) Dataset Walkthrough
# ## 1.1) Imports
import numpy as np
import pandas as pd
import seaborn as sns
# ## 1.2) Loading Dataset
df = pd.read_csv('BlackFriday.csv')
df.head()
# ## 1.3) Dataset Information
df.info()
# ## 1.4) Checking Null Values
df.isnull().sum()
# ## 1.5) Removing Product Category 2 & 3 Columns
del df['Product_Category_2']
del df['Product_Category_3']
# ## 1.6) Checking Uniques values per Column
for i in df.columns:
print(i,":",df[i].nunique())
# # Assumptions
#
# 1. Who is more likely to spend more on Black Friday, i.e. men/women, married/unmarried, and which age group
# 2. What type of products people purchase more, i.e. Category 1, 2 or 3
# 3. Who spent more, i.e. old vs. new customers
# 4. Which city areas spend more
# 5. Which occupation types spend more
# ## 2.1) Analysing Gender Column
#
# +
data = pd.DataFrame({'Density': [len(df[df['Gender'] == 'M']),len(df[df['Gender'] == 'F'])]},
index=['Male', 'Female'])
plot = data.plot.pie(y='Density', figsize=(5, 5), autopct = '%.1f')
# -
df.groupby('Gender').size().plot(kind = 'pie', autopct = '%.1f',
title = 'Gender Chart',
figsize = (5,5))
df.groupby('Gender').size().plot(kind = 'bar', figsize = (6,6), title = 'Gender Chart')
# ## 2.2) Average Amount Spend per Product by Gender
# +
print('Female : ' , round(df[df['Gender']=='F']['Purchase'].sum()/len(df[df['Gender']=='F']),2))
print('Male : ' , round(df[df['Gender']=='M']['Purchase'].sum()/len(df[df['Gender']=='M']),2))
f = round(df[df['Gender']=='F']['Purchase'].sum()/len(df[df['Gender']=='F']),2)
m = round(df[df['Gender']=='M']['Purchase'].sum()/len(df[df['Gender']=='M']),2)
# +
data = pd.DataFrame({'Density': [m,f]},
index=['Male', 'Female'])
plot = data.plot.pie(y='Density', figsize=(5, 5), autopct = '%.1f')
# -
# ## 2.3) Analysing Product Distribution based on Age
df.groupby('Age').size().plot(kind = 'bar', figsize = (12,6), title = 'Age Chart')
# ## 2.4) Analysing Unique Products Purchased based on Age
# +
lst = []
for i in df['Age'].unique():
lst.append([i,len(df[df['Age'] == i]['Product_ID'].unique())])
data = pd.DataFrame(lst, columns = ['Age', 'Products']).sort_values(by = 'Age')
data.plot.bar(x = 'Age', y = 'Products', figsize=(8, 6))
# -
# ## 2.5) Analysing Amount spend by Age
df.groupby('Age').sum()['Purchase'].plot(kind = 'bar', figsize = (8,6))
# ## 2.6) Average Amount spend by Age
df.groupby('Age').mean()['Purchase'].plot(kind = 'bar', figsize = (8,6))
# ## 2.7) Analysing Marital Status Distribution
df.groupby('Marital_Status').size().plot(kind = 'pie', autopct = '%.1f',
title = 'Marital Status',
figsize = (6,6))
# ## 2.8) Analyse Purchase by Age WRT Gender
sns.set(rc = {'figure.figsize':(12,6)})
sns.countplot(x = 'Age', hue = 'Gender', data = df)
# ## 2.9) Analyse Purchase by Marital Status WRT Gender
sns.set(rc = {'figure.figsize':(6,6)})
sns.countplot(x = 'Marital_Status', hue = 'Gender', data = df)
# ## 2.10) Analyse Purchase based on City Category
sns.countplot(x = df['City_Category'])
df.groupby('City_Category').size().plot(kind = 'pie', autopct = '%.1f',
title = 'City Category',
figsize = (6,6))
# ## 2.11) Analyse Purchase by City Category WRT Gender
sns.set(rc = {'figure.figsize':(6,6)})
sns.countplot(x = 'City_Category', hue = 'Gender', data = df)
# ## 2.12) Total Amount Spend on products by City Category
df.groupby('City_Category').sum()['Purchase'].plot(kind = 'pie', autopct = '%.1f',
title = 'City Category',
figsize = (6,6))
# ## 2.13) Average amount spend on products by City Category
df.groupby('City_Category').mean()['Purchase'].plot(kind = 'pie', autopct = '%.1f',
title = 'City Category',
figsize = (6,6))
# ## 2.14) Analyse Purchase by City Category
sns.set(rc = {'figure.figsize':(10,6)})
sns.countplot(x = df['City_Category'])
sns.set(rc = {'figure.figsize':(10,6)})
sns.countplot(x = 'City_Category', hue = 'Marital_Status', data = df)
sns.set(rc = {'figure.figsize':(10,6)})
sns.countplot(x = df['City_Category'], hue=df['Age'])
# ## 2.15) Analyse Purchase WRT Stay in Current City
sns.countplot(x = df['Stay_In_Current_City_Years'])
df.groupby('Stay_In_Current_City_Years').sum()['Purchase'].sort_values().plot(kind = 'bar')
df.groupby('Stay_In_Current_City_Years').sum()['Purchase'].plot(kind = 'pie', autopct = '%.1f',
title = 'City Category',
figsize = (6,6))
# ## 2.16) Analyse Occupation
sns.countplot(x = df['Occupation'])
df.groupby('Occupation').size().sort_values().plot(kind = 'bar')
# +
sns.set(rc = {'figure.figsize':(15,7)})
sns.countplot(x = df['Occupation'], hue= df['Marital_Status'])
# -
df.groupby('Occupation').nunique()['Product_ID'].plot(kind = 'bar')
df.groupby('Occupation').nunique()['Product_ID'].sort_values().plot(kind = 'bar')
# ## 2.17) Analyse Product Category
df.groupby('Product_Category_1')['Purchase'].sum().plot(kind = 'bar')
df.groupby('Product_Category_1')['Purchase'].sum().sort_values().plot(kind = 'bar')
df.groupby('Product_Category_1')['Purchase'].size().plot(kind = 'bar')
df.groupby('Product_Category_1').size().sort_values().plot(kind = 'bar')
df.groupby('Product_Category_1')['Purchase'].mean().plot(kind = 'bar')
df.groupby('Product_Category_1')['Purchase'].mean().sort_values().plot(kind = 'bar')
# ## 2.18) Analyse Product ID
df.groupby('Product_ID')['Purchase'].sum().nlargest(10).sort_values().plot(kind = 'barh')
df.groupby('Product_ID').size().nlargest(10).sort_values().plot(kind = 'barh')
# ## 2.19) Combining Marital Status & Gender
# +
l = []
for i in range(len(df)):
l.append(str(df['Gender'][i]) +"_"+ str(df['Marital_Status'][i]))
df['MarritalGender'] = l
# -
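# An equivalent vectorized alternative (a sketch; it should produce the same `MarritalGender` column without the Python-level loop):
df['MarritalGender'] = df['Gender'].astype(str) + "_" + df['Marital_Status'].astype(str)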
df.head()
sns.countplot(x = df['MarritalGender'])
# ## 2.20) Advance Analysis Marital + Gender Combined Column
sns.countplot(x = df['Age'],hue=df['MarritalGender'])
sns.countplot(x = df['Product_Category_1'],hue=df['MarritalGender'])
sns.countplot(x = df['Stay_In_Current_City_Years'],hue=df['MarritalGender'])
sns.countplot(x = df['City_Category'],hue=df['MarritalGender'])
sns.countplot(x = df['Age'],hue=df['MarritalGender'])
df.head()
# # Conclusion
#
# 1. Men are more likely to spend on sales like Black Friday
# 2. People who are unmarried spend more on Black Friday
# 3. People of age group 26-35 spend more on Black Friday and kids are less likely to spend money on sales
# 4. People located in city category C spend more on purchases on Black Friday
# 5. People of occupation types 0, 4 and 7 spend more on Black Friday
# 6. People who have newly moved to the city spend more on Black Friday, while people staying in the city as guests spend less on sales
# 7. Products P00025442 and P00110742 are in very high demand
# 8. Irrespective of occupation, the proportion of products purchased is similar, i.e. categories 1 and 5 are the most bought by customers of any occupation and, similarly, categories 13-18 are bought in smaller numbers compared to other products.
#
| Customer Analysis.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import pandas as pd
import pickle
from tqdm import tqdm
from tsfresh import select_features, extract_features
from tsfresh.utilities.dataframe_functions import impute
from tsfresh.feature_extraction import EfficientFCParameters
from tsfresh.feature_extraction.settings import from_columns
train1 = pd.read_csv("dataset/train.csv", nrows = 30000000,
dtype={'acoustic_data': np.int16, 'time_to_failure': np.float64}).reset_index()
| Notebooks/Earthquake Prediction/.ipynb_checkpoints/Earthquake Prediction-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="t3kLdO3EsK9n" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="c039e10a-996e-4b46-f2bc-f09fdff4638c"
# !pwd
# + id="BccqO-Hlsxoi" colab_type="code" colab={}
# mkdir -p '/content/drive/My Drive/Colab Notebooks/dw_matrix/matrix_3/data'
# + id="ZFH67E54vAMD" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="b802d717-5926-4929-9634-e01bcc65aad5"
# cd '/content/drive/My Drive/Colab Notebooks/dw_matrix/matrix_3'
# + id="qlB_9cOAvOcA" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="604cb152-5c97-4cbe-e620-0718dcd1266a"
# !ls
# + id="JMGqLFeTvn5H" colab_type="code" colab={}
#GIT_TOKEN = '.............
#GIT_URL = 'https:/{0}@/github.com/lidkalee/dw_matrix_road_sign.git'.format(GIT_TOKEN)
# + id="xGJa4F8dxhSm" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="48a7cd4c-c736-484a-c04b-a716fd324e54"
# cd data
# + id="ujPLUZSWxv_J" colab_type="code" colab={}
# Places where the data is stored:
# bit.ly/train_road_sign
# bit.ly/test_road_sign
# bit.ly/dw_signnames
# + id="RrnKnIBdyCe7" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 126} outputId="bbc44e04-fec8-40ee-b0ee-643f2885f24c"
# !curl -L http://bit.ly/dw_signnames -o signnames.csv
# + id="71b3lsOpyPYE" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="474dc56c-0e3d-4281-f9ae-354cae7abdd7"
# ls
# + id="_X3_zRV8yoUA" colab_type="code" colab={}
import pandas as pd
# + id="coDY3VPaytCd" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="7ed9bd21-1906-4aef-8d41-26ba5f4ddaf2"
train = pd.read_pickle('train.p')
train.keys()
# + id="L6qeAsanywbm" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="19a32f2d-cab9-4cb3-a291-20bece583244"
X_train, y_train = train['features'], train['labels']
X_train.shape, y_train.shape
# + id="qeim2TEczKI0" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 348} outputId="2f2aff0c-308c-4253-99ee-902b49ed5a07"
pd.read_csv('signnames.csv').sample(10)
# + id="lxtlrBPFza6-" colab_type="code" colab={}
| matrix3_day1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/carmenortegal/matem-ticasfinancierasII/blob/main/RandomWalk1D.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="QKEHvC5wF7Ls"
# # Random Walk 1D
#
#
# + id="rK-Oh4r1GDI7"
#First, we import the libraries with the functionality needed to work through the problem
import random #random number generation
import matplotlib.pyplot as plt #plotting
# + colab={"base_uri": "https://localhost:8080/", "height": 265} id="iyZ6ps1KF55_" outputId="dc27812a-30af-4ae0-a335-2f4cedfc8f8d"
random.seed()
precio = 100 # set an initial quoted price
n = 1000 # set the number of trading sessions
factor = 1.1 # set the increment factor
p = .5 # probability of an upward move
listaprecio = [precio] # initialize the price series
#Since the probability of an upward move is 0.5, if the random number is less than 0.5 the price is multiplied by the factor (it goes up);
#conversely, if the random number is greater than 0.5, the price is divided by the factor, so it is expected to go down.
for t in range(1,n):
if random.random() < p:
precio *= factor #*= -> precio=precio*factor
else:
precio /= factor #/= -> precio=precio/factor
listaprecio.append(precio)
plt.plot(range(n),listaprecio)
plt.show()
| RandomWalk1D.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + pycharm={"name": "#%%\n"}
# %matplotlib inline
import gym
import math
import gym_miniworld
import matplotlib.pyplot as plt
from environment.env_utils import get_data
from dataset_utils.multiview_data import MultiViewData
env = gym.make("MiniWorld-PickupObjs-v0")
env.domain_rand = True
env.max_episode_steps = math.inf
agent_fov = 90
steps_per_epoch = 21
epochs = 2
multiview_data = MultiViewData(episode_duration=steps_per_epoch,
data_buffer_size=int(1e5),
batch_size=16)
multiview_data.load(data_folder="../data/")
x, y = multiview_data.get_sample()
plt.imshow(x[0][:,0,:,:].permute(1, 2, 0).cpu())
plt.show()
plt.imshow(x[0][:,1,:,:].permute(1, 2, 0).cpu())
plt.show()
plt.imshow(y[0].permute(1, 2, 0).cpu())
plt.show()
# + pycharm={"name": "#%%\n"}
for i in range(epochs):
_ = env.reset()
env.agent.cam_fov_y = agent_fov
obs, top_down_obs = get_data(env)
multiview_data.append_obs(obs, top_down_obs, new_episode=True)
for _ in range(steps_per_epoch-1):
_, reward, done, info = env.step(env.actions.turn_right)
obs, top_down_obs = get_data(env)
multiview_data.append_obs(obs, top_down_obs)
x, y = multiview_data.get_sample()
plt.imshow(x[0][:,0,:,:].permute(1, 2, 0).cpu())
plt.show()
plt.imshow(x[0][:,1,:,:].permute(1, 2, 0).cpu())
plt.show()
plt.imshow(y[0].permute(1, 2, 0).cpu())
plt.show()
env.close()
| environment/env_multiview.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import numpy as np
import matplotlib.pyplot as plt
import cv2
# %matplotlib inline
# Read in the image
image = cv2.imread('images/brain_MR.jpg')
# Change color to RGB (from BGR)
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
plt.imshow(image)
# +
# Convert the image to grayscale for processing
gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
plt.imshow(gray, cmap='gray')
# -
# ### Implement Canny edge detection
# +
# Try Canny using "wide" and "tight" thresholds
wide = cv2.Canny(gray, 30, 100)
tight = cv2.Canny(gray, 200, 240)
# Display the images
f, (ax1, ax2) = plt.subplots(1, 2, figsize=(20,10))
ax1.set_title('wide')
ax1.imshow(wide, cmap='gray')
ax2.set_title('tight')
ax2.imshow(tight, cmap='gray')
# -
# ### TODO: Try to find the edges of this flower
#
# Set a small enough threshold to isolate the boundary of the flower.
# +
# Read in the image
image = cv2.imread('images/sunflower.jpg')
# Change color to RGB (from BGR)
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
plt.imshow(image)
# +
# Convert the image to grayscale
gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
print(np.max(gray), " ", np.min(gray))
# TODO: Define lower and upper thresholds for hysteresis
# with these relatively high thresholds, only the strongest edges (such as the flower boundary) are kept and most of the background noise is suppressed
lower = 190
upper = 250
edges = cv2.Canny(gray, lower, upper)
plt.figure(figsize=(20,10))
plt.imshow(edges, cmap='gray')
# -
| 1_2_Convolutional_Filters_Edge_Detection/.ipynb_checkpoints/5. Canny Edge Detection-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# EDA Summaries in Python
# -
import os
os.chdir("MyPath/Python/Chapter_04")
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
TW = pd.read_csv("Data/TheWALL.csv",delimiter=',')
TW['Score'].quantile([0,0.25,0.5,0.75,1])
np.diff(TW['Score'].quantile([0,0.25,0.5,0.75,1]))
TW['Score'].quantile(np.arange(0,1.1,.1))
np.diff(TW['Score'].quantile(np.arange(0,1.1,.1)))
TW['HA_Ind'].value_counts()
TW.boxplot(column='Score',by='HA_Ind')
plt.show()
TW.query('Score<200').boxplot(column='Score',by='HA_Ind')
plt.show()
| Chapter04/Python/Output/Chapter_04_Exploratory_Analysis.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %matplotlib inline
#
# Stem Plots
# ============
#
# Stem plots are commonly used to visualise discrete distributions of data,
# and are useful to highlight discrete observations where the precision of values along
# one axis is high (e.g. an independent spatial measure like depth) and the other is less
# so (such that the sampling frequency along this axis is important, which is not
# emphasised by a scatter plot).
#
# +
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from pyrolite.plot import pyroplot
from pyrolite.plot.stem import stem
np.random.seed(82)
# -
# First let's create some example data:
#
#
#
x = np.linspace(0, 10, 10) + np.random.randn(10) / 2.0
y = np.random.rand(10)
df = pd.DataFrame(np.vstack([x, y]).T, columns=["Depth", "Fe3O4"])
# A minimal stem plot can be constructed as follows:
#
#
ax = df.pyroplot.stem(color="k", figsize=(5, 3))
# Stem plots can also be used in a vertical orientation, such as for visualising
# discrete observations down a drill hole:
#
#
#
ax = df.pyroplot.stem(
orientation="vertical",
s=12,
linestyle="--",
linewidth=0.5,
color="k",
figsize=(3, 5),
)
# The y-axis can then be inverted using:
ax.invert_yaxis()
# and if you'd like the x-axis to be labeled at the top:
ax.xaxis.set_ticks_position("top")
ax.xaxis.set_label_position("top")
| docs/source/examples/plotting/stem.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/kordusmonika/class_regr/blob/master/Adults3.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + id="hSsp-H3de4ZK" colab_type="code" colab={}
# !pip install qgrid
# !pip install --upgrade tables
# !pip install ensemble
# !pip install scikit-plot
# + id="ajkLuCxwe_TJ" colab_type="code" colab={}
import pandas as pd
import numpy as np
np.random.seed(2019)
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
from sklearn.dummy import DummyClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from scikitplot.estimators import plot_learning_curve
import xgboost as xgb
from functools import partial
from hyperopt import hp, fmin, tpe, STATUS_OK, Trials
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import StratifiedKFold
import matplotlib.pyplot as plt
import seaborn as sns
# %matplotlib inline
# + id="uWCBScw_fDU6" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="91f0a881-e83e-42d8-f9ce-8242405e208a"
# cd /content/drive/My Drive/Colab Notebooks/class_regr/data
# + id="Q0Xd9IgTfHVG" colab_type="code" colab={}
df = pd.read_hdf('/content/drive/My Drive/Colab Notebooks/class_regr/data/train.adult (1).h5')
# + id="8-_ZYo97fOJj" colab_type="code" colab={}
def check_missing_values():
for column in df.columns:
missing = column, df[column].isnull().sum()
if missing[1] == 0:continue
print(missing)
def get_feats(df):
black_list = ['Target', 'Target_cat', 'fnlwgt']
feats = df.select_dtypes(include=[np.number] ).columns
feats = [feat for feat in feats if feat not in black_list]
return feats
def train_and_predict(X, y, model):
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3)
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
return accuracy_score(y_test, y_pred)
# + id="VyWkRumPfVtp" colab_type="code" colab={}
df = df.fillna(-1)
# + id="LJVCaVHjfYX3" colab_type="code" colab={}
cat_feats = df.select_dtypes(include=[np.object] ).columns
for cat_feat in cat_feats:
df['{0}_cat'.format(cat_feat)] = pd.factorize( df[cat_feat] )[0]
# + [markdown] id="bG3eve3ggPkb" colab_type="text"
# #Feature Engineering
# + id="obZIBs3zgOZS" colab_type="code" colab={}
df['Black_husband'] = df[ ['Relationship','Race'] ].apply(lambda x: 1 if (x['Relationship'] == 'Husband') & (x['Race'] == 'Black') else 0, axis=1 )
df['White_husband'] = df[ ['Relationship', 'Race'] ].apply(lambda x: 1 if (x['Relationship'] == 'Husband') & (x['Race'] == 'White') else 0, axis = 1)
df['White_wife'] = df[ ['Relationship', 'Race'] ].apply(lambda x: 1 if (x['Relationship'] == 'Wife') & (x['Race'] == 'White') else 0, axis = 1)
df['Black_wife'] = df[ ['Relationship', 'Race'] ].apply(lambda x: 1 if (x['Relationship'] == 'Wife') & (x['Race'] == 'Black') else 0, axis = 1)
df['White'] = df['Race'].apply(lambda x: 1 if x == 'White' else 0)
df['Black'] = df['Race'].apply(lambda x: 1 if x == 'Black' else 0)
df['Other'] = df['Race'].apply(lambda x: 1 if (x!='White') & (x!='Black') else 0)
df['Husband'] = df['Relationship'].apply(lambda x: 1 if x == 'Husband' else 0)
df['Wife'] = df['Relationship'].apply(lambda x: 1 if x == 'Wife' else 0)
df['Other_race']=df['Race'].apply(lambda x: 1 if (x!='White') & (x!='Black') else 0)
df['Divorced'] = df['Martial Status'].apply(lambda x: 1 if x == 'Divorced' else 0)
df['Married-civ-spouse']=df['Martial Status'].apply(lambda x: 1 if x=='Married-civ-spouse' else 0)
df['Never-married']=df['Martial Status'].apply(lambda x: 1 if x=='Never-married' else 0)
df['Country_us']=df['Country'].apply(lambda x: 1 if x =='United-States' else 0)
df['Country_mex']=df['Country'].apply(lambda x: 1 if x =='Mexico' else 0)
df['Country_other']=df['Country'].apply(lambda x: 1 if (x !='Mexico') & (x!='United-States') else 0)
df['Occ_white']=pd.factorize(df[['Occupation_cat','White']].apply(lambda x: '{0}-{1}'.format(x['Occupation_cat'], x['White']),axis=1))[0]
df['Occ_other']=pd.factorize(df[['Occupation_cat','Other_race']].apply(lambda x: '{0}-{1}'.format(x['Occupation_cat'], x['Other_race']),axis=1))[0]
df['Productive_age']=df['Age'].apply(lambda x: 1 if (x >=20) & (x<=60) else 0)
df['Master_bachelor']=df['Education'].apply(lambda x: 1 if (x =='Bachelors') | (x=='Masters') else 0)
df['Doctor_prof']=df['Education'].apply(lambda x: 1 if (x =='Prof-school') | (x=='Doctorate') else 0)
df['Occ_sex']=pd.factorize(df[['Occupation_cat','Sex']].apply(lambda x: '{0}-{1}'.format(x['Occupation_cat'], x['Sex']),axis=1))[0]
df['Occ_rel_sex']=pd.factorize(df[['Occupation_cat','Relationship_cat','Sex']].apply(lambda x: '{0}-{1}-{2}'.format(x['Occupation_cat'], x['Relationship_cat'],x['Sex']),axis=1))[0]
df['Married_productive']=pd.factorize(df[['Married-civ-spouse','Productive_age']].apply(lambda x: '{0}-{1}'.format(x['Married-civ-spouse'], x['Productive_age']),axis=1))[0]
# + id="GFHXBLHBgOWH" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 153} outputId="a062936b-ccfa-4365-9cef-52f458cabf4a"
df.columns
# + [markdown] id="Kq3OASaGgnr7" colab_type="text"
# #Basic XGBoost
# + id="dkydN72Vf45r" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="43b9d318-2ab9-4b6b-e1a7-833ca72e58d2"
X = df [ get_feats(df) ].values
y = df[ 'Target_cat'].values
xgb_params = {
'max_depth': 10,
'n_estimators': 100,
}
train_and_predict(X, y, xgb.XGBClassifier(**xgb_params ))
# + [markdown] id="sMYO3iUFhJet" colab_type="text"
# #Hyperoptimisation
# + id="lhsR2R0wgOcm" colab_type="code" colab={}
def get_feats(df):
black_list = ['Target', 'Target_cat', 'fnlwgt']
feats = df.select_dtypes(include=[np.number] ).columns
feats = [feat for feat in feats if feat not in black_list]
return feats
def get_X_y(df, feats=None):
if feats is None:
feats = get_feats(df)
X = df[feats].values
y = df[ 'Target_cat'].values
return X,y
def model_train_predict(X, y, model, success_metric = accuracy_score ):
cv = StratifiedKFold(n_splits=4, random_state=0, shuffle=True)
scores = []
for train_idx, test_idx in cv.split(X,y):
model.fit(X[train_idx], y[train_idx])
y_pred = model.predict(X[test_idx])
score = success_metric(y[test_idx], y_pred)
scores.append(score)
return np.mean(scores), np.std(scores)
# + [markdown] id="vQEv9NBXogQp" colab_type="text"
# #Basic XGB (with StratifiedKFold) and 36 features ~86.7%
# + id="kzW3MY50gOfu" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="b09bf12d-9b17-4195-8f0b-80458518b51a"
X,y = get_X_y(df)
modelxgb = xgb.XGBClassifier(
max_depth=5,
random_state=2019,
n_estimators=70,
max_features=15,
)
model_train_predict(X, y, modelxgb)
# + id="PcDoGtPYgOi1" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="a1d27263-a8f8-4d04-f590-3b230e6aeaac"
X.shape
# + id="INvH4KTLgOmB" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 71} outputId="9ee2a987-4adf-4367-addc-7a4497b24f0d"
def objective(space):
xgb_params={
'max_depth': int(space['max_depth']),
'colsample_bytree': space['colsample_bytree'],
'learning_rate': space['learning_rate'],
'subsample': space['subsample'],
'random_state': int(space['random_state']),
'min_child_weight': int(space['min_child_weight']),
'reg_alpha': space['reg_alpha'],
'reg_lambda': space['reg_lambda'],
'n_estimators': 100,
'objective': 'reg:squarederror'
}
cv = StratifiedKFold(n_splits=4, random_state=0, shuffle=True)
model = xgb.XGBClassifier(**xgb_params)
for train_idx, test_idx in cv.split(X,y):
model.fit(X[train_idx], y[train_idx])
y_pred = model.predict(X[test_idx])
score = accuracy_score(y[test_idx], y_pred)
return {'loss':score, 'status': STATUS_OK } #zwracamy score
space ={
    'max_depth': hp.quniform ('x_max_depth', 5, 20, 1), # q = discrete values
'colsample_bytree': hp.uniform ('x_colsample_bytree', 0.8, 1.),
'learning_rate': hp.uniform ('x_learning_rate', 0.05, 0.2),
'subsample': hp.uniform ('x_subsample', 0.7, 1.),
'random_state': hp.quniform ('x_random_state', 0, 10000, 50),
'min_child_weight': hp.quniform ('x_min_child_weight', 1, 10, 1),
    'reg_alpha': hp.loguniform ('x_reg_alpha', 0., 1.), # logarithmic scale
'reg_lambda': hp.uniform ('x_reg_lambda', 0.7, 1.),
}
trials = Trials()
best_params = fmin(fn=objective,
space = space,
algo = tpe.suggest,
trials = trials,
max_evals=30
)
print('The best params: ', best_params)
# + [markdown] id="fqkztjPkuDTR" colab_type="text"
# After tuning the parameters we achieved merely ~85.78%, which is less than our basic XGBoost model. A likely reason: hyperopt minimizes the returned 'loss', so returning the raw accuracy from the objective makes the search minimize accuracy; returning the negated accuracy would let it maximize accuracy instead.
# + id="-G4uVWFPgOpM" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 312} outputId="e4b8649e-26fd-498a-fe85-1ddc01d7a4e9"
model = xgb.XGBClassifier(**xgb_params)
plot_learning_curve(model, X, y, cv=4, random_state=0, shuffle=True)
# + id="MgjsVkAEgOsQ" colab_type="code" colab={}
# + id="vz9WaYltgOvW" colab_type="code" colab={}
# + id="h4bYg5OZf5PQ" colab_type="code" colab={}
# + id="oaOf7j2UhLfl" colab_type="code" colab={}
| Adults3.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] slideshow={"slide_type": "slide"}
#
# # A little bit of SQL(ite)
# ### and very little Pandas
# <br><br>
# <div style="background-color:rgba(128, 128, 0, 0.1); text-align:left; vertical-align: middle; padding:20px 0;">
# <p style="font-size:134%;color:Deep Teal;">SC 4125: Developing Data Products</p>
# <p style="font-size:100%;color:Olive;">Module-1: Introduction</p><br>
#
# <br>
# by <a href="https://personal.ntu.edu.sg/anwitaman/" style="font-size:100%;color:Deep Teal;"><NAME></a><br>
# School of Computer Science and Engineering, NTU Singapore.
# </div>
#
#
# + [markdown] slideshow={"slide_type": "slide"}
# #### Teaching material
# - <a href="M1-Intro.pdf">.pdf</a> deck of slides (complements the html slides)
# - <a href="M1-Intro.slides.html">.html</a> deck of slides
# - <a href="M1-Intro.ipynb">.ipynb</a> Jupyter notebook
# + [markdown] slideshow={"slide_type": "slide"}
# This is companion material for <a href="Module-01.pdf">Module-1: Introduction</a> lecture for the <b>Developing Data Products</b> course.<br>
#
# The executable source <a href="M1-Examples.ipynb">.ipynb notebook</a> file for these HTML slides, and the <a href="CountriesDB.db">CountriesDB.db</a> file that goes with it can be found through the links.
#
# <img src="pics/ETL.png" alt="SQLLite screenshot" width="500"/>
# + [markdown] slideshow={"slide_type": "slide"}
# ### A very non-systematic review/recap
#
# You are expected to have already taken introductory courses
# - SC 1015: Introduction to Data Science & AI
# - SC 2207: Introduction to Database Systems
# + [markdown] slideshow={"slide_type": "subslide"}
# #### Some useful references for beginners/catching-up:
#
# https://www.sqlitetutorial.net/
#
# https://pandas.pydata.org/pandas-docs/stable/getting_started/tutorials.html
# + [markdown] slideshow={"slide_type": "slide"}
# #### <b>SQLite Browser</b>
#
# https://sqlitebrowser.org/
#
# <img src="pics/SQLiteScreenShot.png" alt="SQLLite screenshot" width="600"/>
# + [markdown] slideshow={"slide_type": "slide"}
# This notebook is accompanied with <a href="CountriesDB.db">CountriesDB.db</a> file, which comprises of three tables. The tables correspond to the <a href="https://www.kaggle.com/fernandol/countries-of-the-world">Countries of the world</a> and <a href="https://www.kaggle.com/codingelements/gapminder">gapminder</a> data obtained from <a href="https://www.kaggle.com/">Kaggle</a>, and the <a href="https://en.wikipedia.org/wiki/2016_Summer_Olympics_medal_table">Olympics 2016 medals data obtained from Wikipedia</a>.
#
# You may want to install and use the <b><a href="https://sqlitebrowser.org/">SQLite Browser</a></b> to check out the data in the .db file (as shown in the previous panel). You may also want to check out the source files indicated above.
#
# It is easy to import the .csv (and other similar formats, such as .tsv) directly using the graphical interface of SQLite Browser. Nevertheless, we will next explore how to do so, and other actions such as manipulate and query the data from the Jupyter programming environment.
# + [markdown] slideshow={"slide_type": "slide"}
# #### Import modules
#
# Let's start with importing necessary modules. We will also check the environment/version/etc.
# + slideshow={"slide_type": "subslide"}
# Housekeeping: We will first carry out the imports and print version numbers
import sys
print("Python version: " +str(sys.version))
import numpy as np
import pandas as pd
print("Numpy version: " +str(np.version.version))
print("Pandas version: " +str(pd.__version__))
#import xml.etree.ElementTree as ET
import sqlite3
print("SQLite version: " + str(sqlite3.version))
# + slideshow={"slide_type": "subslide"}
# ! jupyter --version
# + slideshow={"slide_type": "skip"}
# load data from .tsv file (as an alternate to loading from the DB file)
gapminderdatapath ='data/gapminder/' # change this to adjust relative path
gap_df = pd.read_csv(gapminderdatapath+'gapminder.tsv', sep='\t')
print(gap_df)
print(gap_df.columns.to_list())
print(gap_df[gap_df['country']=='Afghanistan'])
country_names_series=gap_df['country'].drop_duplicates()
country_names_array=gap_df['country'].unique()
# + [markdown] slideshow={"slide_type": "slide"}
# We begin by connecting to the CountriesDB.db database, using the <b>sqlite3.connect()</b> function.
# + slideshow={"slide_type": "fragment"}
conn = sqlite3.connect('CountriesDB.db')
# + [markdown] slideshow={"slide_type": "fragment"}
# We then create a cursor object using the <b>cursor()</b> function. To determine the tables stored in the database, we can query (using a SELECT statement) a special table called the <b>sqlite_schema</b>. It can also be referenced as sqlite_master.
#
# To retrieve data after executing a SELECT statement, one can either treat the cursor as an iterator, call the cursor’s <b>fetchone()</b> method to retrieve a single matching row, or call <b>fetchall()</b> to get a list of the matching rows.
# + slideshow={"slide_type": "fragment"}
cursor = conn.cursor()
cursor.execute("SELECT name FROM sqlite_schema WHERE type='table';")
db_tables= [x[0] for x in cursor.fetchall()] # we are storing this information in a list
print(db_tables)
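# + [markdown] slideshow={"slide_type": "fragment"}
# As a quick illustration of the other retrieval styles mentioned above (a minimal sketch): `fetchone()` returns a single matching row (or `None` once the results are exhausted), and the cursor itself can be iterated over row by row.
# + slideshow={"slide_type": "fragment"}
cursor.execute("SELECT name FROM sqlite_schema WHERE type='table';")
print(cursor.fetchone())   # first matching row, as a tuple
for row in cursor:         # the cursor is an iterator over the remaining rows
    print(row)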
# + [markdown] slideshow={"slide_type": "slide"}
# If we want to determine the schema of one of these tables, say, the gapminder table, we may achieve it as follows.
# + slideshow={"slide_type": "fragment"}
cursor.execute("SELECT sql FROM sqlite_master WHERE name = 'gapminder';")
print(cursor.fetchall())
# + [markdown] slideshow={"slide_type": "fragment"}
# Alternatively, we may also use the description method to determine the schema instead.
# + slideshow={"slide_type": "fragment"}
data=cursor.execute("SELECT * FROM 'gapminder'")
data_descr=[x[0] for x in data.description]
print(data_descr)
# + slideshow={"slide_type": "slide"}
# If you do not want to type the names, you can ofcourse
# refer to the db_tables and iterate as needed.
data=cursor.execute('SELECT * FROM ' +str(db_tables[0]))
data_descr=[x[0] for x in data.description]
print(data_descr)
# + [markdown] slideshow={"slide_type": "slide"}
# How do we check the data itself? Depending on the purpose, we can obtain it in different manners. We have already seen the most basic option, of carrying out a cursor.execute() followed by fetchone()/fetchall().
#
# One can also instead just carry out a pandas read_sql_query, and store the result in a dataframe.
# + slideshow={"slide_type": "fragment"}
Countries_df = pd.read_sql_query("SELECT * FROM CountriesOfTheWorld", conn) # we need to provide the DB connection information
Countries_df
# + [markdown] slideshow={"slide_type": "slide"}
# We will next create the Olymp2016 table using data from <a href="https://en.wikipedia.org/wiki/2016_Summer_Olympics">Wikipedia</a> for the Rio 2016 Olympics.
#
# <img src="pics/olymp2016mascots.png" alt="socially distanced mascots" width="500"/>
# + [markdown] slideshow={"slide_type": "slide"}
# Let's start by reading the data from the Wikipedia page. The read_html() method works neatly (assuming that the HTML source file itself is constructed neatly, otherwise, it may get miserable!). The read_html() obtains several tables. After inspection of the results, it turned out that the table indexed '2' is the medal tally table for the specific Wiki-page (as on 9th August 2021).
# + slideshow={"slide_type": "fragment"}
olymp_df=pd.read_html(r'https://en.wikipedia.org/wiki/2016_Summer_Olympics_medal_table')
olymp2016medals=olymp_df[2]
olymp2016medals
# + [markdown] slideshow={"slide_type": "slide"}
# We do not want to populate the database with a derived information of Totals, and wish to eliminate the last line. Let's try that!
# + slideshow={"slide_type": "fragment"}
olymp2016medals=olymp_df[2][:-1]
olymp2016medals
# + [markdown] slideshow={"slide_type": "slide"}
# NOC, standing for <b>National Olympic Committees</b> may become difficult to recall later on. Let's rename the column as <b>Country</b> instead.
# + slideshow={"slide_type": "fragment"}
olymp2016medals = olymp2016medals.rename(columns={'NOC':'Country'})
olymp2016medals
# + [markdown] slideshow={"slide_type": "slide"}
# Let's check the data types.
# + slideshow={"slide_type": "fragment"}
olymp2016medals.dtypes
# + [markdown] slideshow={"slide_type": "fragment"}
# How about we designate rank explicitly as integer?
# + slideshow={"slide_type": "fragment"}
olymp2016medals=olymp2016medals.astype({'Rank': 'int64'}, copy=True)
olymp2016medals.dtypes
# + [markdown] slideshow={"slide_type": "slide"}
# We are almost there. However, since I have populated the database with this data previously, and (may) reuse this teaching material, it is possible that the table already exists in the DB! As such, lets do a conditional deletion of the table, in case it exists, using a DROP operation. And now, you know how to DROP a table from your DB!
#
# <img src="pics/BobbyTables.png" alt="SQLLite screenshot" width="500"/>
#
# Once that is done, let's add the Table using Panda's to_sql() function.
# + slideshow={"slide_type": "fragment"}
# Drop table from DB conditionally, if it already existed.
if "Olymp2016" in db_tables:
cursor.execute('''DROP TABLE Olymp2016''')
# Add table to DB
olymp2016medals.to_sql('Olymp2016', conn, if_exists='replace', index = False)
# With the use of if_exists option, we could in fact get rid of the explicit conditional DROP TABLE op.
# other options for if_exists: fail, append (default: fail)
conn.commit() # Commit the changes to be sure.
# + slideshow={"slide_type": "skip"}
# Sanity check code: Is the data the same?
Olympics_df=pd.read_sql_query("SELECT * FROM Olymp2016", conn)
olymp2016medals == Olympics_df
# + [markdown] slideshow={"slide_type": "slide"}
# We are all good! The table entries have been added to the DB. We can now start querying the data. For example, if we want to know which all countries won more Gold medals than Silver and Bronze medals combined, we can get that easy-peasy.
# + slideshow={"slide_type": "fragment"}
MoreGoldThanSilver_df= pd.read_sql_query("SELECT * FROM Olymp2016 WHERE Gold>Silver+Bronze", conn)
MoreGoldThanSilver_df
# + [markdown] slideshow={"slide_type": "slide"}
# SQLite does not implement SQL fully, e.g., it does not support RIGHT JOIN and FULL OUTER JOIN (though it can be emulated, e.g., <a href="https://www.sqlitetutorial.net/sqlite-full-outer-join/">see here for details</a>). But it does support a lot of features, including INNER JOIN.
#
# Here's an example of how we can create a summary of countries with their population as per the CountriesOfTheWorld table, choosing the countries where the life expectancy reported in the 21st century has been less than 50 years, along with the year of the record, as per the gapminder table.
#
# To make the code readable (and reusable?), we have written the query separately first, before invoking the actual read_sql_query() function.
# + slideshow={"slide_type": "slide"}
SQL_quer = """
SELECT CountriesOfTheWorld.Country, CountriesOfTheWorld.Population, year, lifeExp
FROM CountriesOfTheWorld
INNER JOIN gapminder
ON CountriesOfTheWorld.Country =gapminder.country
WHERE year>1999 and lifeExp <50;
"""
Quer_res = pd.read_sql_query(SQL_quer, conn)
Quer_res
#conn.close() when you are done, and want to close the connection to the DB
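# + [markdown] slideshow={"slide_type": "fragment"}
# As noted above, a FULL OUTER JOIN is not available natively, but it can be emulated with a LEFT JOIN combined with a UNION ALL of the rows that only exist in the second table. Below is a minimal sketch on our two tables; note that country name spellings may not match exactly across the two sources, so unmatched rows simply appear with NULLs.
# + slideshow={"slide_type": "fragment"}
SQL_full_outer = """
SELECT c.Country AS Country, o.Gold
FROM CountriesOfTheWorld c LEFT JOIN Olymp2016 o ON c.Country = o.Country
UNION ALL
SELECT o.Country AS Country, o.Gold
FROM Olymp2016 o LEFT JOIN CountriesOfTheWorld c ON c.Country = o.Country
WHERE c.Country IS NULL;
"""
pd.read_sql_query(SQL_full_outer, conn)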
# + [markdown] slideshow={"slide_type": "slide"}
# #### Ungraded tasks
# Now, try a few things out yourselves. Following are some ideas to try. But think for yourself also, what other interesting information do you want to determine from these three tables!
#
# If your Python/Pandas skills are good, you will be tempted to import the data as Pandas dataframes and then operate on those dataframes. But, let's do that some other time (next session, in fact!). Instead, try to see what all you can do purely with SQL(ite). However, you may use Python to create scripts invoking the necessary SQL commands to carry out any necessary 'data cleaning or manipulation' operations.
#
# <b>Ungraded Task 1.1:</b> Determine all the countries with a population of more than 20 million as per the CountriesOfTheWorld table, that got no gold medals.
#
# <b>Ungraded Task 1.2:</b> Determine the ratio of the number of total medals to the per capita GDP for each country as per the CountriesOfTheWorld table.
#
# If and when you manage to solve these, share and compare your solution with others. Challenge each other with other such queries! Contemplate on what difficulties you encountered in answering these questions.
#
# Later in the course, we will delve into data wrangling, and use Python/Pandas to solve such questions. You could then compare the pros and cons with your experience.
# + [markdown] slideshow={"slide_type": "slide"}
# <p style="font-size:134%;color:Deep Teal;">That's it folks!</p>
#
# <img src="pics/ETL.png" alt="SQLLite screenshot" width="500"/>
| M1-Intro.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <div style = "font-family:Georgia;
# font-size:2.5vw;
# color:lightblue;
# font-style:bold;
# text-align:center;
# background:url('./iti/Title Background.gif') no-repeat center;
# background-size:cover)">
#
# <br></br><br></br><br></br>
# You Only Look Once (YOLO)
# <br></br><br></br>
#
# </div>
#
# <h1 style = "text-align:left">Introduction</h1>
#
# As you learned in the previous lessons, YOLO is a state-of-the-art, real-time object detection algorithm. In this notebook, we will apply the YOLO algorithm to detect objects in images. We have provided a series of images that you can test the YOLO algorithm on. Below is a list of the available images that you can load:
#
# * cat.jpg
# * city_scene.jpg
# * dog.jpg
# * dog2.jpg
# * eagle.jpg
# * food.jpg
# * giraffe.jpg
# * horses.jpg
# * motorbike.jpg
# * person.jpg
# * surf.jpg
# * wine.jpg
#
# These images are located in the`./images/`folder. We encourage you to test the YOLO algorithm on your own images as well. Have fun!
# # Importing Resources
#
# We will start by loading the required packages into Python. We will be using *OpenCV* to load our images, *matplotlib* to plot them, a`utils` module that contains some helper functions, and a modified version of *Darknet*. YOLO uses *Darknet*, an open source, deep neural network framework written by the creators of YOLO. The version of *Darknet* used in this notebook has been modified to work in PyTorch 0.4 and has been simplified because we won't be doing any training. Instead, we will be using a set of pre-trained weights that were trained on the Common Objects in Context (COCO) database. For more information on *Darknet*, please visit <a href="https://pjreddie.com/darknet/">Darknet</a>.
# +
import cv2
import matplotlib.pyplot as plt
from utils import *
from darknet import Darknet
# -
# # Setting Up The Neural Network
#
# We will be using the latest version of YOLO, known as YOLOv3. We have already downloaded the `yolov3.cfg` file that contains the network architecture used by YOLOv3 and placed it in the `/cfg/` folder. Similarly, we have placed the `yolov3.weights` file that contains the pre-trained weights in the `/weights/` directory. Finally, the `/data/` directory, contains the `coco.names` file that has the list of the 80 object classes that the weights were trained to detect.
#
# In the code below, we start by specifying the location of the files that contain the neural network architecture, the pre-trained weights, and the object classes. We then use *Darknet* to setup the neural network using the network architecture specified in the `cfg_file`. We then use the`.load_weights()` method to load our set of pre-trained weights into the model. Finally, we use the `load_class_names()` function, from the `utils` module, to load the 80 object classes.
# +
# Set the location and name of the cfg file
cfg_file = './cfg/yolov3.cfg'
# Set the location and name of the pre-trained weights file
weight_file = './weights/yolov3.weights'
# Set the location and name of the COCO object classes file
namesfile = 'data/coco.names'
# Load the network architecture
m = Darknet(cfg_file)
# Load the pre-trained weights
m.load_weights(weight_file)
# Load the COCO object classes
class_names = load_class_names(namesfile)
# -
# # Taking a Look at The Neural Network
#
# Now that the neural network has been setup, we can see what it looks like. We can print the network using the `.print_network()` function.
# Print the neural network used in YOLOv3
m.print_network()
# As we can see, the neural network used by YOLOv3 consists mainly of convolutional layers, with some shortcut connections and upsample layers. For a full description of this network please refer to the <a href="https://pjreddie.com/media/files/papers/YOLOv3.pdf">YOLOv3 Paper</a>.
#
# # Loading and Resizing Our Images
#
# In the code below, we load our images using OpenCV's `cv2.imread()` function. Since, this function loads images as BGR we will convert our images to RGB so we can display them with the correct colors.
#
# As we can see in the previous cell, the input size of the first layer of the network is 416 x 416 x 3. Since images have different sizes, we have to resize our images to be compatible with the input size of the first layer in the network. In the code below, we resize our images using OpenCV's `cv2.resize()` function. We then plot the original and resized images.
# +
# Set the default figure size
plt.rcParams['figure.figsize'] = [24.0, 14.0]
# Load the image
img = cv2.imread('./images/surf.jpg')
# Convert the image to RGB
original_image = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
# We resize the image to the input width and height of the first layer of the network.
resized_image = cv2.resize(original_image, (m.width, m.height))
# Display the images
plt.subplot(121)
plt.title('Original Image')
plt.imshow(original_image)
plt.subplot(122)
plt.title('Resized Image')
plt.imshow(resized_image)
plt.show()
# -
# # Setting the Non-Maximal Suppression Threshold
#
# As you learned in the previous lessons, YOLO uses **Non-Maximal Suppression (NMS)** to only keep the best bounding box. The first step in NMS is to remove all the predicted bounding boxes that have a detection probability that is less than a given NMS threshold. In the code below, we set this NMS threshold to `0.6`. This means that all predicted bounding boxes that have a detection probability less than 0.6 will be removed.
# Set the NMS threshold
nms_thresh = 0.6
# # Setting the Intersection Over Union Threshold
#
# After removing all the predicted bounding boxes that have a low detection probability, the second step in NMS, is to select the bounding boxes with the highest detection probability and eliminate all the bounding boxes whose **Intersection Over Union (IOU)** value is higher than a given IOU threshold. In the code below, we set this IOU threshold to `0.4`. This means that all predicted bounding boxes that have an IOU value greater than 0.4 with respect to the best bounding boxes will be removed.
#
# In the `utils` module you will find the `nms` function, that performs the second step of Non-Maximal Suppression, and the `boxes_iou` function that calculates the Intersection over Union of two given bounding boxes. You are encouraged to look at these functions to see how they work.
# Set the IOU threshold
iou_thresh = 0.4
# # Object Detection
#
# Once the image has been loaded and resized, and you have chosen your parameters for `nms_thresh` and `iou_thresh`, we can use the YOLO algorithm to detect objects in the image. We detect the objects using the `detect_objects(m, resized_image, iou_thresh, nms_thresh)` function from the `utils` module. This function takes in the model `m` returned by *Darknet*, the resized image, and the NMS and IOU thresholds, and returns the bounding boxes of the objects found.
#
# Each bounding box contains 7 parameters: the coordinates *(x, y)* of the center of the bounding box, the width *w* and height *h* of the bounding box, the detection confidence level, the object class probability, and the object class id. The `detect_objects()` function also prints out the time it took for the YOLO algorithm to detect the objects in the image and the number of objects detected. Since we are running the algorithm on a CPU, it takes about 2 seconds to detect the objects in an image; if we were to use a GPU, it would run much faster.
#
# Once we have the bounding boxes of the objects found by YOLO, we can print the class of the objects found and their corresponding object class probability. To do this we use the `print_objects()` function in the `utils` module.
#
# Finally, we use the `plot_boxes()` function to plot the bounding boxes and corresponding object class labels found by YOLO in our image. If you set the `plot_labels` flag to `False`, you will display the bounding boxes with no labels. This makes it easier to view the bounding boxes if your `nms_thresh` is too low. The `plot_boxes()` function uses the same color to plot the bounding boxes of the same object class. However, if you want all bounding boxes to be the same color, you can use the `color` keyword to set the desired color. For example, if you want all the bounding boxes to be red you can use:
#
# `plot_boxes(original_image, boxes, class_names, plot_labels = True, color = (1,0,0))`
#
# You are encouraged to change the `iou_thresh` and `nms_thresh` parameters to see how they affect the YOLO detection algorithm. The default values of `iou_thresh = 0.4` and `nms_thresh = 0.6` work well to detect objects in different kinds of images. In the cell below, we have repeated some of the code used before in order to save you from scrolling up and down when you want to change the `iou_thresh` and `nms_thresh` parameters or the image. Have fun!
# +
# Set the default figure size
plt.rcParams['figure.figsize'] = [24.0, 14.0]
# Load the image
img = cv2.imread('./images/surf.jpg')
# Convert the image to RGB
original_image = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
# We resize the image to the input width and height of the first layer of the network.
resized_image = cv2.resize(original_image, (m.width, m.height))
# Set the IOU threshold. Default value is 0.4
iou_thresh = 0.4
# Set the NMS threshold. Default value is 0.6
nms_thresh = 0.6
# Detect objects in the image
boxes = detect_objects(m, resized_image, iou_thresh, nms_thresh)
# Print the objects found and the confidence level
print_objects(boxes, class_names)
#Plot the image with bounding boxes and corresponding object class labels
plot_boxes(original_image, boxes, class_names, plot_labels = True)
# -
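# If you want to work with the raw detections rather than the plot, you can read the values out of `boxes` directly. The short sketch below is only an illustration and assumes each entry in `boxes` is an indexable sequence whose first seven values follow the order described above (center x, center y, width, height, detection confidence, class probability, class id); it simply prints one line per detection.
# +
# Hedged example: inspect the raw bounding-box values returned by detect_objects()
for box in boxes:
    x, y, w, h = (float(v) for v in box[:4])
    det_conf, cls_prob, cls_id = float(box[4]), float(box[5]), int(box[6])
    print('{}: confidence={:.2f}, class prob={:.2f}, box=({:.2f}, {:.2f}, {:.2f}, {:.2f})'.format(
        class_names[cls_id], det_conf, cls_prob, x, y, w, h))
# -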
| 2_2_YOLO/.ipynb_checkpoints/YOLO-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
PIP_PATH="~/anaconda3/bin/pip"
# # Option 1
# +
# !git clone https://github.com/minesh1291/ts-train.git
# # !cd ts-train/ && python setup.py install
# OR
# !cd ts-train/ && $PIP_PATH install -e .
# -
# !rm -rf ts-train
# # Option 2
# ! $PIP_PATH install git+https://github.com/minesh1291/ts-train.git
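# # Option 3 (alternative sketch)
#
# Not one of the original options, just a hedged alternative: installing with the pip that belongs to the running kernel avoids hard-coding `PIP_PATH`. `sys.executable` points at the current Python interpreter, so `-m pip` is guaranteed to target the same environment as the notebook.
# +
import sys

# !{sys.executable} -m pip install git+https://github.com/minesh1291/ts-train.git
# -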
| examples/installation.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.7.6 64-bit (''base'': conda)'
# name: python_defaultSpec_1599145129117
# ---
# ### Auto Sort Downloads Folder on Mac
# Sort certain file types in the Downloads folder:
# - images (png, jpeg, jpg, etc.)
# - videos (mp4, etc.)
#
# Import dependencies
import os
import shutil
# +
sourcepath = '/Users/jacobmannix/Desktop/folder'
mainfiles = os.listdir(sourcepath)
image_path = sourcepath + "/images"
video_path = sourcepath + "/videos"
audio_path = sourcepath + "/audio"
svg_path = sourcepath + "/images/svg"
# https://www.computerhope.com/issues/ch001789.htm
image_types = ('.jpeg', 'jpg', 'JPG', 'jpeg-2000', 'png', 'HEIC', 'openexr', 'tiff', 'gif', 'raw')
video_types = ('mp4', '.avi', 'mkv', '.h264', '.h265', 'm4v', 'mov', 'mpg', 'mpeg', 'wmv')
audio_types = ('aif', 'cda', 'mid', 'midi', 'mp3', 'mpa', 'ogg', 'wav', 'wma', 'wpl')
svg_types = ('.svg')
for file in mainfiles:
if file.endswith(image_types):
shutil.move(os.path.join(sourcepath, file), os.path.join(image_path, file))
elif file.endswith(video_types):
shutil.move(os.path.join(sourcepath, file), os.path.join(video_path, file))
    elif file.endswith(audio_types):
        shutil.move(os.path.join(sourcepath, file), os.path.join(audio_path, file))
# +
sourcepath = '/Users/jacobmannix/Desktop/folder'
mainfiles = os.listdir(sourcepath)
folders = ((image_types, image_path), (video_types, video_path), (audio_types, audio_path))
for types, path in folders:
for file in mainfiles:
if file.endswith(types):
            shutil.move(os.path.join(sourcepath, file), os.path.join(path, file))
# +
# Scratch cell: note that ('image') is just a string, not a tuple; a one-element tuple needs a trailing comma
other_types = ('image',)
other_path = "/other"  # hypothetical catch-all destination (assumption)
types_path = (other_types, other_path)
# print(types_path)
# +
# Auto Sort Downloads
sourcepath = '/Users/jacobmannix/Desktop/folder'
mainfiles = os.listdir(sourcepath)
folders = (
( # Images
"/images",
('.jpeg', 'jpg', 'JPG', 'jpeg-2000', 'png', 'HEIC', 'openexr', 'tiff', 'gif', 'raw')
),
( # Video
"/videos",
('mp4', '.avi', 'mkv', '.h264', '.h265', 'm4v', 'mov', 'mpg', 'mpeg', 'wmv')
),
( # Audio
"/audio",
('aif', 'cda', 'mid', 'midi', 'mp3', 'mpa', 'ogg', 'wav', 'wma', 'wpl')
),
( # SVG
"/images/svg",
('.svg')
)
)
for path, types in folders:
    # create the destination folder first if needed (makedirs handles the nested /images/svg case),
    # then move the matching files into it in the same pass
    if not os.path.isdir(sourcepath + path):
        os.makedirs(sourcepath + path)
    for file in mainfiles:
        if file.endswith(types):
            shutil.move(os.path.join(sourcepath, file), os.path.join(sourcepath + path, file))
# -
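# The same sorting pass can be written a bit more compactly with `pathlib`. This is just a sketch of an alternative, assuming the `sourcepath` and `folders` definitions from the cell above; destination folders are created up front with `mkdir(parents=True, exist_ok=True)` so nested targets such as `/images/svg` work on the first run.
# +
from pathlib import Path

source = Path(sourcepath)
for subfolder, types in folders:
    dest = source / subfolder.lstrip('/')
    dest.mkdir(parents=True, exist_ok=True)
    for item in source.iterdir():
        if item.is_file() and item.name.endswith(types):
            shutil.move(str(item), str(dest / item.name))
# -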
# Create a single destination subfolder relative to sourcepath
path = '/videos'
os.makedirs(sourcepath + path, exist_ok=True)
# + tags=[]
if os.path.isdir(sourcepath + path):
    print(sourcepath + path)
else:
    print('false')
# -
| AutoSortFolders.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
from PIL import Image
import numpy as np
from itertools import cycle
import random
import sys
import pygame
from pygame.locals import *
from flappy_agent_v3_reducemore_qlearn import agent
import pickle
SCREENWIDTH = 288
SCREENHEIGHT = 512
PIPEGAPSIZE = 150 # gap between upper and lower part of pipe
BASEY = SCREENHEIGHT * 0.79
SNAPSHOT_INTERVAL=4
SNAPSHOT_MAX=15000
SNAPSHOT_PATH="snapshot/np_128square_without_label.pickle"
agent.snapshot_list=[]
# image, sound and hitmask dicts
IMAGES, HITMASKS = {}, {}
# list of all possible players (tuple of 3 positions of flap)
PLAYERS_LIST = (
# red bird
(
'assets/sprites/redbird-upflap.png',
'assets/sprites/redbird-midflap.png',
'assets/sprites/redbird-downflap.png',
),
)
# list of backgrounds
BACKGROUNDS_LIST = (
'assets/sprites/background-day.png',
)
# list of pipes
PIPES_LIST = (
'assets/sprites/pipe-green.png',
)
try:
xrange
except NameError:
xrange = range
def main():
global SCREEN, FPSCLOCK, FPS
FPS=30
pygame.init()
pygame.font.init()
myfont = pygame.font.SysFont('Comic Sans MS', 20)
FPSCLOCK = pygame.time.Clock()
SCREEN = pygame.display.set_mode((SCREENWIDTH, SCREENHEIGHT))
pygame.display.set_caption('Flappy Bird')
# numbers sprites for score display
IMAGES['numbers'] = (
pygame.image.load('assets/sprites/0.png').convert_alpha(),
pygame.image.load('assets/sprites/1.png').convert_alpha(),
pygame.image.load('assets/sprites/2.png').convert_alpha(),
pygame.image.load('assets/sprites/3.png').convert_alpha(),
pygame.image.load('assets/sprites/4.png').convert_alpha(),
pygame.image.load('assets/sprites/5.png').convert_alpha(),
pygame.image.load('assets/sprites/6.png').convert_alpha(),
pygame.image.load('assets/sprites/7.png').convert_alpha(),
pygame.image.load('assets/sprites/8.png').convert_alpha(),
pygame.image.load('assets/sprites/9.png').convert_alpha()
)
# game over sprite
IMAGES['gameover'] = pygame.image.load('assets/sprites/gameover.png').convert_alpha()
# message sprite for welcome screen
IMAGES['message'] = pygame.image.load('assets/sprites/message.png').convert_alpha()
# base (ground) sprite
IMAGES['base'] = pygame.image.load('assets/sprites/base.png').convert_alpha()
ai_switch=0
while True:
# select random background sprites
randBg = 0
IMAGES['background'] = pygame.image.load(BACKGROUNDS_LIST[randBg]).convert()
# select random player sprites
randPlayer = 0
IMAGES['player'] = (
pygame.image.load(PLAYERS_LIST[randPlayer][0]).convert_alpha(),
pygame.image.load(PLAYERS_LIST[randPlayer][1]).convert_alpha(),
pygame.image.load(PLAYERS_LIST[randPlayer][2]).convert_alpha(),
)
# select random pipe sprites
pipeindex = 0
IMAGES['pipe'] = (
pygame.transform.rotate(
pygame.image.load(PIPES_LIST[pipeindex]).convert_alpha(), 180),
pygame.image.load(PIPES_LIST[pipeindex]).convert_alpha(),
)
        # hitmask for pipes
HITMASKS['pipe'] = (
getHitmask(IMAGES['pipe'][0]),
getHitmask(IMAGES['pipe'][1]),
)
# hitmask for player
HITMASKS['player'] = (
getHitmask(IMAGES['player'][0]),
getHitmask(IMAGES['player'][1]),
getHitmask(IMAGES['player'][2]),
)
if ai_switch==0:
movementInfo = showWelcomeAnimation()
if ai_switch==2:
pass
pygame.display.update()
crashInfo = mainGame(movementInfo)
ai_switch=showGameOverScreen(crashInfo)
def showWelcomeAnimation():
"""Shows welcome screen animation of flappy bird"""
myfont = pygame.font.SysFont('Comic Sans MS', 12)
FPS=30
# index of player to blit on screen
playerIndex = 0
playerIndexGen = cycle([0, 1, 2, 1])
# iterator used to change playerIndex after every 5th iteration
loopIter = 0
playerx = 64
playery = int((SCREENHEIGHT - IMAGES['player'][0].get_height()) / 2)
messagex = int((SCREENWIDTH - IMAGES['message'].get_width()) / 2)
messagey = int(SCREENHEIGHT * 0.12)
basex = 0
# amount by which base can maximum shift to left
baseShift = IMAGES['base'].get_width() - IMAGES['background'].get_width()
# player shm for up-down motion on welcome screen
playerShmVals = {'val': 0, 'dir': 1}
while True:
for event in pygame.event.get():
if event.type == QUIT or (event.type == KEYDOWN and event.key == K_ESCAPE):
pygame.quit()
sys.exit()
if event.type == KEYDOWN and (event.key == K_SPACE or event.key == K_UP):
# make first flap sound and return values for mainGame
pygame.display.set_caption('Flappy Bird: Manual mode')
FPS=30
return {
'playery': playery + playerShmVals['val'],
'basex': basex,
'playerIndexGen': playerIndexGen,
'AI_mod_on':False,
'Snapshot_mod_on':False,
}
if event.type == KEYDOWN and (event.key == K_a):
# make first flap sound and return values for mainGame
pygame.display.set_caption('Flappy Bird: AI mode')
FPS=10000
return {
'playery': playery + playerShmVals['val'],
'basex': basex,
'playerIndexGen': playerIndexGen,
'AI_mod_on':True,
'Snapshot_mod_on':False,
}
if event.type == KEYDOWN and (event.key == K_s):
# make first flap sound and return values for mainGame
return {
'playery': playery + playerShmVals['val'],
'basex': basex,
'playerIndexGen': playerIndexGen,
'AI_mod_on':True,
'Snapshot_mod_on':True,
}
# adjust playery, playerIndex, basex
if (loopIter + 1) % 5 == 0:
playerIndex = next(playerIndexGen)
loopIter = (loopIter + 1) % 30
playerShm(playerShmVals)
# draw sprites
SCREEN.blit(IMAGES['background'], (0,0))
SCREEN.blit(IMAGES['player'][playerIndex],
(playerx, playery + playerShmVals['val']))
SCREEN.blit(IMAGES['message'], (messagex, messagey))
SCREEN.blit(IMAGES['base'], (basex, BASEY))
textsurface1 = myfont.render('AI mode: Press A ', False, (0, 0, 0))
textsurface2 = myfont.render('Manual mode: Press UP', False, (0, 0, 0))
SCREEN.blit(textsurface1,(5,512*0.8+20))
SCREEN.blit(textsurface2,(5,512*0.8+40))
pygame.display.update()
FPSCLOCK.tick(FPS)
def showGameOverScreen(crashInfo):
"""crashes the player down ans shows gameover image"""
score = crashInfo['score']
playerx = 64
playery = crashInfo['y']
playerHeight = IMAGES['player'][0].get_height()
playerVelY = crashInfo['playerVelY']
playerAccY = 2
playerRot = crashInfo['playerRot']
playerVelRot = 7
basex = crashInfo['basex']
upperPipes, lowerPipes = crashInfo['upperPipes'], crashInfo['lowerPipes']
myfont = pygame.font.SysFont('Comic Sans MS', 20)
while True:
for event in pygame.event.get():
if event.type == QUIT or (event.type == KEYDOWN and event.key == K_ESCAPE):
pygame.quit()
sys.exit()
if event.type == KEYDOWN and (event.key == K_SPACE or event.key == K_UP):
if playery + playerHeight >= BASEY - 1:
return
if crashInfo["Restart"]:
return 2
# player y shift
if playery + playerHeight < BASEY - 1:
playery += min(playerVelY, BASEY - playery - playerHeight)
# player velocity change
if playerVelY < 15:
playerVelY += playerAccY
# rotate only when it's a pipe crash
if not crashInfo['groundCrash']:
if playerRot > -90:
playerRot -= playerVelRot
# draw sprites
SCREEN.blit(IMAGES['background'], (0,0))
for uPipe, lPipe in zip(upperPipes, lowerPipes):
SCREEN.blit(IMAGES['pipe'][0], (uPipe['x'], uPipe['y']))
SCREEN.blit(IMAGES['pipe'][1], (lPipe['x'], lPipe['y']))
SCREEN.blit(IMAGES['base'], (basex, BASEY))
showScore(score)
playerSurface = pygame.transform.rotate(IMAGES['player'][1], playerRot)
SCREEN.blit(playerSurface, (playerx,playery))
FPSCLOCK.tick(FPS)
pygame.display.update()
def playerShm(playerShm):
"""oscillates the value of playerShm['val'] between 8 and -8"""
if abs(playerShm['val']) == 8:
playerShm['dir'] *= -1
if playerShm['dir'] == 1:
playerShm['val'] += 1
else:
playerShm['val'] -= 1
def getRandomPipe():
"""returns a randomly generated pipe"""
# y of gap between upper and lower pipe
gapY = random.randrange(0, int(BASEY * 0.6 - PIPEGAPSIZE))
gapY += int(BASEY * 0.2)
pipeHeight = IMAGES['pipe'][0].get_height()
pipeX = SCREENWIDTH + 10
return [
{'x': pipeX, 'y': gapY - pipeHeight}, # upper pipe
{'x': pipeX, 'y': gapY + PIPEGAPSIZE}, # lower pipe
]
def showScore(score):
"""displays score in center of screen"""
scoreDigits = [int(x) for x in list(str(score))]
totalWidth = 0 # total width of all numbers to be printed
for digit in scoreDigits:
totalWidth += IMAGES['numbers'][digit].get_width()
Xoffset = (SCREENWIDTH - totalWidth) / 2
for digit in scoreDigits:
SCREEN.blit(IMAGES['numbers'][digit], (Xoffset, SCREENHEIGHT * 0.9))
Xoffset += IMAGES['numbers'][digit].get_width()
def checkCrash(player, upperPipes, lowerPipes):
"""returns True if player collders with base or pipes."""
pi = player['index']
player['w'] = IMAGES['player'][0].get_width()
player['h'] = IMAGES['player'][0].get_height()
# if player crashes into ground
if player['y'] + player['h'] >= BASEY - 1:
return [True, True]
else:
playerRect = pygame.Rect(player['x'], player['y'],
player['w'], player['h'])
pipeW = IMAGES['pipe'][0].get_width()
pipeH = IMAGES['pipe'][0].get_height()
for uPipe, lPipe in zip(upperPipes, lowerPipes):
# upper and lower pipe rects
uPipeRect = pygame.Rect(uPipe['x'], uPipe['y'], pipeW, pipeH)
lPipeRect = pygame.Rect(lPipe['x'], lPipe['y'], pipeW, pipeH)
# player and upper/lower pipe hitmasks
pHitMask = HITMASKS['player'][pi]
uHitmask = HITMASKS['pipe'][0]
lHitmask = HITMASKS['pipe'][1]
# if bird collided with upipe or lpipe
uCollide = pixelCollision(playerRect, uPipeRect, pHitMask, uHitmask)
lCollide = pixelCollision(playerRect, lPipeRect, pHitMask, lHitmask)
if uCollide or lCollide:
return [True, False]
return [False, False]
def pixelCollision(rect1, rect2, hitmask1, hitmask2):
"""Checks if two objects collide and not just their rects"""
rect = rect1.clip(rect2)
if rect.width == 0 or rect.height == 0:
return False
x1, y1 = rect.x - rect1.x, rect.y - rect1.y
x2, y2 = rect.x - rect2.x, rect.y - rect2.y
for x in xrange(rect.width):
for y in xrange(rect.height):
if hitmask1[x1+x][y1+y] and hitmask2[x2+x][y2+y]:
return True
return False
def getHitmask(image):
"""returns a hitmask using an image's alpha."""
mask = []
for x in xrange(image.get_width()):
mask.append([])
for y in xrange(image.get_height()):
mask[x].append(bool(image.get_at((x,y))[3]))
return mask
###############################################################################################
###############################################################################################
###############################################################################################
###############################################################################################
###############################################################################################
###############################################################################################
def snapshot2np(bit_string,window_size):
image_snapshot=Image.frombytes('RGB',window_size,bit_string)
image_snapshot=image_snapshot.rotate(180).transpose(Image.FLIP_LEFT_RIGHT).crop((0,0,SCREENWIDTH,SCREENHEIGHT*0.8))
image_snapshot=image_snapshot.resize((128,128),Image.ANTIALIAS)
# resize the screenshot to reduce the dimensionality
return np.array(image_snapshot)
def mainGame(movementInfo):
FPS=30
score = playerIndex = loopIter = 0
myfont = pygame.font.SysFont('Comic Sans MS', 12)
playerIndexGen = movementInfo['playerIndexGen']
playerx, playery = 64, movementInfo['playery']
basex = movementInfo['basex']
baseShift = IMAGES['base'].get_width() - IMAGES['background'].get_width()
# get 2 new pipes to add to upperPipes lowerPipes list
newPipe1 = getRandomPipe()
newPipe2 = getRandomPipe()
# list of upper pipes
upperPipes = [
{'x': SCREENWIDTH + 192, 'y': newPipe1[0]['y']},
{'x': SCREENWIDTH + 192 + (SCREENWIDTH / 2), 'y': newPipe2[0]['y']},
]
# list of lowerpipe
lowerPipes = [
{'x': SCREENWIDTH + 192, 'y': newPipe1[1]['y']},
{'x': SCREENWIDTH + 192 + (SCREENWIDTH / 2), 'y': newPipe2[1]['y']},
]
pipeVelX = -4
    # player velocity, max velocity, downward acceleration, acceleration on flap
playerVelY = -9 # player's velocity along Y, default same as playerFlapped
playerMaxVelY = 10 # max vel along Y, max descend speed
playerMinVelY = -8 # min vel along Y, max ascend speed
    playerAccY = 1 # players downward acceleration
playerRot = 45 # player's rotation
playerVelRot = 3 # angular speed
playerRotThr = 20 # rotation threshold
playerFlapAcc = -9 # players speed on flapping
playerFlapped = False # True when player flaps
if movementInfo['AI_mod_on']:
#agent reset before each game
agent.new_round()
if movementInfo['AI_mod_on']:
FPS=1000000
if movementInfo['Snapshot_mod_on']:
FPS=80
while True:
        # in snapshot mode, once the agent has collected the maximum number of snapshots the game exits itself
if len(agent.snapshot_list)>=SNAPSHOT_MAX:
sys.exit()
# check for crash here
crashTest = checkCrash({'x': playerx, 'y': playery, 'index': playerIndex},
upperPipes, lowerPipes)
if movementInfo['AI_mod_on'] and loopIter%agent.sample_interval==0:
#translate the current game information
current_state=agent.get_state({
'playerx':playerx,
'playery':playery,
'lowerPipes':lowerPipes,
'playerVelY':playerVelY,
'crashed':crashTest[0],
'score':score
})
#get best action from agent
best_action=agent.get_action(current_state)
state_code=agent.stateEncoder(current_state)
# print(agent.stateEncoder(current_state),agent.Q_table[state_code],best_action)
#agent record the step information
agent.next_step(current_state,best_action)
if best_action==1:
if playery > -2 * IMAGES['player'][0].get_height():
playerVelY = playerFlapAcc
playerFlapped = True
else:
for event in pygame.event.get():
if event.type == QUIT or (event.type == KEYDOWN and event.key == K_ESCAPE):
pygame.quit()
sys.exit()
if event.type == KEYDOWN and (event.key == K_SPACE or event.key == K_UP):
if playery > -2 * IMAGES['player'][0].get_height():
playerVelY = playerFlapAcc
playerFlapped = True
if crashTest[0]:
if movementInfo['AI_mod_on']:
#agent update its model
agent.end_round()
if not movementInfo['Snapshot_mod_on']:
agent.update_model()
#agent log the score information
agent.logger(score)
#agent output the debug information
agent.debug()
return {
'y': playery,
'groundCrash': crashTest[1],
'basex': basex,
'upperPipes': upperPipes,
'lowerPipes': lowerPipes,
'score': score,
'playerVelY': playerVelY,
'playerRot': playerRot,
'Restart': movementInfo['AI_mod_on']
}
# check for score
playerMidPos = playerx + IMAGES['player'][0].get_width() / 2
for pipe in upperPipes:
pipeMidPos = pipe['x'] + IMAGES['pipe'][0].get_width() / 2
if pipeMidPos <= playerMidPos < pipeMidPos + 4:
score += 1
# playerIndex basex change
if (loopIter + 1) % 3 == 0:
playerIndex = next(playerIndexGen)
loopIter = (loopIter + 1) % 30
# rotate the player
if playerRot > -90:
playerRot -= playerVelRot
# player's movement
if playerVelY < playerMaxVelY and not playerFlapped:
playerVelY += playerAccY
if playerFlapped:
playerFlapped = False
# more rotation to cover the threshold (calculated in visible rotation)
playerRot = 45
playerHeight = IMAGES['player'][playerIndex].get_height()
playery += min(playerVelY, BASEY - playery - playerHeight)
# move pipes to left
for uPipe, lPipe in zip(upperPipes, lowerPipes):
uPipe['x'] += pipeVelX
lPipe['x'] += pipeVelX
# add new pipe when first pipe is about to touch left of screen
if 0 < upperPipes[0]['x'] < 5:
newPipe = getRandomPipe()
upperPipes.append(newPipe[0])
lowerPipes.append(newPipe[1])
# remove first pipe if its out of the screen
if upperPipes[0]['x'] < -IMAGES['pipe'][0].get_width():
upperPipes.pop(0)
lowerPipes.pop(0)
        # drawing switch: 0 skips all sprite drawing (headless mode), 1 draws the game as usual
        headless_switch=0
# draw sprites
if headless_switch:
SCREEN.blit(IMAGES['background'], (0,0))
if headless_switch:
for uPipe, lPipe in zip(upperPipes, lowerPipes):
SCREEN.blit(IMAGES['pipe'][0], (uPipe['x'], uPipe['y']))
SCREEN.blit(IMAGES['pipe'][1], (lPipe['x'], lPipe['y']))
if headless_switch:
SCREEN.blit(IMAGES['base'], (basex, BASEY))
# print score so player overlaps the score
if headless_switch:
showScore(score)
if agent.iteration>0:
textsurface = myfont.render('Iter: {:4d} Max: {:4d} Last 100 Game Avg: {:4.1f}'.\
format(agent.iteration,agent.max_score,\
agent.last_100_avg_log[-1]), False, (0, 0, 0))
textsurface2 = myfont.render('Time: {:5.0f}s'.\
format(agent.elapsed_time), False, (0, 0, 0))
textsurface3 = myfont.render('Game #: {:5d}'.\
format(agent.game_number), False, (0, 0, 0))
if headless_switch:
SCREEN.blit(textsurface,(5,512*0.8+20))
SCREEN.blit(textsurface2,(5,512*0.85+20))
SCREEN.blit(textsurface3,(5,512*0.9+20))
# Player rotation has a threshold
visibleRot = playerRotThr
if playerRot <= playerRotThr:
visibleRot = playerRot
playerSurface = pygame.transform.rotate(IMAGES['player'][playerIndex], visibleRot)
if headless_switch:
SCREEN.blit(playerSurface, (playerx, playery))
pygame.display.update()
# take screen shots here
if movementInfo['Snapshot_mod_on'] and loopIter%SNAPSHOT_INTERVAL==0:
str_snapshot = pygame.image.tostring(SCREEN, "RGB", True)
window_size=SCREEN.get_size()
snapshot_np=snapshot2np(str_snapshot,window_size)
agent.snapshot_list.append(snapshot_np)
FPSCLOCK.tick(FPS)
if __name__ == '__main__':
main()
# -
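# If the game was run in snapshot mode (`S` at the welcome screen), the captured frames end up in `agent.snapshot_list`. The constant `SNAPSHOT_PATH` defined above is not used elsewhere in this notebook, so the cell below is only a sketch of how the list might be persisted with `pickle` (an assumption about the intended use, not part of the original flow).
# +
import os

if len(agent.snapshot_list) > 0:
    # assumed usage: dump the collected numpy frames to the configured pickle path
    os.makedirs(os.path.dirname(SNAPSHOT_PATH), exist_ok=True)
    with open(SNAPSHOT_PATH, 'wb') as f:
        pickle.dump(agent.snapshot_list, f)
# -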
import matplotlib.pyplot as plt
plt.plot(agent.last_100_avg_log)
plt.plot(agent.max_score_log)
# +
csv = open("q-learn_rep.csv", "w")
#"w" indicates that you're writing strings to the file
columnTitleRow = "last_100_avg, max_score_log\n"
csv.write(columnTitleRow)
for a,b in zip(agent.last_100_avg_log,agent.max_score_log):
avg = a
maxscore = b
row = str(avg) + "," +str(maxscore) + "\n"
csv.write(row)
csv.close()
# -
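# The same log can also be written with pandas, which takes care of the header row and any quoting. A minimal alternative sketch, assuming `agent.last_100_avg_log` and `agent.max_score_log` are equal-length lists:
# +
import pandas as pd

log_df = pd.DataFrame({'last_100_avg': agent.last_100_avg_log,
                       'max_score_log': agent.max_score_log})
log_df.to_csv('q-learn_rep_pandas.csv', index=False)
# -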
len(agent.Q_table)
| .ipynb_checkpoints/flappy-ai-mod-qlearn-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Imports
# +
import pandas as pd
import numpy as np
import seaborn as sns
from pathlib import Path
import re
import niddk_covid_sicr as ncs
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import xlsxwriter
import itertools
import os
# from itertools import islice
# %matplotlib inline
font = {'family' : 'normal',
'weight' : 'normal',
'size' : 18}
matplotlib.rc('font', **font)
# -
# # Paths
table_path = Path('/data/schwartzao/covid-sicr/tables/20210809/')
reweighted_table = table_path / 'fit_table_reweighted_and_averaged.csv' # FOR MODEL AVERAGED RESULTS
# reweighted_table = table_path / 'fit_table_reweighted.csv' # FOR NON MODEL AVERAGED RESULTS
figures_path = Path('/data/schwartzao/covid-sicr/figures/20210809/')
models_path = '/home/schwartzao/covid-sicr/models/'
# # Functions
# +
def fix_region_name(roi): # Remove underscores
roi = roi.replace('_', ' ')
roi = ' '.join(roi.split(',')[::-1]).strip()
return roi
def get_model_contributions_table(raw_table_path, stats):
df = pd.read_csv(raw_table_path, index_col=['model', 'roi', 'quantile'])
df = df[~df.index.duplicated(keep='last')]
# get last week from dataframe
cols = df.columns
rt_cols = [x for x in df.columns if x.startswith('Rt') and x !='Rt-by-week']
rt_weeks = [x.split(' ')[2].strip(')') for x in rt_cols]
total_weeks = str(max([int(x) for x in rt_weeks]))
df.columns.name = 'param'
df = df.stack('param').unstack(['roi', 'quantile', 'param']).T
last_week_stats = [x + f' (week {total_weeks})' for x in stats]
    ll_waic_loo_aic = ['ll_', 'waic', 'loo', 'lp__rhat', 'num weeks', 'num_params', 'aic']
rois = df.index.get_level_values('roi').unique()
dfs = []
for roi in rois:
ll_stats = df.loc[(roi, 'mean', ll_waic_loo_aic)]
other_stats = df.loc[(roi, '0.5', last_week_stats)]
dfs.append(ll_stats)
dfs.append(other_stats)
df_result = pd.concat(dfs)
# report minimum values for ll, waic, loo, aic
columns = [col for col in df if col.startswith('Discrete')]
df_result = df_result.assign(minimum = df_result[columns].min(axis=1), minimum_column=df_result[columns].idxmin(axis=1))
models = ['Discrete1', 'Discrete2', 'Discrete3', 'Discrete4']
non_converged_dfs = []
for roi in rois:
df_result.loc[(roi, 'mean', ['num weeks','num_params']), 'minimum_column'] = ''
df_result.loc[(roi, 'mean', ['num weeks','num_params']), 'minimum'] = ''
df_result.loc[(roi, 'mean', 'convergence'), models] = ''
df_result.loc[(roi, '0.5', last_week_stats), 'minimum_column'] = ''
df_result.loc[(roi, '0.5', last_week_stats), 'minimum'] = ''
df_loos = df_result.reset_index()
df_loos = df_loos[df_loos.param.isin(['loo'])]
df_loos_slim = df_loos[['roi', 'param', 'Discrete1','Discrete2','Discrete3','Discrete4']]
df_loos_slim.set_index('roi')
# I'm arbitrarily setting the benchmark for convergence at loos < 5000
df_loos_slim.loc[df_loos_slim.Discrete1 > 5000, 'Discrete1'] = 'False'
df_loos_slim.loc[df_loos_slim.Discrete2 > 5000, 'Discrete2'] = 'False'
df_loos_slim.loc[df_loos_slim.Discrete3 > 5000, 'Discrete3'] = 'False'
df_loos_slim.loc[df_loos_slim.Discrete4 > 5000, 'Discrete4'] = 'False'
return df_result, df_loos_slim
# -
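# A quick illustration of `fix_region_name` on a made-up region string (hypothetical input, just to show the transformation: underscores become spaces and the comma-separated parts are reversed):
fix_region_name('New_York, US')  # returns 'US New York'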
# # Get contributions and convergence tables and save to tables path
# +
stats = ['Rt', 'car', 'ifr', 'ir', 'R0'] # define stats we want
df_result, df_convergence = get_model_contributions_table(table_path / 'fit_table_raw.csv', stats)
df_result.to_csv(table_path / 'model_contributions_and_median_stats.csv')
converg_path = table_path / 'convergence.csv'
df_convergence.to_csv(converg_path)
print(df_convergence)
# -
| notebooks/AOS/get_model_contributions_and_convergence.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Solutions
# This notebook cannot be run on its own. It just serves as a helpful information container for another notebook. Copy the content of the cells below into GenesToPhenotypes.ipynb. **DO NOT RUN THE CELLS**.
# ### Exercise 1
# The user loops through the images in the dataframe and thresholds each image.
# Each image is then saved as a TIFF file.
# We only use the ``df_filtered`` DataFrame.
for index, row in df_filtered.iterrows():
image_id = row['Image']
image = conn.getObject("Image", image_id)
pixels = image.getPrimaryPixels()
image_plane = pixels.getPlane(0, 0, 0)
filtered = scipy.ndimage.median_filter(image_plane, size=3)
threshold = filters.threshold_otsu(filtered)
print('Threshold value is {}'.format(threshold))
predicted = numpy.uint8(filtered > threshold) * 255
name="%s/%s.tif" % (home, image_id)
tifffile.imsave(name, predicted)
| solutions/GenesToPhenotypesSolutions.ipynb |
# ---
# jupyter:
# jupytext:
# split_at_heading: true
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# default_exp mesh_tools
# -
# # mesh_tools
# > Utility functions for building and editing the layered depth mesh: edge filtering, border handling, and extrapolation.
# +
#export
import copy
from functools import partial, reduce
import json
import os
import time
import cv2
import matplotlib.pyplot as plt
import numpy as np
from scipy import ndimage
import scipy.misc as misc
import scipy.signal as signal
from skimage import io
from skimage.feature import canny
from skimage.transform import resize
import torch
import transforms3d
from vispy import scene, io  # note: this re-binds `io`, shadowing the skimage.io import above
from vispy.scene import visuals
from vispy.visuals.filters import Alpha
try:
import cynetworkx as netx
except ImportError:
import networkx as netx
from pomerantz.utils import refine_depth_around_edge, smooth_cntsyn_gap
from pomerantz.utils import require_depth_edge, filter_irrelevant_edge_new, open_small_mask
# -
#export
def relabel_node(mesh, nodes, cur_node, new_node):
if cur_node == new_node:
return mesh
mesh.add_node(new_node)
for key, value in nodes[cur_node].items():
nodes[new_node][key] = value
for ne in mesh.neighbors(cur_node):
mesh.add_edge(new_node, ne)
mesh.remove_node(cur_node)
return mesh
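# A toy check of `relabel_node` (not exported): the node `(0, 0)` is replaced by `(0, 0, -1.0)` while its attributes and its edge to `(0, 1)` are carried over. This uses the `netx` alias imported above.
# +
g = netx.Graph()
g.add_node((0, 0), depth=-1.0)
g.add_node((0, 1))
g.add_edge((0, 0), (0, 1))
g = relabel_node(g, g.nodes, (0, 0), (0, 0, -1.0))
print(list(g.nodes(data=True)))  # (0, 0) replaced by (0, 0, -1.0), attributes kept
print(list(g.edges()))           # edge to (0, 1) preserved
# -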
#export
def filter_edge(mesh, edge_ccs, config, invalid=False):
context_ccs = [set() for _ in edge_ccs]
mesh_nodes = mesh.nodes
for edge_id, edge_cc in enumerate(edge_ccs):
if config['context_thickness'] == 0:
continue
edge_group = {}
for edge_node in edge_cc:
far_nodes = mesh_nodes[edge_node].get('far')
if far_nodes is None:
continue
for far_node in far_nodes:
context_ccs[edge_id].add(far_node)
if mesh_nodes[far_node].get('edge_id') is not None:
if edge_group.get(mesh_nodes[far_node]['edge_id']) is None:
edge_group[mesh_nodes[far_node]['edge_id']] = set()
edge_group[mesh_nodes[far_node]['edge_id']].add(far_node)
if len(edge_cc) > 2:
for edge_key in [*edge_group.keys()]:
if len(edge_group[edge_key]) == 1:
context_ccs[edge_id].remove([*edge_group[edge_key]][0])
valid_edge_ccs = []
for xidx, yy in enumerate(edge_ccs):
if invalid is not True and len(context_ccs[xidx]) > 0:
# if len(context_ccs[xidx]) > 0:
valid_edge_ccs.append(yy)
elif invalid is True and len(context_ccs[xidx]) == 0:
valid_edge_ccs.append(yy)
else:
valid_edge_ccs.append(set())
# valid_edge_ccs = [yy for xidx, yy in enumerate(edge_ccs) if len(context_ccs[xidx]) > 0]
return valid_edge_ccs
#export
def extrapolate(global_mesh,
info_on_pix,
image,
depth,
other_edge_with_id,
edge_map,
edge_ccs,
depth_edge_model,
depth_feat_model,
rgb_feat_model,
config,
direc='right-up'):
h_off, w_off = global_mesh.graph['hoffset'], global_mesh.graph['woffset']
noext_H, noext_W = global_mesh.graph['noext_H'], global_mesh.graph['noext_W']
if "up" in direc.lower() and "-" not in direc.lower():
all_anchor = [0, h_off + config['context_thickness'], w_off, w_off + noext_W]
global_shift = [all_anchor[0], all_anchor[2]]
mask_anchor = [0, h_off, w_off, w_off + noext_W]
context_anchor = [h_off, h_off + config['context_thickness'], w_off, w_off + noext_W]
valid_line_anchor = [h_off, h_off + 1, w_off, w_off + noext_W]
valid_anchor = [min(mask_anchor[0], context_anchor[0]), max(mask_anchor[1], context_anchor[1]),
min(mask_anchor[2], context_anchor[2]), max(mask_anchor[3], context_anchor[3])]
elif "down" in direc.lower() and "-" not in direc.lower():
all_anchor = [h_off + noext_H - config['context_thickness'], 2 * h_off + noext_H, w_off, w_off + noext_W]
global_shift = [all_anchor[0], all_anchor[2]]
mask_anchor = [h_off + noext_H, 2 * h_off + noext_H, w_off, w_off + noext_W]
context_anchor = [h_off + noext_H - config['context_thickness'], h_off + noext_H, w_off, w_off + noext_W]
valid_line_anchor = [h_off + noext_H - 1, h_off + noext_H, w_off, w_off + noext_W]
valid_anchor = [min(mask_anchor[0], context_anchor[0]), max(mask_anchor[1], context_anchor[1]),
min(mask_anchor[2], context_anchor[2]), max(mask_anchor[3], context_anchor[3])]
elif "left" in direc.lower() and "-" not in direc.lower():
all_anchor = [h_off, h_off + noext_H, 0, w_off + config['context_thickness']]
global_shift = [all_anchor[0], all_anchor[2]]
mask_anchor = [h_off, h_off + noext_H, 0, w_off]
context_anchor = [h_off, h_off + noext_H, w_off, w_off + config['context_thickness']]
valid_line_anchor = [h_off, h_off + noext_H, w_off, w_off + 1]
valid_anchor = [min(mask_anchor[0], context_anchor[0]), max(mask_anchor[1], context_anchor[1]),
min(mask_anchor[2], context_anchor[2]), max(mask_anchor[3], context_anchor[3])]
elif "right" in direc.lower() and "-" not in direc.lower():
all_anchor = [h_off, h_off + noext_H, w_off + noext_W - config['context_thickness'], 2 * w_off + noext_W]
global_shift = [all_anchor[0], all_anchor[2]]
mask_anchor = [h_off, h_off + noext_H, w_off + noext_W, 2 * w_off + noext_W]
context_anchor = [h_off, h_off + noext_H, w_off + noext_W - config['context_thickness'], w_off + noext_W]
valid_line_anchor = [h_off, h_off + noext_H, w_off + noext_W - 1, w_off + noext_W]
valid_anchor = [min(mask_anchor[0], context_anchor[0]), max(mask_anchor[1], context_anchor[1]),
min(mask_anchor[2], context_anchor[2]), max(mask_anchor[3], context_anchor[3])]
elif "left" in direc.lower() and "up" in direc.lower() and "-" in direc.lower():
all_anchor = [0, h_off + config['context_thickness'], 0, w_off + config['context_thickness']]
global_shift = [all_anchor[0], all_anchor[2]]
mask_anchor = [0, h_off, 0, w_off]
context_anchor = "inv-mask"
valid_line_anchor = None
valid_anchor = all_anchor
elif "left" in direc.lower() and "down" in direc.lower() and "-" in direc.lower():
all_anchor = [h_off + noext_H - config['context_thickness'], 2 * h_off + noext_H, 0, w_off + config['context_thickness']]
global_shift = [all_anchor[0], all_anchor[2]]
mask_anchor = [h_off + noext_H, 2 * h_off + noext_H, 0, w_off]
context_anchor = "inv-mask"
valid_line_anchor = None
valid_anchor = all_anchor
elif "right" in direc.lower() and "up" in direc.lower() and "-" in direc.lower():
all_anchor = [0, h_off + config['context_thickness'], w_off + noext_W - config['context_thickness'], 2 * w_off + noext_W]
global_shift = [all_anchor[0], all_anchor[2]]
mask_anchor = [0, h_off, w_off + noext_W, 2 * w_off + noext_W]
context_anchor = "inv-mask"
valid_line_anchor = None
valid_anchor = all_anchor
elif "right" in direc.lower() and "down" in direc.lower() and "-" in direc.lower():
all_anchor = [h_off + noext_H - config['context_thickness'], 2 * h_off + noext_H, w_off + noext_W - config['context_thickness'], 2 * w_off + noext_W]
global_shift = [all_anchor[0], all_anchor[2]]
mask_anchor = [h_off + noext_H, 2 * h_off + noext_H, w_off + noext_W, 2 * w_off + noext_W]
context_anchor = "inv-mask"
valid_line_anchor = None
valid_anchor = all_anchor
global_mask = np.zeros_like(depth)
global_mask[mask_anchor[0]:mask_anchor[1],mask_anchor[2]:mask_anchor[3]] = 1
mask = global_mask[valid_anchor[0]:valid_anchor[1], valid_anchor[2]:valid_anchor[3]] * 1
context = 1 - mask
global_context = np.zeros_like(depth)
global_context[all_anchor[0]:all_anchor[1],all_anchor[2]:all_anchor[3]] = context
# context = global_context[valid_anchor[0]:valid_anchor[1], valid_anchor[2]:valid_anchor[3]] * 1
valid_area = mask + context
input_rgb = image[valid_anchor[0]:valid_anchor[1], valid_anchor[2]:valid_anchor[3]] / 255. * context[..., None]
input_depth = depth[valid_anchor[0]:valid_anchor[1], valid_anchor[2]:valid_anchor[3]] * context
log_depth = np.log(input_depth + 1e-8)
log_depth[mask > 0] = 0
input_mean_depth = np.mean(log_depth[context > 0])
input_zero_mean_depth = (log_depth - input_mean_depth) * context
input_disp = 1./np.abs(input_depth)
input_disp[mask > 0] = 0
input_disp = input_disp / input_disp.max()
valid_line = np.zeros_like(depth)
if valid_line_anchor is not None:
valid_line[valid_line_anchor[0]:valid_line_anchor[1], valid_line_anchor[2]:valid_line_anchor[3]] = 1
valid_line = valid_line[all_anchor[0]:all_anchor[1], all_anchor[2]:all_anchor[3]]
# f, ((ax1, ax2)) = plt.subplots(1, 2, sharex=True, sharey=True); ax1.imshow(global_context * 1 + global_mask * 2); ax2.imshow(image); plt.show()
# f, ((ax1, ax2, ax3)) = plt.subplots(1, 3, sharex=True, sharey=True); ax1.imshow(context * 1 + mask * 2); ax2.imshow(input_rgb); ax3.imshow(valid_line); plt.show()
# import pdb; pdb.set_trace()
# return
input_edge_map = edge_map[all_anchor[0]:all_anchor[1], all_anchor[2]:all_anchor[3]] * context
input_other_edge_with_id = other_edge_with_id[all_anchor[0]:all_anchor[1], all_anchor[2]:all_anchor[3]]
end_depth_maps = ((valid_line * input_edge_map) > 0) * input_depth
if isinstance(config["gpu_ids"], int) and (config["gpu_ids"] >= 0):
device = config["gpu_ids"]
else:
device = "cpu"
valid_edge_ids = sorted(list(input_other_edge_with_id[(valid_line * input_edge_map) > 0]))
valid_edge_ids = valid_edge_ids[1:] if (len(valid_edge_ids) > 0 and valid_edge_ids[0] == -1) else valid_edge_ids
edge = reduce(lambda x, y: (x + (input_other_edge_with_id == y).astype(np.uint8)).clip(0, 1), [np.zeros_like(mask)] + list(valid_edge_ids))
t_edge = torch.FloatTensor(edge).to(device)[None, None, ...]
t_rgb = torch.FloatTensor(input_rgb).to(device).permute(2,0,1).unsqueeze(0)
t_mask = torch.FloatTensor(mask).to(device)[None, None, ...]
t_context = torch.FloatTensor(context).to(device)[None, None, ...]
t_disp = torch.FloatTensor(input_disp).to(device)[None, None, ...]
t_depth_zero_mean_depth = torch.FloatTensor(input_zero_mean_depth).to(device)[None, None, ...]
depth_edge_output = depth_edge_model.forward_3P(t_mask, t_context, t_rgb, t_disp, t_edge, unit_length=128,
cuda=device)
t_output_edge = (depth_edge_output> config['ext_edge_threshold']).float() * t_mask + t_edge
output_raw_edge = t_output_edge.data.cpu().numpy().squeeze()
# import pdb; pdb.set_trace()
mesh = netx.Graph()
hxs, hys = np.where(output_raw_edge * mask > 0)
valid_map = mask + context
for hx, hy in zip(hxs, hys):
node = (hx, hy)
mesh.add_node((hx, hy))
eight_nes = [ne for ne in [(hx + 1, hy), (hx - 1, hy), (hx, hy + 1), (hx, hy - 1), \
(hx + 1, hy + 1), (hx - 1, hy - 1), (hx - 1, hy + 1), (hx + 1, hy - 1)]\
if 0 <= ne[0] < output_raw_edge.shape[0] and 0 <= ne[1] < output_raw_edge.shape[1] and 0 < output_raw_edge[ne[0], ne[1]]]
for ne in eight_nes:
mesh.add_edge(node, ne, length=np.hypot(ne[0] - hx, ne[1] - hy))
if end_depth_maps[ne[0], ne[1]] != 0:
mesh.nodes[ne[0], ne[1]]['cnt'] = True
mesh.nodes[ne[0], ne[1]]['depth'] = end_depth_maps[ne[0], ne[1]]
ccs = [*netx.connected_components(mesh)]
end_pts = []
for cc in ccs:
end_pts.append(set())
for node in cc:
if mesh.nodes[node].get('cnt') is not None:
end_pts[-1].add((node[0], node[1], mesh.nodes[node]['depth']))
fpath_map = np.zeros_like(output_raw_edge) - 1
npath_map = np.zeros_like(output_raw_edge) - 1
for end_pt, cc in zip(end_pts, ccs):
sorted_end_pt = []
if len(end_pt) >= 2:
continue
if len(end_pt) == 0:
continue
if len(end_pt) == 1:
sub_mesh = mesh.subgraph(list(cc)).copy()
pnodes = netx.periphery(sub_mesh)
ends = [*end_pt]
edge_id = global_mesh.nodes[(ends[0][0] + all_anchor[0], ends[0][1] + all_anchor[2], -ends[0][2])]['edge_id']
pnodes = sorted(pnodes,
key=lambda x: np.hypot((x[0] - ends[0][0]), (x[1] - ends[0][1])),
reverse=True)[0]
npath = [*netx.shortest_path(sub_mesh, (ends[0][0], ends[0][1]), pnodes, weight='length')]
for np_node in npath:
npath_map[np_node[0], np_node[1]] = edge_id
fpath = []
if global_mesh.nodes[(ends[0][0] + all_anchor[0], ends[0][1] + all_anchor[2], -ends[0][2])].get('far') is None:
print("None far")
import pdb; pdb.set_trace()
else:
fnodes = global_mesh.nodes[(ends[0][0] + all_anchor[0], ends[0][1] + all_anchor[2], -ends[0][2])].get('far')
fnodes = [(xx[0] - all_anchor[0], xx[1] - all_anchor[2], xx[2]) for xx in fnodes]
dmask = mask + 0
did = 0
while True:
did += 1
dmask = cv2.dilate(dmask, np.ones((3, 3)), iterations=1)
if did > 3:
break
# ffnode = [fnode for fnode in fnodes if (dmask[fnode[0], fnode[1]] > 0)]
ffnode = [fnode for fnode in fnodes if (dmask[fnode[0], fnode[1]] > 0 and mask[fnode[0], fnode[1]] == 0)]
if len(ffnode) > 0:
fnode = ffnode[0]
break
if len(ffnode) == 0:
continue
fpath.append((fnode[0], fnode[1]))
for step in range(0, len(npath) - 1):
parr = (npath[step + 1][0] - npath[step][0], npath[step + 1][1] - npath[step][1])
new_loc = (fpath[-1][0] + parr[0], fpath[-1][1] + parr[1])
new_loc_nes = [xx for xx in [(new_loc[0] + 1, new_loc[1]), (new_loc[0] - 1, new_loc[1]),
(new_loc[0], new_loc[1] + 1), (new_loc[0], new_loc[1] - 1)]\
if xx[0] >= 0 and xx[0] < fpath_map.shape[0] and xx[1] >= 0 and xx[1] < fpath_map.shape[1]]
if np.sum([fpath_map[nlne[0], nlne[1]] for nlne in new_loc_nes]) != -4:
break
if npath_map[new_loc[0], new_loc[1]] != -1:
if npath_map[new_loc[0], new_loc[1]] != edge_id:
break
else:
continue
if valid_area[new_loc[0], new_loc[1]] == 0:
break
new_loc_nes_eight = [xx for xx in [(new_loc[0] + 1, new_loc[1]), (new_loc[0] - 1, new_loc[1]),
(new_loc[0], new_loc[1] + 1), (new_loc[0], new_loc[1] - 1),
(new_loc[0] + 1, new_loc[1] + 1), (new_loc[0] + 1, new_loc[1] - 1),
(new_loc[0] - 1, new_loc[1] - 1), (new_loc[0] - 1, new_loc[1] + 1)]\
if xx[0] >= 0 and xx[0] < fpath_map.shape[0] and xx[1] >= 0 and xx[1] < fpath_map.shape[1]]
if np.sum([int(npath_map[nlne[0], nlne[1]] == edge_id) for nlne in new_loc_nes_eight]) == 0:
break
fpath.append((fpath[-1][0] + parr[0], fpath[-1][1] + parr[1]))
if step != len(npath) - 2:
for xx in npath[step+1:]:
if npath_map[xx[0], xx[1]] == edge_id:
npath_map[xx[0], xx[1]] = -1
if len(fpath) > 0:
for fp_node in fpath:
fpath_map[fp_node[0], fp_node[1]] = edge_id
# import pdb; pdb.set_trace()
far_edge = (fpath_map > -1).astype(np.uint8)
update_edge = (npath_map > -1) * mask + edge
t_update_edge = torch.FloatTensor(update_edge).to(device)[None, None, ...]
depth_output = depth_feat_model.forward_3P(t_mask, t_context, t_depth_zero_mean_depth, t_update_edge, unit_length=128,
cuda=device)
depth_output = depth_output.cpu().data.numpy().squeeze()
depth_output = np.exp(depth_output + input_mean_depth) * mask # + input_depth * context
# if "right" in direc.lower() and "-" not in direc.lower():
# plt.imshow(depth_output); plt.show()
# import pdb; pdb.set_trace()
# f, ((ax1, ax2)) = plt.subplots(1, 2, sharex=True, sharey=True); ax1.imshow(depth_output); ax2.imshow(npath_map + fpath_map); plt.show()
for near_id in np.unique(npath_map[npath_map > -1]):
depth_output = refine_depth_around_edge(depth_output.copy(),
(fpath_map == near_id).astype(np.uint8) * mask, # far_edge_map_in_mask,
(fpath_map == near_id).astype(np.uint8), # far_edge_map,
(npath_map == near_id).astype(np.uint8) * mask,
mask.copy(),
np.zeros_like(mask),
config)
# if "right" in direc.lower() and "-" not in direc.lower():
# plt.imshow(depth_output); plt.show()
# import pdb; pdb.set_trace()
# f, ((ax1, ax2)) = plt.subplots(1, 2, sharex=True, sharey=True); ax1.imshow(depth_output); ax2.imshow(npath_map + fpath_map); plt.show()
rgb_output = rgb_feat_model.forward_3P(t_mask, t_context, t_rgb, t_update_edge, unit_length=128,
cuda=device)
# rgb_output = rgb_feat_model.forward_3P(t_mask, t_context, t_rgb, t_update_edge, unit_length=128, cuda=config['gpu_ids'])
if config.get('gray_image') is True:
rgb_output = rgb_output.mean(1, keepdim=True).repeat((1,3,1,1))
rgb_output = ((rgb_output.squeeze().data.cpu().permute(1,2,0).numpy() * mask[..., None] + input_rgb) * 255).astype(np.uint8)
image[all_anchor[0]:all_anchor[1], all_anchor[2]:all_anchor[3]][mask > 0] = rgb_output[mask > 0] # np.array([255,0,0]) # rgb_output[mask > 0]
depth[all_anchor[0]:all_anchor[1], all_anchor[2]:all_anchor[3]][mask > 0] = depth_output[mask > 0]
# nxs, nys = np.where(mask > -1)
# for nx, ny in zip(nxs, nys):
# info_on_pix[(nx, ny)][0]['color'] = rgb_output[]
nxs, nys = np.where((npath_map > -1))
for nx, ny in zip(nxs, nys):
n_id = npath_map[nx, ny]
four_nes = [xx for xx in [(nx + 1, ny), (nx - 1, ny), (nx, ny + 1), (nx, ny - 1)]\
if 0 <= xx[0] < fpath_map.shape[0] and 0 <= xx[1] < fpath_map.shape[1]]
for nex, ney in four_nes:
if fpath_map[nex, ney] == n_id:
na, nb = (nx + all_anchor[0], ny + all_anchor[2], info_on_pix[(nx + all_anchor[0], ny + all_anchor[2])][0]['depth']), \
(nex + all_anchor[0], ney + all_anchor[2], info_on_pix[(nex + all_anchor[0], ney + all_anchor[2])][0]['depth'])
if global_mesh.has_edge(na, nb):
global_mesh.remove_edge(na, nb)
nxs, nys = np.where((fpath_map > -1))
for nx, ny in zip(nxs, nys):
n_id = fpath_map[nx, ny]
four_nes = [xx for xx in [(nx + 1, ny), (nx - 1, ny), (nx, ny + 1), (nx, ny - 1)]\
if 0 <= xx[0] < npath_map.shape[0] and 0 <= xx[1] < npath_map.shape[1]]
for nex, ney in four_nes:
if npath_map[nex, ney] == n_id:
na, nb = (nx + all_anchor[0], ny + all_anchor[2], info_on_pix[(nx + all_anchor[0], ny + all_anchor[2])][0]['depth']), \
(nex + all_anchor[0], ney + all_anchor[2], info_on_pix[(nex + all_anchor[0], ney + all_anchor[2])][0]['depth'])
if global_mesh.has_edge(na, nb):
global_mesh.remove_edge(na, nb)
nxs, nys = np.where(mask > 0)
for x, y in zip(nxs, nys):
x = x + all_anchor[0]
y = y + all_anchor[2]
cur_node = (x, y, 0)
new_node = (x, y, -abs(depth[x, y]))
disp = 1. / -abs(depth[x, y])
mapping_dict = {cur_node: new_node}
info_on_pix, global_mesh = update_info(mapping_dict, info_on_pix, global_mesh)
global_mesh.nodes[new_node]['color'] = image[x, y]
global_mesh.nodes[new_node]['old_color'] = image[x, y]
global_mesh.nodes[new_node]['disp'] = disp
info_on_pix[(x, y)][0]['depth'] = -abs(depth[x, y])
info_on_pix[(x, y)][0]['disp'] = disp
info_on_pix[(x, y)][0]['color'] = image[x, y]
nxs, nys = np.where((npath_map > -1))
for nx, ny in zip(nxs, nys):
self_node = (nx + all_anchor[0], ny + all_anchor[2], info_on_pix[(nx + all_anchor[0], ny + all_anchor[2])][0]['depth'])
if global_mesh.has_node(self_node) is False:
break
n_id = int(round(npath_map[nx, ny]))
four_nes = [xx for xx in [(nx + 1, ny), (nx - 1, ny), (nx, ny + 1), (nx, ny - 1)]\
if 0 <= xx[0] < fpath_map.shape[0] and 0 <= xx[1] < fpath_map.shape[1]]
for nex, ney in four_nes:
ne_node = (nex + all_anchor[0], ney + all_anchor[2], info_on_pix[(nex + all_anchor[0], ney + all_anchor[2])][0]['depth'])
if global_mesh.has_node(ne_node) is False:
continue
if fpath_map[nex, ney] == n_id:
if global_mesh.nodes[self_node].get('edge_id') is None:
global_mesh.nodes[self_node]['edge_id'] = n_id
edge_ccs[n_id].add(self_node)
info_on_pix[(self_node[0], self_node[1])][0]['edge_id'] = n_id
if global_mesh.has_edge(self_node, ne_node) is True:
global_mesh.remove_edge(self_node, ne_node)
if global_mesh.nodes[self_node].get('far') is None:
global_mesh.nodes[self_node]['far'] = []
global_mesh.nodes[self_node]['far'].append(ne_node)
global_fpath_map = np.zeros_like(other_edge_with_id) - 1
global_fpath_map[all_anchor[0]:all_anchor[1], all_anchor[2]:all_anchor[3]] = fpath_map
fpath_ids = np.unique(global_fpath_map)
fpath_ids = fpath_ids[1:] if fpath_ids.shape[0] > 0 and fpath_ids[0] == -1 else []
fpath_real_id_map = np.zeros_like(global_fpath_map) - 1
for fpath_id in fpath_ids:
fpath_real_id = np.unique(((global_fpath_map == fpath_id).astype(np.int) * (other_edge_with_id + 1)) - 1)
fpath_real_id = fpath_real_id[1:] if fpath_real_id.shape[0] > 0 and fpath_real_id[0] == -1 else []
fpath_real_id = fpath_real_id.astype(np.int)
fpath_real_id = np.bincount(fpath_real_id).argmax()
fpath_real_id_map[global_fpath_map == fpath_id] = fpath_real_id
nxs, nys = np.where((fpath_map > -1))
for nx, ny in zip(nxs, nys):
self_node = (nx + all_anchor[0], ny + all_anchor[2], info_on_pix[(nx + all_anchor[0], ny + all_anchor[2])][0]['depth'])
n_id = fpath_map[nx, ny]
four_nes = [xx for xx in [(nx + 1, ny), (nx - 1, ny), (nx, ny + 1), (nx, ny - 1)]\
if 0 <= xx[0] < npath_map.shape[0] and 0 <= xx[1] < npath_map.shape[1]]
for nex, ney in four_nes:
ne_node = (nex + all_anchor[0], ney + all_anchor[2], info_on_pix[(nex + all_anchor[0], ney + all_anchor[2])][0]['depth'])
if global_mesh.has_node(ne_node) is False:
continue
if npath_map[nex, ney] == n_id or global_mesh.nodes[ne_node].get('edge_id') == n_id:
if global_mesh.has_edge(self_node, ne_node) is True:
global_mesh.remove_edge(self_node, ne_node)
if global_mesh.nodes[self_node].get('near') is None:
global_mesh.nodes[self_node]['near'] = []
if global_mesh.nodes[self_node].get('edge_id') is None:
f_id = int(round(fpath_real_id_map[self_node[0], self_node[1]]))
global_mesh.nodes[self_node]['edge_id'] = f_id
info_on_pix[(self_node[0], self_node[1])][0]['edge_id'] = f_id
edge_ccs[f_id].add(self_node)
global_mesh.nodes[self_node]['near'].append(ne_node)
return info_on_pix, global_mesh, image, depth, edge_ccs
# for edge_cc in edge_ccs:
# for edge_node in edge_cc:
# edge_ccs
# context_ccs, mask_ccs, broken_mask_ccs, edge_ccs, erode_context_ccs, init_mask_connect, edge_maps, extend_context_ccs, extend_edge_ccs
#export
def get_valid_size(imap):
x_max = np.where(imap.sum(1).squeeze() > 0)[0].max() + 1
x_min = np.where(imap.sum(1).squeeze() > 0)[0].min()
y_max = np.where(imap.sum(0).squeeze() > 0)[0].max() + 1
y_min = np.where(imap.sum(0).squeeze() > 0)[0].min()
size_dict = {'x_max':x_max, 'y_max':y_max, 'x_min':x_min, 'y_min':y_min}
return size_dict
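# `get_valid_size` returns the tight bounding box (inclusive min indices, exclusive max indices) of the nonzero region of a 2-D map. A small sanity check on a toy mask (not exported):
# +
toy_map = np.zeros((6, 8))
toy_map[2:4, 3:6] = 1
print(get_valid_size(toy_map))  # {'x_max': 4, 'y_max': 6, 'x_min': 2, 'y_min': 3}
# -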
#export
def dilate_valid_size(isize_dict, imap, dilate=[0, 0]):
osize_dict = copy.deepcopy(isize_dict)
osize_dict['x_min'] = max(0, osize_dict['x_min'] - dilate[0])
osize_dict['x_max'] = min(imap.shape[0], osize_dict['x_max'] + dilate[0])
osize_dict['y_min'] = max(0, osize_dict['y_min'] - dilate[0])
osize_dict['y_max'] = min(imap.shape[1], osize_dict['y_max'] + dilate[1])
return osize_dict
#export
def size_operation(size_a, size_b, operation):
assert operation == '+' or operation == '-', "Operation must be '+' (union) or '-' (exclude)"
osize = {}
if operation == '+':
osize['x_min'] = min(size_a['x_min'], size_b['x_min'])
osize['y_min'] = min(size_a['y_min'], size_b['y_min'])
osize['x_max'] = max(size_a['x_max'], size_b['x_max'])
osize['y_max'] = max(size_a['y_max'], size_b['y_max'])
assert operation != '-', "Operation '-' is undefined !"
return osize
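# `size_operation` with `'+'` returns the union of two bounding boxes; `'-'` passes the first assertion but is rejected by the second one further down. A quick check (not exported):
# +
size_a = {'x_min': 2, 'x_max': 4, 'y_min': 3, 'y_max': 6}
size_b = {'x_min': 1, 'x_max': 3, 'y_min': 5, 'y_max': 9}
print(size_operation(size_a, size_b, '+'))  # {'x_min': 1, 'y_min': 3, 'x_max': 4, 'y_max': 9}
# -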
#export
def fill_dummy_bord(mesh, info_on_pix, image, depth, config):
context = np.zeros_like(depth).astype(np.uint8)
context[mesh.graph['hoffset']:mesh.graph['hoffset'] + mesh.graph['noext_H'],
mesh.graph['woffset']:mesh.graph['woffset'] + mesh.graph['noext_W']] = 1
mask = 1 - context
xs, ys = np.where(mask > 0)
depth = depth * context
image = image * context[..., None]
cur_depth = 0
cur_disp = 0
color = [0, 0, 0]
for x, y in zip(xs, ys):
cur_node = (x, y, cur_depth)
mesh.add_node(cur_node, color=color,
synthesis=False,
disp=cur_disp,
cc_id=set(),
ext_pixel=True)
info_on_pix[(x, y)] = [{'depth':cur_depth,
'color':mesh.nodes[(x, y, cur_depth)]['color'],
'synthesis':False,
'disp':mesh.nodes[cur_node]['disp'],
'ext_pixel':True}]
# for x, y in zip(xs, ys):
four_nes = [(xx, yy) for xx, yy in [(x + 1, y), (x - 1, y), (x, y + 1), (x, y - 1)] if\
0 <= x < mesh.graph['H'] and 0 <= y < mesh.graph['W'] and info_on_pix.get((xx, yy)) is not None]
for ne in four_nes:
# if (ne[0] - x) + (ne[1] - y) == 1 and info_on_pix.get((ne[0], ne[1])) is not None:
mesh.add_edge(cur_node, (ne[0], ne[1], info_on_pix[(ne[0], ne[1])][0]['depth']))
return mesh, info_on_pix
#export
def enlarge_border(mesh, info_on_pix, depth, image, config):
mesh.graph['hoffset'], mesh.graph['woffset'] = config['extrapolation_thickness'], config['extrapolation_thickness']
mesh.graph['bord_up'], mesh.graph['bord_left'], mesh.graph['bord_down'], mesh.graph['bord_right'] = \
0, 0, mesh.graph['H'], mesh.graph['W']
# new_image = np.pad(image,
# pad_width=((config['extrapolation_thickness'], config['extrapolation_thickness']),
# (config['extrapolation_thickness'], config['extrapolation_thickness']), (0, 0)),
# mode='constant')
# new_depth = np.pad(depth,
# pad_width=((config['extrapolation_thickness'], config['extrapolation_thickness']),
# (config['extrapolation_thickness'], config['extrapolation_thickness'])),
# mode='constant')
return mesh, info_on_pix, depth, image
#export
def fill_missing_node(mesh, info_on_pix, image, depth):
for x in range(mesh.graph['bord_up'], mesh.graph['bord_down']):
for y in range(mesh.graph['bord_left'], mesh.graph['bord_right']):
if info_on_pix.get((x, y)) is None:
print("fill missing node = ", x, y)
import pdb; pdb.set_trace()
re_depth, re_count = 0, 0
for ne in [(x + 1, y), (x - 1, y), (x, y + 1), (x, y - 1)]:
if info_on_pix.get(ne) is not None:
re_depth += info_on_pix[ne][0]['depth']
re_count += 1
if re_count == 0:
re_depth = -abs(depth[x, y])
else:
re_depth = re_depth / re_count
depth[x, y] = abs(re_depth)
info_on_pix[(x, y)] = [{'depth':re_depth,
'color':image[x, y],
'synthesis':False,
'disp':1./re_depth}]
mesh.add_node((x, y, re_depth), color=image[x, y],
synthesis=False,
disp=1./re_depth,
cc_id=set())
return mesh, info_on_pix, depth
#export
def refresh_bord_depth(mesh, info_on_pix, image, depth):
H, W = mesh.graph['H'], mesh.graph['W']
corner_nodes = [(mesh.graph['bord_up'], mesh.graph['bord_left']),
(mesh.graph['bord_up'], mesh.graph['bord_right'] - 1),
(mesh.graph['bord_down'] - 1, mesh.graph['bord_left']),
(mesh.graph['bord_down'] - 1, mesh.graph['bord_right'] - 1)]
# (0, W - 1), (H - 1, 0), (H - 1, W - 1)]
bord_nodes = []
bord_nodes += [(mesh.graph['bord_up'], xx) for xx in range(mesh.graph['bord_left'] + 1, mesh.graph['bord_right'] - 1)]
bord_nodes += [(mesh.graph['bord_down'] - 1, xx) for xx in range(mesh.graph['bord_left'] + 1, mesh.graph['bord_right'] - 1)]
bord_nodes += [(xx, mesh.graph['bord_left']) for xx in range(mesh.graph['bord_up'] + 1, mesh.graph['bord_down'] - 1)]
bord_nodes += [(xx, mesh.graph['bord_right'] - 1) for xx in range(mesh.graph['bord_up'] + 1, mesh.graph['bord_down'] - 1)]
for xy in bord_nodes:
tgt_loc = None
if xy[0] == mesh.graph['bord_up']:
tgt_loc = (xy[0] + 1, xy[1])# (1, xy[1])
elif xy[0] == mesh.graph['bord_down'] - 1:
tgt_loc = (xy[0] - 1, xy[1]) # (H - 2, xy[1])
elif xy[1] == mesh.graph['bord_left']:
tgt_loc = (xy[0], xy[1] + 1)
elif xy[1] == mesh.graph['bord_right'] - 1:
tgt_loc = (xy[0], xy[1] - 1)
if tgt_loc is not None:
ne_infos = info_on_pix.get(tgt_loc)
if ne_infos is None:
import pdb; pdb.set_trace()
# if ne_infos is not None and len(ne_infos) == 1:
tgt_depth = ne_infos[0]['depth']
tgt_disp = ne_infos[0]['disp']
new_node = (xy[0], xy[1], tgt_depth)
src_node = (tgt_loc[0], tgt_loc[1], tgt_depth)
tgt_nes_loc = [(xx[0], xx[1]) \
for xx in mesh.neighbors(src_node)]
tgt_nes_loc = [(xx[0] - tgt_loc[0] + xy[0], xx[1] - tgt_loc[1] + xy[1]) for xx in tgt_nes_loc \
if abs(xx[0] - xy[0]) == 1 and abs(xx[1] - xy[1]) == 1]
tgt_nes_loc = [xx for xx in tgt_nes_loc if info_on_pix.get(xx) is not None]
tgt_nes_loc.append(tgt_loc)
# if (xy[0], xy[1]) == (559, 60):
# import pdb; pdb.set_trace()
if info_on_pix.get(xy) is not None and len(info_on_pix.get(xy)) > 0:
old_depth = info_on_pix[xy][0].get('depth')
old_node = (xy[0], xy[1], old_depth)
mesh.remove_edges_from([(old_ne, old_node) for old_ne in mesh.neighbors(old_node)])
mesh.add_edges_from([((zz[0], zz[1], info_on_pix[zz][0]['depth']), old_node) for zz in tgt_nes_loc])
mapping_dict = {old_node: new_node}
# if old_node[2] == new_node[2]:
# print("mapping_dict = ", mapping_dict)
info_on_pix, mesh = update_info(mapping_dict, info_on_pix, mesh)
            else:
                # Copy the neighbour's pixel info into this border pixel, then overwrite its color values.
                info_on_pix[xy] = [dict(info_on_pix[tgt_loc][0])]
                info_on_pix[xy][0]['color'] = image[xy[0], xy[1]]
                info_on_pix[xy][0]['old_color'] = image[xy[0], xy[1]]
mesh.add_node(new_node)
mesh.add_edges_from([((zz[0], zz[1], info_on_pix[zz][0]['depth']), new_node) for zz in tgt_nes_loc])
mesh.nodes[new_node]['far'] = None
mesh.nodes[new_node]['near'] = None
if mesh.nodes[src_node].get('far') is not None:
redundant_nodes = [ne for ne in mesh.nodes[src_node]['far'] if (ne[0], ne[1]) == xy]
[mesh.nodes[src_node]['far'].remove(aa) for aa in redundant_nodes]
if mesh.nodes[src_node].get('near') is not None:
redundant_nodes = [ne for ne in mesh.nodes[src_node]['near'] if (ne[0], ne[1]) == xy]
[mesh.nodes[src_node]['near'].remove(aa) for aa in redundant_nodes]
for xy in corner_nodes:
hx, hy = xy
four_nes = [xx for xx in [(hx + 1, hy), (hx - 1, hy), (hx, hy + 1), (hx, hy - 1)] if \
mesh.graph['bord_up'] <= xx[0] < mesh.graph['bord_down'] and \
mesh.graph['bord_left'] <= xx[1] < mesh.graph['bord_right']]
ne_nodes = []
ne_depths = []
for ne_loc in four_nes:
if info_on_pix.get(ne_loc) is not None:
ne_depths.append(info_on_pix[ne_loc][0]['depth'])
ne_nodes.append((ne_loc[0], ne_loc[1], info_on_pix[ne_loc][0]['depth']))
new_node = (xy[0], xy[1], float(np.mean(ne_depths)))
if info_on_pix.get(xy) is not None and len(info_on_pix.get(xy)) > 0:
old_depth = info_on_pix[xy][0].get('depth')
old_node = (xy[0], xy[1], old_depth)
mesh.remove_edges_from([(old_ne, old_node) for old_ne in mesh.neighbors(old_node)])
mesh.add_edges_from([(zz, old_node) for zz in ne_nodes])
mapping_dict = {old_node: new_node}
info_on_pix, mesh = update_info(mapping_dict, info_on_pix, mesh)
        else:
            # Copy info from the last valid neighbour, then overwrite depth and color for this corner pixel.
            info_on_pix[xy] = [dict(info_on_pix[(ne_nodes[-1][0], ne_nodes[-1][1])][0])]
            info_on_pix[xy][0]['depth'] = new_node[2]
            info_on_pix[xy][0]['color'] = image[xy[0], xy[1]]
            info_on_pix[xy][0]['old_color'] = image[xy[0], xy[1]]
mesh.add_node(new_node)
mesh.add_edges_from([(zz, new_node) for zz in ne_nodes])
mesh.nodes[new_node]['far'] = None
mesh.nodes[new_node]['near'] = None
for xy in bord_nodes + corner_nodes:
# if (xy[0], xy[1]) == (559, 60):
# import pdb; pdb.set_trace()
depth[xy[0], xy[1]] = abs(info_on_pix[xy][0]['depth'])
for xy in bord_nodes:
cur_node = (xy[0], xy[1], info_on_pix[xy][0]['depth'])
nes = mesh.neighbors(cur_node)
four_nes = set([(xy[0] + 1, xy[1]), (xy[0] - 1, xy[1]), (xy[0], xy[1] + 1), (xy[0], xy[1] - 1)]) - \
set([(ne[0], ne[1]) for ne in nes])
four_nes = [ne for ne in four_nes if mesh.graph['bord_up'] <= ne[0] < mesh.graph['bord_down'] and \
mesh.graph['bord_left'] <= ne[1] < mesh.graph['bord_right']]
four_nes = [(ne[0], ne[1], info_on_pix[(ne[0], ne[1])][0]['depth']) for ne in four_nes]
mesh.nodes[cur_node]['far'] = []
mesh.nodes[cur_node]['near'] = []
for ne in four_nes:
if abs(ne[2]) >= abs(cur_node[2]):
mesh.nodes[cur_node]['far'].append(ne)
else:
mesh.nodes[cur_node]['near'].append(ne)
return mesh, info_on_pix, depth
#export
def get_union_size(mesh, dilate, *alls_cc):
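    # Bounding box (dilated by 'dilate' and clamped to the mesh size) that covers the union of the given connected components.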
all_cc = reduce(lambda x, y: x | y, [set()] + [*alls_cc])
min_x, min_y, max_x, max_y = mesh.graph['H'], mesh.graph['W'], 0, 0
H, W = mesh.graph['H'], mesh.graph['W']
for node in all_cc:
if node[0] < min_x:
min_x = node[0]
if node[0] > max_x:
max_x = node[0]
if node[1] < min_y:
min_y = node[1]
if node[1] > max_y:
max_y = node[1]
max_x = max_x + 1
max_y = max_y + 1
# mask_size = dilate_valid_size(mask_size, edge_dict['mask'], dilate=[20, 20])
osize_dict = dict()
osize_dict['x_min'] = max(0, min_x - dilate[0])
osize_dict['x_max'] = min(H, max_x + dilate[0])
osize_dict['y_min'] = max(0, min_y - dilate[1])
osize_dict['y_max'] = min(W, max_y + dilate[1])
return osize_dict
#export
def incomplete_node(mesh, edge_maps, info_on_pix):
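    # Find poorly connected, non-synthesized nodes (fewer than 3 valid neighbours) and reconnect them to their 4-neighbours.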
vis_map = np.zeros((mesh.graph['H'], mesh.graph['W']))
for node in mesh.nodes:
if mesh.nodes[node].get('synthesis') is not True:
connect_all_flag = False
nes = [xx for xx in mesh.neighbors(node) if mesh.nodes[xx].get('synthesis') is not True]
if len(nes) < 3 and 0 < node[0] < mesh.graph['H'] - 1 and 0 < node[1] < mesh.graph['W'] - 1:
if len(nes) <= 1:
connect_all_flag = True
else:
dan_ne_node_a = nes[0]
dan_ne_node_b = nes[1]
if abs(dan_ne_node_a[0] - dan_ne_node_b[0]) > 1 or \
abs(dan_ne_node_a[1] - dan_ne_node_b[1]) > 1:
connect_all_flag = True
if connect_all_flag == True:
vis_map[node[0], node[1]] = len(nes)
four_nes = [(node[0] - 1, node[1]), (node[0] + 1, node[1]), (node[0], node[1] - 1), (node[0], node[1] + 1)]
for ne in four_nes:
for info in info_on_pix[(ne[0], ne[1])]:
ne_node = (ne[0], ne[1], info['depth'])
if info.get('synthesis') is not True and mesh.has_node(ne_node):
mesh.add_edge(node, ne_node)
break
return mesh
#export
def edge_inpainting(edge_id, context_cc, erode_context_cc, mask_cc, edge_cc, extend_edge_cc,
mesh, edge_map, edge_maps_with_id, config, union_size, depth_edge_model, inpaint_iter):
edge_dict = get_edge_from_nodes(context_cc, erode_context_cc, mask_cc, edge_cc, extend_edge_cc,
mesh.graph['H'], mesh.graph['W'], mesh)
edge_dict['edge'], end_depth_maps, _ = \
filter_irrelevant_edge_new(edge_dict['self_edge'] + edge_dict['comp_edge'],
edge_map,
edge_maps_with_id,
edge_id,
edge_dict['context'],
edge_dict['depth'], mesh, context_cc | erode_context_cc, spdb=True)
patch_edge_dict = dict()
patch_edge_dict['mask'], patch_edge_dict['context'], patch_edge_dict['rgb'], \
patch_edge_dict['disp'], patch_edge_dict['edge'] = \
crop_maps_by_size(union_size, edge_dict['mask'], edge_dict['context'],
edge_dict['rgb'], edge_dict['disp'], edge_dict['edge'])
tensor_edge_dict = convert2tensor(patch_edge_dict)
if require_depth_edge(patch_edge_dict['edge'], patch_edge_dict['mask']) and inpaint_iter == 0:
with torch.no_grad():
device = config["gpu_ids"] if isinstance(config["gpu_ids"], int) and config["gpu_ids"] >= 0 else "cpu"
depth_edge_output = depth_edge_model.forward_3P(tensor_edge_dict['mask'],
tensor_edge_dict['context'],
tensor_edge_dict['rgb'],
tensor_edge_dict['disp'],
tensor_edge_dict['edge'],
unit_length=128,
cuda=device)
depth_edge_output = depth_edge_output.cpu()
tensor_edge_dict['output'] = (depth_edge_output > config['ext_edge_threshold']).float() * tensor_edge_dict['mask'] + tensor_edge_dict['edge']
else:
tensor_edge_dict['output'] = tensor_edge_dict['edge']
depth_edge_output = tensor_edge_dict['edge'] + 0
patch_edge_dict['output'] = tensor_edge_dict['output'].squeeze().data.cpu().numpy()
edge_dict['output'] = np.zeros((mesh.graph['H'], mesh.graph['W']))
edge_dict['output'][union_size['x_min']:union_size['x_max'], union_size['y_min']:union_size['y_max']] = \
patch_edge_dict['output']
return edge_dict, end_depth_maps
#export
def depth_inpainting(context_cc, extend_context_cc, erode_context_cc, mask_cc, mesh, config, union_size, depth_feat_model, edge_output, given_depth_dict=False, spdb=False):
if given_depth_dict is False:
depth_dict = get_depth_from_nodes(context_cc | extend_context_cc, erode_context_cc, mask_cc, mesh.graph['H'], mesh.graph['W'], mesh, config['log_depth'])
if edge_output is not None:
depth_dict['edge'] = edge_output
else:
depth_dict = given_depth_dict
patch_depth_dict = dict()
patch_depth_dict['mask'], patch_depth_dict['context'], patch_depth_dict['depth'], \
patch_depth_dict['zero_mean_depth'], patch_depth_dict['edge'] = \
crop_maps_by_size(union_size, depth_dict['mask'], depth_dict['context'],
depth_dict['real_depth'], depth_dict['zero_mean_depth'], depth_dict['edge'])
tensor_depth_dict = convert2tensor(patch_depth_dict)
resize_mask = open_small_mask(tensor_depth_dict['mask'], tensor_depth_dict['context'], 3, 41)
with torch.no_grad():
device = config["gpu_ids"] if isinstance(config["gpu_ids"], int) and config["gpu_ids"] >= 0 else "cpu"
depth_output = depth_feat_model.forward_3P(resize_mask,
tensor_depth_dict['context'],
tensor_depth_dict['zero_mean_depth'],
tensor_depth_dict['edge'],
unit_length=128,
cuda=device)
depth_output = depth_output.cpu()
tensor_depth_dict['output'] = torch.exp(depth_output + depth_dict['mean_depth']) * \
tensor_depth_dict['mask'] + tensor_depth_dict['depth']
patch_depth_dict['output'] = tensor_depth_dict['output'].data.cpu().numpy().squeeze()
depth_dict['output'] = np.zeros((mesh.graph['H'], mesh.graph['W']))
depth_dict['output'][union_size['x_min']:union_size['x_max'], union_size['y_min']:union_size['y_max']] = \
patch_depth_dict['output']
depth_output = depth_dict['output'] * depth_dict['mask'] + depth_dict['depth'] * depth_dict['context']
depth_output = smooth_cntsyn_gap(depth_dict['output'].copy() * depth_dict['mask'] + depth_dict['depth'] * depth_dict['context'],
depth_dict['mask'], depth_dict['context'],
init_mask_region=depth_dict['mask'])
if spdb is True:
f, ((ax1, ax2)) = plt.subplots(1, 2, sharex=True, sharey=True);
ax1.imshow(depth_output * depth_dict['mask'] + depth_dict['depth']); ax2.imshow(depth_dict['output'] * depth_dict['mask'] + depth_dict['depth']); plt.show()
import pdb; pdb.set_trace()
depth_dict['output'] = depth_output * depth_dict['mask'] + depth_dict['depth'] * depth_dict['context']
return depth_dict
#export
def update_info(mapping_dict, info_on_pix, *meshes):
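    # Relabel the single old -> new node pair given in mapping_dict in every mesh and update its depth in info_on_pix.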
rt_meshes = []
for mesh in meshes:
rt_meshes.append(relabel_node(mesh, mesh.nodes, [*mapping_dict.keys()][0], [*mapping_dict.values()][0]))
x, y, _ = [*mapping_dict.keys()][0]
info_on_pix[(x, y)][0]['depth'] = [*mapping_dict.values()][0][2]
return [info_on_pix] + rt_meshes
#export
def build_connection(mesh, cur_node, dst_node):
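    # Link cur_node to dst_node when they are adjacent, then propagate the connection through cur_node's neighbours.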
if (abs(cur_node[0] - dst_node[0]) + abs(cur_node[1] - dst_node[1])) < 2:
mesh.add_edge(cur_node, dst_node)
if abs(cur_node[0] - dst_node[0]) > 1 or abs(cur_node[1] - dst_node[1]) > 1:
return mesh
ne_nodes = [*mesh.neighbors(cur_node)].copy()
for ne_node in ne_nodes:
if mesh.has_edge(ne_node, dst_node) or ne_node == dst_node:
continue
else:
mesh = build_connection(mesh, ne_node, dst_node)
return mesh
#export
def recursive_add_edge(edge_mesh, mesh, info_on_pix, cur_node, mark):
ne_nodes = [(x[0], x[1]) for x in edge_mesh.neighbors(cur_node)]
for node_xy in ne_nodes:
node = (node_xy[0], node_xy[1], info_on_pix[node_xy][0]['depth'])
if mark[node[0], node[1]] != 3:
continue
else:
mark[node[0], node[1]] = 0
mesh.remove_edges_from([(xx, node) for xx in mesh.neighbors(node)])
mesh = build_connection(mesh, cur_node, node)
re_info = dict(depth=0, count=0)
for re_ne in mesh.neighbors(node):
re_info['depth'] += re_ne[2]
re_info['count'] += 1.
try:
re_depth = re_info['depth'] / re_info['count']
            except ZeroDivisionError:
re_depth = node[2]
re_node = (node_xy[0], node_xy[1], re_depth)
mapping_dict = {node: re_node}
info_on_pix, edge_mesh, mesh = update_info(mapping_dict, info_on_pix, edge_mesh, mesh)
edge_mesh, mesh, mark, info_on_pix = recursive_add_edge(edge_mesh, mesh, info_on_pix, re_node, mark)
return edge_mesh, mesh, mark, info_on_pix
#export
def resize_for_edge(tensor_dict, largest_size):
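    # Downscale all maps so the longest side of the edge map is at most largest_size, re-binarizing masks and edges after interpolation.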
resize_dict = {k: v.clone() for k, v in tensor_dict.items()}
frac = largest_size / np.array([*resize_dict['edge'].shape[-2:]]).max()
if frac < 1:
resize_mark = torch.nn.functional.interpolate(torch.cat((resize_dict['mask'],
resize_dict['context']),
dim=1),
scale_factor=frac,
mode='bilinear')
resize_dict['mask'] = (resize_mark[:, 0:1] > 0).float()
resize_dict['context'] = (resize_mark[:, 1:2] == 1).float()
resize_dict['context'][resize_dict['mask'] > 0] = 0
resize_dict['edge'] = torch.nn.functional.interpolate(resize_dict['edge'],
scale_factor=frac,
mode='bilinear')
resize_dict['edge'] = (resize_dict['edge'] > 0).float()
resize_dict['edge'] = resize_dict['edge'] * resize_dict['context']
resize_dict['disp'] = torch.nn.functional.interpolate(resize_dict['disp'],
scale_factor=frac,
mode='nearest')
resize_dict['disp'] = resize_dict['disp'] * resize_dict['context']
resize_dict['rgb'] = torch.nn.functional.interpolate(resize_dict['rgb'],
scale_factor=frac,
mode='bilinear')
resize_dict['rgb'] = resize_dict['rgb'] * resize_dict['context']
return resize_dict
#export
def get_map_from_nodes(nodes, height, width):
omap = np.zeros((height, width))
for n in nodes:
omap[n[0], n[1]] = 1
return omap
#export
def get_map_from_ccs(ccs, height, width, condition_input=None, condition=None, real_id=False, id_shift=0):
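    # Rasterize connected components into a (height, width) map: binary by default, or per-pixel component ids when real_id=True.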
if condition is None:
condition = lambda x, condition_input: True
if real_id is True:
omap = np.zeros((height, width)) + (-1) + id_shift
else:
omap = np.zeros((height, width))
for cc_id, cc in enumerate(ccs):
for n in cc:
if condition(n, condition_input):
if real_id is True:
omap[n[0], n[1]] = cc_id + id_shift
else:
omap[n[0], n[1]] = 1
return omap
#export
def revise_map_by_nodes(nodes, imap, operation, limit_constr=None):
assert operation == '+' or operation == '-', "Operation must be '+' (union) or '-' (exclude)"
omap = copy.deepcopy(imap)
revise_flag = True
if operation == '+':
for n in nodes:
omap[n[0], n[1]] = 1
if limit_constr is not None and omap.sum() > limit_constr:
omap = imap
revise_flag = False
elif operation == '-':
for n in nodes:
omap[n[0], n[1]] = 0
if limit_constr is not None and omap.sum() < limit_constr:
omap = imap
revise_flag = False
return omap, revise_flag
#export
def repaint_info(mesh, cc, x_anchor, y_anchor, source_type):
if source_type == 'rgb':
feat = np.zeros((3, x_anchor[1] - x_anchor[0], y_anchor[1] - y_anchor[0]))
else:
feat = np.zeros((1, x_anchor[1] - x_anchor[0], y_anchor[1] - y_anchor[0]))
for node in cc:
if source_type == 'rgb':
feat[:, node[0] - x_anchor[0], node[1] - y_anchor[0]] = np.array(mesh.nodes[node]['color']) / 255.
elif source_type == 'd':
feat[:, node[0] - x_anchor[0], node[1] - y_anchor[0]] = abs(node[2])
return feat
#export
def get_context_from_nodes(mesh, cc, H, W, source_type=''):
if 'rgb' in source_type or 'color' in source_type:
feat = np.zeros((H, W, 3))
else:
feat = np.zeros((H, W))
context = np.zeros((H, W))
for node in cc:
if 'rgb' in source_type or 'color' in source_type:
feat[node[0], node[1]] = np.array(mesh.nodes[node]['color']) / 255.
context[node[0], node[1]] = 1
else:
feat[node[0], node[1]] = abs(node[2])
return feat, context
#export
def get_mask_from_nodes(mesh, cc, H, W):
mask = np.zeros((H, W))
for node in cc:
mask[node[0], node[1]] = abs(node[2])
return mask
#export
def get_edge_from_nodes(context_cc, erode_context_cc, mask_cc, edge_cc, extend_edge_cc, H, W, mesh):
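    # Collect per-pixel rgb/disparity/depth values plus context, mask and edge maps for the given connected components.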
context = np.zeros((H, W))
mask = np.zeros((H, W))
rgb = np.zeros((H, W, 3))
disp = np.zeros((H, W))
depth = np.zeros((H, W))
real_depth = np.zeros((H, W))
edge = np.zeros((H, W))
comp_edge = np.zeros((H, W))
fpath_map = np.zeros((H, W)) - 1
npath_map = np.zeros((H, W)) - 1
near_depth = np.zeros((H, W))
for node in context_cc:
rgb[node[0], node[1]] = np.array(mesh.nodes[node]['color'])
disp[node[0], node[1]] = mesh.nodes[node]['disp']
depth[node[0], node[1]] = node[2]
context[node[0], node[1]] = 1
for node in erode_context_cc:
rgb[node[0], node[1]] = np.array(mesh.nodes[node]['color'])
disp[node[0], node[1]] = mesh.nodes[node]['disp']
depth[node[0], node[1]] = node[2]
context[node[0], node[1]] = 1
rgb = rgb / 255.
disp = np.abs(disp)
disp = disp / disp.max()
real_depth = depth.copy()
for node in context_cc:
if mesh.nodes[node].get('real_depth') is not None:
real_depth[node[0], node[1]] = mesh.nodes[node]['real_depth']
for node in erode_context_cc:
if mesh.nodes[node].get('real_depth') is not None:
real_depth[node[0], node[1]] = mesh.nodes[node]['real_depth']
for node in mask_cc:
mask[node[0], node[1]] = 1
near_depth[node[0], node[1]] = node[2]
for node in edge_cc:
edge[node[0], node[1]] = 1
for node in extend_edge_cc:
comp_edge[node[0], node[1]] = 1
rt_dict = {'rgb': rgb, 'disp': disp, 'depth': depth, 'real_depth': real_depth, 'self_edge': edge, 'context': context,
'mask': mask, 'fpath_map': fpath_map, 'npath_map': npath_map, 'comp_edge': comp_edge, 'valid_area': context + mask,
'near_depth': near_depth}
return rt_dict
#export
def get_depth_from_maps(context_map, mask_map, depth_map, H, W, log_depth=False):
context = context_map.astype(np.uint8)
mask = mask_map.astype(np.uint8).copy()
depth = np.abs(depth_map)
real_depth = depth.copy()
zero_mean_depth = np.zeros((H, W))
if log_depth is True:
log_depth = np.log(real_depth + 1e-8) * context
mean_depth = np.mean(log_depth[context > 0])
zero_mean_depth = (log_depth - mean_depth) * context
else:
zero_mean_depth = real_depth
mean_depth = 0
edge = np.zeros_like(depth)
rt_dict = {'depth': depth, 'real_depth': real_depth, 'context': context, 'mask': mask,
'mean_depth': mean_depth, 'zero_mean_depth': zero_mean_depth, 'edge': edge}
return rt_dict
#export
def get_depth_from_nodes(context_cc, erode_context_cc, mask_cc, H, W, mesh, log_depth=False):
context = np.zeros((H, W))
mask = np.zeros((H, W))
depth = np.zeros((H, W))
real_depth = np.zeros((H, W))
zero_mean_depth = np.zeros((H, W))
for node in context_cc:
depth[node[0], node[1]] = node[2]
context[node[0], node[1]] = 1
for node in erode_context_cc:
depth[node[0], node[1]] = node[2]
context[node[0], node[1]] = 1
depth = np.abs(depth)
real_depth = depth.copy()
for node in context_cc:
if mesh.nodes[node].get('real_depth') is not None:
real_depth[node[0], node[1]] = mesh.nodes[node]['real_depth']
for node in erode_context_cc:
if mesh.nodes[node].get('real_depth') is not None:
real_depth[node[0], node[1]] = mesh.nodes[node]['real_depth']
real_depth = np.abs(real_depth)
for node in mask_cc:
mask[node[0], node[1]] = 1
if log_depth is True:
log_depth = np.log(real_depth + 1e-8) * context
mean_depth = np.mean(log_depth[context > 0])
zero_mean_depth = (log_depth - mean_depth) * context
else:
zero_mean_depth = real_depth
mean_depth = 0
rt_dict = {'depth': depth, 'real_depth': real_depth, 'context': context, 'mask': mask,
'mean_depth': mean_depth, 'zero_mean_depth': zero_mean_depth}
return rt_dict
#export
def get_rgb_from_nodes(context_cc, erode_context_cc, mask_cc, H, W, mesh):
context = np.zeros((H, W))
mask = np.zeros((H, W))
rgb = np.zeros((H, W, 3))
erode_context = np.zeros((H, W))
for node in context_cc:
rgb[node[0], node[1]] = np.array(mesh.nodes[node]['color'])
context[node[0], node[1]] = 1
rgb = rgb / 255.
for node in mask_cc:
mask[node[0], node[1]] = 1
for node in erode_context_cc:
erode_context[node[0], node[1]] = 1
mask[node[0], node[1]] = 1
rt_dict = {'rgb': rgb, 'context': context, 'mask': mask,
'erode': erode_context}
return rt_dict
#export
def crop_maps_by_size(size, *imaps):
omaps = []
for imap in imaps:
omaps.append(imap[size['x_min']:size['x_max'], size['y_min']:size['y_max']].copy())
return omaps
#export
def convert2tensor(input_dict):
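    # Wrap numpy maps as float tensors with a batch dimension; rgb/color maps become 1 x 3 x H x W, everything else 1 x 1 x H x W.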
rt_dict = {}
for key, value in input_dict.items():
if 'rgb' in key or 'color' in key:
rt_dict[key] = torch.FloatTensor(value).permute(2, 0, 1)[None, ...]
else:
rt_dict[key] = torch.FloatTensor(value)[None, None, ...]
return rt_dict
| nbs/00_mesh_tools.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Assignment 4 (Tarea 4). Measuring return and risk in a portfolio.
#
# <img style="float: right; margin: 0px 0px 15px 15px;" src="[embedded base64 JPEG image omitted]" width="600px" height="400px" />
#
# **Summary.**
# > In this assignment you will compute daily expected return and volatility measures for four different portfolios. You will use the historical prices that you already downloaded in the previous assignment.
#
# **Grading criterion.**
# > You will be graded according to the final results you report, based on your analysis.
#
# **Before starting.**
# > Please copy and paste this file to another location. Before starting, rename it *Tarea4_ApellidoNombre* (LastNameFirstName), without accents and without spaces; for example, in my case the file would be called *Tarea4_JimenezEsteban*. Solve all the items in that file and upload it in this space.
# ## 1. Data download (20 points)
#
# Download the daily adjusted closing prices for the S&P 500 index (^GSPC), Microsoft (MSFT), Walgreens (WBA), and Tesla Motors (TSLA) for the period from January 1, 2011 through December 31, 2015.
#
# 1. Show the DataFrame of daily prices (5 points).
# 2. Plot the prices (5 points).
# 3. Show the DataFrame of daily percentage returns (5 points).
# 4. Plot the returns (5 points).
# import libraries
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline
import pandas_datareader.data as web
def get_adj_closes(tickers, start_date= '2011-01-01' ,
end_date='2015-12-15'):
closes = web.DataReader(name=tickers,
data_source='yahoo',
start=start_date,
end=end_date)
closes = closes['Adj Close']
closes.sort_index(inplace=True)
return closes
# +
# download the data
port=web.DataReader(name=['^GSPC','MSFT','WBA','TSLA'],
data_source='yahoo',
start='2011-01-01')
names= ['^GSPC','MSFT','WBA','TSLA']
start='2011-01-01'
end= '2015-12-15'
# -
#1
closes= get_adj_closes(tickers=names,
start_date=start,
end_date= end)
closes.head()
#2
# plot the prices
closes.plot()
#3
# compute the daily returns
r_port=closes.pct_change().dropna()
r_port.head()
#4
# plot the returns
r_port.plot(grid=True)
# ## 2. Expected return and volatility for each asset (30 points)
#
# Using the daily return data for MSFT, WBA, and TSLA:
#
# 1. Report in a DataFrame the daily expected return and daily volatility of each asset. Report in another DataFrame the annual expected return and annual volatility of each asset (10 points); the annualization convention is sketched right after this list.
# 2. Compute the variance-covariance matrix (daily basis) for MSFT, WBA, and TSLA (10 points).
# 3. Compute the correlation matrix (daily basis) for MSFT, WBA, and TSLA (10 points).
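# *Note (annualization convention used below): assuming 252 trading days per year and roughly i.i.d. daily returns,*
#
# $$E[r_{\text{annual}}]\approx 252\,E[r_{\text{daily}}],\qquad \sigma_{\text{annual}}\approx\sqrt{252}\,\sigma_{\text{daily}}.$$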
#1
# daily expected return and volatility
tabla= pd.DataFrame(data={'Mean':r_port.mean(),
'Volatility':r_port.std()},
index=r_port.columns)
tabla
#1
# convert the daily figures to annual figures
tabla2= pd.DataFrame(data={'Mean':r_port.mean()*252
,'Volatility':np.sqrt(252)*r_port.std()},
index=r_port.columns)
tabla2
#2
# variance-covariance matrix
r_port.cov()
#3
# correlation matrix
r_port.corr()
# ## 3. Expected return and volatility for portfolios (30 points)
#
# 1. Compute the daily returns of the following portfolios. Report in a DataFrame the annual expected return and annual volatility of each portfolio, computing them by treating each portfolio as if it were an individual asset (15 points).
#     - Portfolio 1: equally weighted among MSFT, WBA, and TSLA.
#     - Portfolio 2: 30% MSFT, 20% WBA, and 50% TSLA.
#     - Portfolio 3: 50% MSFT, 30% WBA, and 20% TSLA.
#     - Portfolio 4: 20% MSFT, 50% WBA, and 30% TSLA.
# 2. For each of the above portfolios, report in another DataFrame the annual expected return and annual volatility, computing them with the portfolio expected-return and volatility formulas derived in class (10 points); see the sketch right after this list.
# 3. Compare the results of point one with those of point two (5 points).
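# *Note: a sketch of the portfolio formulas referred to above (standard definitions; here $w$ is the vector of portfolio weights, $\mu$ the vector of expected asset returns, and $\Sigma$ their covariance matrix):*
#
# $$E[r_p]=w^T\mu,\qquad \sigma_p=\sqrt{w^T\Sigma\,w}.$$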
#1
# add the new portfolios
r_port['Port1']= 1/3*r_port['MSFT']+1/3*r_port['WBA']+ 1/3*r_port['TSLA']
r_port['Port2']= 0.3*r_port['MSFT']+0.2*r_port['WBA']+ 0.5*r_port['TSLA']
r_port['Port3']= 0.5*r_port['MSFT']+0.3*r_port['WBA']+ 0.2*r_port['TSLA']
r_port['Port4']= 0.2*r_port['MSFT']+0.5*r_port['WBA']+ 0.3*r_port['TSLA']
r_port.head()
#1
# compute the expected returns
Er1 = r_port['Port1'].mean()
Er2 = r_port['Port2'].mean()
Er3 = r_port['Port3'].mean()
Er4 = r_port['Port4'].mean()
Er1, Er2, Er3, Er4
# compute the volatilities
s1 = r_port['Port1'].std()
s2 = r_port['Port2'].std()
s3 = r_port['Port3'].std()
s4 = r_port['Port4'].std()
s1,s2,s3,s4
#1
# annual figures in a DataFrame
tabla3 = pd.DataFrame(data={'Mean':[Er1,Er2,Er3,Er4]
,'Volatility':[s1,s2,s3,s4]}
,index=['Port1','Port2','Port3','Port4'])
tabla3.Mean = tabla3.Mean*252
tabla3.Volatility = tabla3.Volatility*252**(1/2)
tabla3
# asset weights in each portfolio
tabla4 = pd.DataFrame([[0,1/3,1/3,1/3]
,[0,0.3,0.2,0.5]
,[0,0.5,0.3,0.2]
,[0,0.2,0.5,0.3]],
columns=['^GSPC','MSFT','WBA','TSLA']
,index=['Port12','Port22','Port32','Port42'])
tabla4
rE1=(tabla['Mean']*tabla4.iloc[0]).sum()
rE2=(tabla['Mean']*tabla4.iloc[1]).sum()
rE3=(tabla['Mean']*tabla4.iloc[2]).sum()
rE4=(tabla['Mean']*tabla4.iloc[3]).sum()
# portfolio volatility from the covariance matrix: sigma_p = sqrt(w' Sigma w)
cov = r_port[['^GSPC', 'MSFT', 'WBA', 'TSLA']].cov()
vol1 = np.sqrt(tabla4.iloc[0].dot(cov).dot(tabla4.iloc[0]))
vol2 = np.sqrt(tabla4.iloc[1].dot(cov).dot(tabla4.iloc[1]))
vol3 = np.sqrt(tabla4.iloc[2].dot(cov).dot(tabla4.iloc[2]))
vol4 = np.sqrt(tabla4.iloc[3].dot(cov).dot(tabla4.iloc[3]))
# annualize the daily figures element-wise
tabla5 = pd.DataFrame(data={'Mean': 252*np.array([rE1, rE2, rE3, rE4]),
                            'Volatility': np.sqrt(252)*np.array([vol1, vol2, vol3, vol4])},
                      index=['Port12', 'Port22', 'Port32', 'Port42'])
tabla5
# **Observations**
#
# Both methods make it possible to carry out the required calculations, as well as to convert them from daily to annual figures, but I think the first one is a bit simpler.
# ## 4. Expected return vs. volatility plot (20 points)
#
# Create a scatter plot showing the expected return and volatility of each asset, the S&P 500 index, and the four portfolios in expected-return (y-axis) versus volatility (x-axis) space. Label each point and both axes appropriately.
# show the table
tabla
# +
X = pd.concat([tabla3['Volatility'],tabla2['Volatility']])
Y = pd.concat([tabla3['Mean'],tabla2['Mean']])
plt.scatter(X,Y)
plt.xlabel('Volatility')
plt.ylabel('Expected return')
plt.text(X[0],Y[0], 'Port1')
plt.text(X[1],Y[1], 'Port2')
plt.text(X[2],Y[2], 'Port3')
plt.text(X[3],Y[3], 'Port4')
plt.text(X[4],Y[4], 'MSFT')
plt.text(X[5],Y[5], 'TSLA')
plt.text(X[6],Y[6], 'WBA')
plt.text(X[7],Y[7], 'GSPC')
plt.grid()
plt.show()
# -
# <script>
# $(document).ready(function(){
# $('div.prompt').hide();
# $('div.back-to-top').hide();
# $('nav#menubar').hide();
# $('.breadcrumb').hide();
# $('.hidden-print').hide();
# });
# </script>
#
# <footer id="attribution" style="float:right; color:#808080; background:#fff;">
# Created with Jupyter by <NAME>.
# </footer>
| Modulo2/Tarea4_GalindoAriadna.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
#
# # Visualization Report
#
# ##### by <NAME>:
# 8/21/2018
#
#
# ****
#
# <img src="https://media.giphy.com/media/6h8jgwC3dU6vS/giphy.gif">
#
# In my visualization project, I used the predicted results from a neural network that can classify dog breeds. I found that the three most popular dog breeds in the data are: **Golden Retriever, Pembroke, and Labrador**.
# Most popular dog breeds
#
# <center> ** 1. golden_retriever ** </center>
# <center>** 2. Pembroke **</center>
# <center>** 3. Labrador_retriever** </center>
#
# <img src="https://media.giphy.com/media/ITacRy2zH4vMQ/giphy.gif">
# <center>*lab puppy + short lighter hair</center>
#
# <img src="https://media.giphy.com/media/FnsbzAybylCs8/giphy.gif">
# <center>* Golden Retriever + long golden hair</center>
#
# **Golden retriever and Labrador.** The golden retriever is the most popular dog, and the Labrador retriever is the third most popular. The golden retriever and the Labrador share many similarities in both personality and physical traits -- except that the golden retriever has longer golden fur. I would presume that the golden retriever and the Labrador both make excellent pets for families, given their good-natured, friendly, and gentle personalities.
#
# <img src="https://media.giphy.com/media/kvZ76ft0ozrCU/giphy.gif">
#
# On the other hand, the **Pembroke, or Corgi**, is also popular on Twitter. I am pretty sure that influence comes from the Queen of England, who made corgis very popular in the media.
#
# <img src="https://media.giphy.com/media/CA32wUtMUvrUY/giphy.gif">
#
# <center>** 1. Cooper ** </center>
# <center>** 2. Charlie **</center>
# <center>** 3. Bo ** </center>
#
# Next are the three most popular dog names: **Charlie, Lucy and Oliver**. As you can see, WeRateDogs addresses and shares its tweets with English-speaking people from England, Canada, the United States, Australia, and other English-speaking countries.
#
# <img src="https://media.giphy.com/media/YTXujdmJn3iOVZhMlQ/giphy.gif">
# <center>*picture of a pupper (labeled as a small adult dog that acts like a puppy)</center>
#
# <center> ** 1. pupper ** </center>
# <center> **2. doggo** </center>
# <center>** 3. puppo** </center>
#
# The other three favorite slang terms are **pupper, doggo, and puppo**.
#
# Pupper means a small adult dog that acts like a puppy, or a teenage dog. Doggo is an adult dog that acts like a puppy. Puppo, on the other hand, means puppy. Again, pupper is ranked number one and doggo number two. Possibly, people enjoy watching an adult dog acting like a puppy because it is flawed and humorous. I think it is important to notice these Twitter users' attraction to something wholesome -- like a mother nurturing her baby, people seem to enjoy taking care of vulnerable dogs.
| act_report.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Machine Learning Exercise 3 - Multi-class Classification
# This code covers a Python-based solution for the third programming exercise of the Coursera machine learning course. For detailed explanations and equations, see the [exercise text](ex3.pdf).
#
#
# Code modified and annotated by: 黄海广, <EMAIL>
# For this exercise, we will use logistic regression to recognize handwritten digits (0 to 9). We will extend the logistic regression implementation we wrote in exercise 2 and apply it to one-vs-all classification. Let's start by loading the dataset. It is in MATLAB's native format, so to load it in Python we need to use a SciPy utility.
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from scipy.io import loadmat
data = loadmat('ex3data1.mat')
data
data['X'].shape, data['y'].shape
# Good, we have loaded our data. The images are represented in matrix X as 400-dimensional vectors (there are 5,000 of them). The 400-dimensional "features" are the grayscale intensities of each pixel in the original 20 x 20 image. The class labels are in the vector y as numeric classes indicating which digit is in the image.
#
#
# The first task is to modify our logistic regression implementation to be fully vectorized (i.e. no "for" loops). This is because vectorized code, besides being concise, can take advantage of linear algebra optimizations and is usually much faster than iterative code. However, our cost function from exercise 2 is already fully vectorized, so we can simply reuse the same implementation here.
# # The sigmoid function
# g denotes a commonly used logistic function, the S-shaped sigmoid function, given by: \\[g\left( z \right)=\frac{1}{1+{{e}^{-z}}}\\]
# Combining these, we obtain the hypothesis function of the logistic regression model:
# \\[{{h}_{\theta }}\left( x \right)=\frac{1}{1+{{e}^{-{{\theta }^{T}}X}}}\\]
def sigmoid(z):
return 1 / (1 + np.exp(-z))
# The cost function:
# $J\left( \theta \right)=\frac{1}{m}\sum\limits_{i=1}^{m}{[-{{y}^{(i)}}\log \left( {{h}_{\theta }}\left( {{x}^{(i)}} \right) \right)-\left( 1-{{y}^{(i)}} \right)\log \left( 1-{{h}_{\theta }}\left( {{x}^{(i)}} \right) \right)]}$
def cost(theta, X, y, learningRate):
theta = np.matrix(theta)
X = np.matrix(X)
y = np.matrix(y)
first = np.multiply(-y, np.log(sigmoid(X * theta.T)))
second = np.multiply((1 - y), np.log(1 - sigmoid(X * theta.T)))
reg = (learningRate / (2 * len(X))) * np.sum(np.power(theta[:,1:theta.shape[1]], 2))
return np.sum(first - second) / len(X) + reg
# If we want to minimize this cost function with gradient descent, then since we do not regularize ${{\theta }_{0}}$, the gradient descent update splits into two cases:
# \begin{align}
# & Repeat\text{ }until\text{ }convergence\text{ }\!\!\{\!\!\text{ } \\
# & \text{ }{{\theta }_{0}}:={{\theta }_{0}}-a\frac{1}{m}\sum\limits_{i=1}^{m}{[{{h}_{\theta }}\left( {{x}^{(i)}} \right)-{{y}^{(i)}}]x_{_{0}}^{(i)}} \\
# & \text{ }{{\theta }_{j}}:={{\theta }_{j}}-a\frac{1}{m}\sum\limits_{i=1}^{m}{[{{h}_{\theta }}\left( {{x}^{(i)}} \right)-{{y}^{(i)}}]x_{j}^{(i)}}+\frac{\lambda }{m}{{\theta }_{j}} \\
# & \text{ }\!\!\}\!\!\text{ } \\
# & Repeat \\
# \end{align}
#
# Below is the original gradient function, implemented with a for loop:
def gradient_with_loop(theta, X, y, learningRate):
theta = np.matrix(theta)
X = np.matrix(X)
y = np.matrix(y)
parameters = int(theta.ravel().shape[1])
grad = np.zeros(parameters)
error = sigmoid(X * theta.T) - y
for i in range(parameters):
term = np.multiply(error, X[:,i])
if (i == 0):
grad[i] = np.sum(term) / len(X)
else:
grad[i] = (np.sum(term) / len(X)) + ((learningRate / len(X)) * theta[:,i])
return grad
# The vectorized gradient function
def gradient(theta, X, y, learningRate):
theta = np.matrix(theta)
X = np.matrix(X)
y = np.matrix(y)
parameters = int(theta.ravel().shape[1])
error = sigmoid(X * theta.T) - y
grad = ((X.T * error) / len(X)).T + ((learningRate / len(X)) * theta)
# intercept gradient is not regularized
grad[0, 0] = np.sum(np.multiply(error, X[:,0])) / len(X)
return np.array(grad).ravel()
# Now that we have defined the cost function and the gradient function, it is time to build the classifier. For this task we have 10 possible classes, and since logistic regression can only discriminate between 2 classes at a time, we need a strategy for multi-class classification. In this exercise our task is to implement the one-vs-all method, in which a label set with k different classes yields k classifiers, each deciding between "class i" and "not class i". We will wrap the classifier training in a single function that computes the final weights for each of the 10 classifiers and returns the weights as a k X (n + 1) array, where n is the number of parameters.
# +
from scipy.optimize import minimize
def one_vs_all(X, y, num_labels, learning_rate):
rows = X.shape[0]
params = X.shape[1]
# k X (n + 1) array for the parameters of each of the k classifiers
all_theta = np.zeros((num_labels, params + 1))
# insert a column of ones at the beginning for the intercept term
X = np.insert(X, 0, values=np.ones(rows), axis=1)
# labels are 1-indexed instead of 0-indexed
for i in range(1, num_labels + 1):
theta = np.zeros(params + 1)
y_i = np.array([1 if label == i else 0 for label in y])
y_i = np.reshape(y_i, (rows, 1))
# minimize the objective function
fmin = minimize(fun=cost, x0=theta, args=(X, y_i, learning_rate), method='TNC', jac=gradient)
all_theta[i-1,:] = fmin.x
return all_theta
# -
# A few things to note here: first, we add an extra parameter to theta (together with a column of ones in the training data) to account for the intercept (constant) term. Second, we convert y from class labels to binary values for each classifier (either class i or not class i). Finally, we use SciPy's newer optimization API to minimize the cost function for each classifier. If specified, the API takes an objective function, an initial set of parameters, an optimization method, and a jacobian (gradient) function. The parameters found by the optimizer are then assigned to the parameter array.
#
# One of the more challenging parts of implementing vectorized code is getting all of the matrices written correctly, so that the dimensions match.
# +
rows = data['X'].shape[0]
params = data['X'].shape[1]
all_theta = np.zeros((10, params + 1))
X = np.insert(data['X'], 0, values=np.ones(rows), axis=1)
theta = np.zeros(params + 1)
y_0 = np.array([1 if label == 0 else 0 for label in data['y']])
y_0 = np.reshape(y_0, (rows, 1))
X.shape, y_0.shape, theta.shape, all_theta.shape
# -
# Note that theta is a one-dimensional array, so when it is converted to a matrix in the gradient-computation code it becomes a (1 x 401) matrix. We also check the class labels in y to make sure they look like what we expect.
np.unique(data['y'])  # check how many label classes there are
# Let's make sure our training function runs correctly and produces reasonable output.
all_theta = one_vs_all(data['X'], data['y'], 10, 1)
all_theta
# We are now ready for the final step - using the trained classifiers to predict a label for each image. For this step we compute the class probability of every class for each training instance (with vectorized code, of course) and output the class label as the class with the highest probability.
def predict_all(X, all_theta):
rows = X.shape[0]
params = X.shape[1]
num_labels = all_theta.shape[0]
# same as before, insert ones to match the shape
X = np.insert(X, 0, values=np.ones(rows), axis=1)
# convert to matrices
X = np.matrix(X)
all_theta = np.matrix(all_theta)
# compute the class probability for each class on each training instance
h = sigmoid(X * all_theta.T)
# create array of the index with the maximum probability
h_argmax = np.argmax(h, axis=1)
# because our array was zero-indexed we need to add one for the true label prediction
h_argmax = h_argmax + 1
return h_argmax
# Now we can use the predict_all function to generate class predictions for each instance and see how well our classifier works.
y_pred = predict_all(data['X'], all_theta)
correct = [1 if a == b else 0 for (a, b) in zip(y_pred, data['y'])]
accuracy = (sum(map(int, correct)) / float(len(correct)))
print ('accuracy = {0}%'.format(accuracy * 100))
# In the next exercise, we will look at how to implement a feedforward neural network from scratch.
# # Neural network model diagram
# <img style="float: left;" src="../img/nn_model.png">
| Coursera-ML-AndrewNg-Notes-master/code/ex3-neural network/ML-Exercise3.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# - placeholders
import tensorflow as tf
x,y = tf.placeholder(tf.int32, shape=[3],name='x'),tf.placeholder(tf.int32, shape=[3],name='y')
sx = tf.reduce_sum(x,name="sx")
py = tf.reduce_prod(y,name="py")
d = tf.div(sx,py, name="d")
m = tf.reduce_mean([sx, py], name="m")
sess = tf.Session()
print ("sum(x): ", sess.run(sx, feed_dict={x: [100,200,300]}))
print ("prod(y): ", sess.run(py, feed_dict={y: [1,2,3]}))
w = tf.summary.FileWriter('./tf_1',sess.graph)
w.close()
sess.close()
# - fetches
import tensorflow as tf
W = tf.constant([10,100],name='const_W')
x = tf.placeholder(tf.int32,name='x')
b = tf.placeholder(tf.int32,name='b')
Wx = tf.multiply(W,x,name='Wx')
y = tf.add(Wx,b,name='y')
with tf.Session() as sess:
print( "Intermediate result Wx: ", sess.run(Wx, feed_dict={x: [3,33]}))
print( "Final results y: ",sess.run(y, feed_dict={x:[5,50],b:[7,9]}))
# +
writer = tf.summary.FileWriter('./fetchesAndFeed',sess.graph)
writer.close()
# -
# - variables
import tensorflow as tf
W = tf.Variable([2.5,4.0],tf.float32, name='var_W')
#here W is a Variable
x = tf.placeholder(tf.float32, name='x')
b = tf.Variable([5.0,10.0],tf.float32, name='var_b')
#b is also a variable with initial value 5 and 10
y = W * x + b
#initialize all variables defined
init = tf.global_variables_initializer()
with tf.Session() as sess:
sess.run(init)
#this computation is required to initialize the variable
print("Final result: Wx + b = ", sess.run(y,feed_dict={x:[10,100]}))
# changing values
number = tf.Variable(2)
multiplier = tf.Variable(1)
init = tf.global_variables_initializer()
result = number.assign(tf.multiply(number,multiplier))
with tf.Session() as sess:
sess.run(init)
for i in range(10):
print("Result number * multiplier = ",sess.run(result))
print("Increment multiplier, new value = ",sess.run(multiplier.assign_add(1)))
# - multiple graphs
import tensorflow as tf
g1 = tf.Graph()
'''set g1 as the default graph so tensors are added to it by the default method'''
with g1.as_default():
with tf.Session() as sess:
A = tf.constant([5,7],tf.int32, name='A')
x = tf.placeholder(tf.int32, name='x')
b = tf.constant([3,4],tf.int32, name='b')
y = A * x + b
print( sess.run(y, feed_dict={x: [10,100]}))
assert y.graph is g1
g2 = tf.Graph()
with g2.as_default():
with tf.Session() as sess:
A = tf.constant([5,7],tf.int32, name='A')
x = tf.placeholder(tf.int32, name='x')
y = tf.pow(A,x,name='y')
print( sess.run(y, feed_dict={x: [3,5]}))
assert y.graph is g2
'''in the same way you can access the default graph'''
default_graph = tf.get_default_graph()
with tf.Session() as sess:
A = tf.constant([5,7],tf.int32, name='A')
x = tf.placeholder(tf.int32, name='x')
y = A + x
print(sess.run(y, feed_dict={x: [3,5]}))
assert y.graph is default_graph
# - named scope
import tensorflow as tf
A = tf.constant([4], tf.int32, name='A')
B = tf.constant([4], tf.int32, name='B')
C = tf.constant([4], tf.int32, name='C')
x = tf.placeholder(tf.int32, name='x')
# y = Ax^2 + Bx + C
Ax2_1 = tf.multiply(A, tf.pow(x,2), name="Ax2_1")
Bx = tf.multiply(A,x, name="Bx")
y1 = tf.add_n([Ax2_1, Bx, C], name='y1')
# y = Ax^2 + Bx^2
Ax2_2 = tf.multiply(A, tf.pow(x,2),name='Ax2_2')
Bx2 = tf.multiply(B, tf.pow(x,2),name='Bx2')
y2 = tf.add_n([Ax2_2,Bx2],name='y2')
y = y1 + y2
with tf.Session() as sess:
print(sess.run(y, feed_dict={x:[10]}))
writer = tf.summary.FileWriter('./named_scope',sess.graph)
writer.close()
| sess_playground.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from jturtle import Turtle
t = Turtle(800,400)
t.clear()
t.forward(100)
# +
t.home()
t.speed(5)
colours=["red","blue","yellow","brown","black","purple","green"]
t.penup();
t.left(90);
t.forward(140);
t.right(90);
t.pendown()
for i in range (0,18):
t.pencolor(colours[i%7])
t.right(20)
t.forward(50)
t.right(180)
#t.home()
# -
t.circle(60)
t.circle(50,180)
t.reset()
t.home()
t.right(90)
t.forward(100)
| Test.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# env: {}
# interrupt_mode: signal
# language: python
# metadata: {}
# name: python3
# ---
# Dot Dash Plot
# =============
# This example shows how to make a dot-dash plot presented in <NAME>'s book
# Visual Display of Quantitative Information on page 133. This example is based
# on https://bl.ocks.org/g3o2/bd4362574137061c243a2994ba648fb8.
#
# +
import altair as alt
alt.data_transformers.enable('json')
from vega_datasets import data
cars = data.cars()
brush = alt.selection(type='interval')
tick_axis = alt.Axis(labels=False, domain=False, ticks=False)
tick_axis_notitle = alt.Axis(labels=False, domain=False, ticks=False, title='')
points = alt.Chart(cars).mark_point().encode(
x=alt.X('Miles_per_Gallon', axis=alt.Axis(title='')),
y=alt.Y('Horsepower', axis=alt.Axis(title='')),
color=alt.condition(brush, 'Origin', alt.value('grey'))
).properties(
selection=brush
)
x_ticks = alt.Chart(cars).mark_tick().encode(
alt.X('Miles_per_Gallon', axis=tick_axis),
alt.Y('Origin', axis=tick_axis_notitle),
color=alt.condition(brush, 'Origin', alt.value('lightgrey'))
).properties(
selection=brush
)
y_ticks = alt.Chart(cars).mark_tick().encode(
alt.X('Origin', axis=tick_axis_notitle),
alt.Y('Horsepower', axis=tick_axis),
color=alt.condition(brush, 'Origin', alt.value('lightgrey'))
).properties(
selection=brush
)
y_ticks | (points & x_ticks)
| notebooks/examples/dot_dash_plot.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from nltk.tokenize import word_tokenize
from nltk import pos_tag
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
from sklearn.preprocessing import LabelEncoder
from collections import defaultdict
from nltk.corpus import wordnet as wn
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn import model_selection, naive_bayes, svm
from sklearn.metrics import accuracy_score
from collections import Counter
#[1] Importing dataset
dataset = pd.read_json(r"C:\Users\Panos\Desktop\Dissert\Code\Sample_Video_Games_5.json", lines=True, encoding='latin-1')
dataset = dataset[['reviewText','overall']]
#[2] Reduce number of classes
ratings = []
for index,entry in enumerate(dataset['overall']):
if entry == 1.0 or entry == 2.0:
ratings.append(-1)
elif entry == 3.0:
ratings.append(0)
elif entry == 4.0 or entry == 5.0:
ratings.append(1)
# +
#[3] Cleaning the text & lemmatization
import multiprocessing
from joblib import Parallel, delayed
# Step - a : Remove blank rows if any.
dataset['reviewText'].dropna(inplace=True)
# Step - b : Change all the text to lower case. This is required as python interprets 'dog' and 'DOG' differently
dataset['reviewText'] = [entry.lower() for entry in dataset['reviewText']]
# Step - c : Tokenization : In this each entry in the corpus will be broken into set of words
dataset['reviewText'] = [word_tokenize(entry) for entry in dataset['reviewText']]
# Step - d : Remove Stop words and non-alphabetic tokens, and perform Word Lemmatization.
# WordNetLemmatizer requires Pos tags to understand if the word is noun or verb or adjective etc. By default it is set to Noun
tag_map = defaultdict(lambda : wn.NOUN)
tag_map['J'] = wn.ADJ
tag_map['V'] = wn.VERB
tag_map['R'] = wn.ADV
num_cores = multiprocessing.cpu_count()
def lemmatization(dataset):
    for index, entry in enumerate(dataset['reviewText']):
        # Declaring Empty List to store the words that follow the rules for this step
        Final_words = []
        # Initializing WordNetLemmatizer()
        word_Lemmatized = WordNetLemmatizer()
        # pos_tag function below will provide the 'tag' i.e if the word is Noun(N) or Verb(V) or something else.
        for word, tag in pos_tag(entry):
            # Below condition is to check for Stop words and consider only alphabets
            if word not in stopwords.words('english') and word.isalpha():
                word_Final = word_Lemmatized.lemmatize(word, tag_map[tag[0]])
                Final_words.append(word_Final)
        # The final processed set of words for each iteration will be stored in 'text_final'
        dataset.loc[index, 'text_final'] = str(Final_words)
    return dataset
dataset = lemmatization(dataset)
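# The `multiprocessing` / `joblib` imports above suggest this step was meant to run in
# parallel. A hedged sketch of one way to do that follows (the helper `lemmatize_entry`
# is hypothetical, not the original author's code); it would produce the same
# 'text_final' column as the sequential loop above.
def lemmatize_entry(entry):
    lemmatizer = WordNetLemmatizer()
    return str([lemmatizer.lemmatize(word, tag_map[tag[0]])
                for word, tag in pos_tag(entry)
                if word not in stopwords.words('english') and word.isalpha()])
# Uncomment to use the parallel version instead of the loop above:
# dataset['text_final'] = Parallel(n_jobs=num_cores)(
#     delayed(lemmatize_entry)(entry) for entry in dataset['reviewText'])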
# +
#[4] Prepare Train and Test Data sets
Train_X, Test_X, Train_Y, Test_Y = model_selection.train_test_split(dataset['text_final'],ratings,test_size=0.3)
print(Counter(Train_Y).values()) # counts the elements' frequency
# +
#[5] Encoding
Encoder = LabelEncoder()
Train_Y = Encoder.fit_transform(Train_Y)
Test_Y = Encoder.transform(Test_Y)  # reuse the encoder fitted on the training labels
# +
#[6] Word Vectorization
Tfidf_vect = TfidfVectorizer(max_features=10000)
Tfidf_vect.fit(dataset['text_final'])
Train_X_Tfidf = Tfidf_vect.transform(Train_X)
Test_X_Tfidf = Tfidf_vect.transform(Test_X)
#the vocabulary that it has learned from the corpus
#print(Tfidf_vect.vocabulary_)
# the vectorized data
#print(Train_X_Tfidf)
# +
#[7] Use the Naive Bayes Algorithms to Predict the outcome
# fit the training dataset on the NB classifier
Naive = naive_bayes.MultinomialNB()
Naive.fit(Train_X_Tfidf,Train_Y)
# predict the labels on validation dataset
predictions_NB = Naive.predict(Test_X_Tfidf)
# Use accuracy_score function to get the accuracy
print("-----------------------Naive Bayes------------------------\n")
print("Naive Bayes Accuracy Score -> ",accuracy_score(predictions_NB, Test_Y)*100)
# Making the confusion matrix
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(Test_Y, predictions_NB)
print("\n",cm,"\n")
# Printing a classification report of different metrics
from sklearn.metrics import classification_report
my_tags = ['Negative','Neutral','Positive']  # LabelEncoder sorts the classes (-1, 0, 1), so index 0 is Negative
print(classification_report(Test_Y, predictions_NB,target_names=my_tags,zero_division = 0))
# Export reports to files for later visualizations
report_NB = classification_report(Test_Y, predictions_NB,target_names=my_tags, output_dict=True)
report_NB_df = pd.DataFrame(report_NB).transpose()
report_NB_df.to_csv(r'NB_report_TFIDFVect_Lemmatization.csv', index = True, float_format="%.3f")
# +
#[8] Use the Support Vector Machine Algorithms to Predict the outcome
# Classifier - Algorithm - SVM
# fit the training dataset on the classifier
SVM = svm.SVC(C=1.0, kernel='linear', degree=3, gamma='auto')
SVM.fit(Train_X_Tfidf,Train_Y)
# predict the labels on validation dataset
predictions_SVM = SVM.predict(Test_X_Tfidf)
# Use accuracy_score function to get the accuracy
print("-----------------Support Vector Machine CM------------------\n")
print("Accuracy Score -> ",accuracy_score(predictions_SVM, Test_Y)*100)
cm = confusion_matrix(Test_Y, predictions_SVM)
# Making the confusion matrix
print("\n",cm,"\n")
# Printing a classification report of different metrics
print(classification_report(Test_Y, predictions_SVM,target_names=my_tags))
# Export reports to files for later visualizations
report_SVM = classification_report(Test_Y, predictions_SVM,target_names=my_tags, output_dict=True)
report_SVM_df = pd.DataFrame(report_SVM).transpose()
report_SVM_df.to_csv(r'SVM_report_TFIDFVect_Lemmatization.csv', index = True, float_format="%.3f")
| .ipynb_checkpoints/Naive_Bayes_tfidfVectorizer-Lemmatization-Copy1-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.8.5 64-bit (''bluesky_2021_1'': conda)'
# name: python385jvsc74a57bd07dab0508751cb82e61e81c3615f36912821e9ee8b7d6dc826bc4bbc1ddc8fd45
# ---
# # Demonstrate the MMCController ophyd support
# %run -i xy_positioner
stage = TwoD_XY_StagePositioner("", name="xy_stage")
print(f"{stage = }")
# +
print(f"{stage.forward((8.2, 3.14159)) = }")
print(f"{stage.inverse((-.5, -.23)) = }")
print(f"{stage.inverse((25, 6.24)) = }")
# -
st = stage.move((10,1))
print(f"{st = }")
st.wait()
print(f"{stage = }")
print(f"{stage.pair.mmc.xy = }")
stage.read()
stage.move((-1.234, 54.321))
stage.read()
| demonstrate_TwoD_XY_StagePositioner.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# ---
# <div style="background-color: #007bff; border-radius: 5px; width: 100%; padding: 10px; color: white">
# <b>Note:</b> This guide is written for an interactive environment such as Jupyter notebooks. The interactive widgets will not work in a static version of this documentation. Instructions for installing Panel and the example notebooks can be found in the <a href="https://panel.holoviz.org/#installation" target="_blank" style="color:white">Installation Guide</a>
# </div>
#
# Panel lets you add interactive controls for just about anything you can display in Python. Panel can help you build simple interactive apps, complex multi-page dashboards, or anything in between. As a simple example, let's say we have loaded the [UCI ML dataset measuring the environment in a meeting room](http://archive.ics.uci.edu/ml/datasets/Occupancy+Detection+):
# +
import pandas as pd; import numpy as np; import matplotlib.pyplot as plt
data = pd.read_csv('../assets/occupancy.csv')
data['date'] = data.date.astype('datetime64[ns]')
data = data.set_index('date')
data.tail()
# -
# And we've written some code that smooths a time series and plots it using Matplotlib with outliers highlighted:
# +
from matplotlib.figure import Figure
from matplotlib.backends.backend_agg import FigureCanvas
# %matplotlib inline
def mpl_plot(avg, highlight):
fig = Figure()
FigureCanvas(fig) # not needed in mpl >= 3.1
ax = fig.add_subplot()
avg.plot(ax=ax)
if len(highlight): highlight.plot(style='o', ax=ax)
return fig
def find_outliers(variable='Temperature', window=30, sigma=10, view_fn=mpl_plot):
avg = data[variable].rolling(window=window).mean()
residual = data[variable] - avg
std = residual.rolling(window=window).std()
outliers = (np.abs(residual) > std * sigma)
return view_fn(avg, avg[outliers])
# -
# We can call the function with parameters and get a plot:
find_outliers(variable='Temperature', window=20, sigma=10)
# It works! But exploring all these parameters by typing Python is slow and tedious. Plus we want our boss, or the boss's boss, to be able to try it out.
# If we wanted to try out lots of combinations of these values to understand how the window and sigma affect the plot, we could reevaluate the above cell lots of times, but that would be a slow and painful process, and is only really appropriate for users who are comfortable with editing Python code. In the next few examples we will demonstrate how to use Panel to quickly add some interactive controls to some object and make a simple app.
#
# To see an overview of the different APIs Panel offers see the [API user guide](../user_guide/APIs.ipynb) and for a quick reference for various Panel functionality see the [overview](../user_guide/Overview.ipynb).
#
# ## Interactive Panels
#
# Instead of editing code, it's much quicker and more straightforward to use sliders to adjust the values interactively. You can easily make a Panel app to explore a function's parameters using `pn.interact`, which is similar to the [ipywidgets interact function](https://ipywidgets.readthedocs.io/en/stable/examples/Using%20Interact.html):
# +
import panel as pn
pn.extension()
pn.interact(find_outliers)
# -
# As long as you have a live Python process running, dragging these widgets will trigger a call to the `find_outliers` callback function, evaluating it for whatever combination of parameter values you select and displaying the results. A Panel like this makes it very easy to explore any function that produces a visual result of a [supported type](https://github.com/pyviz/panel/issues/2), such as Matplotlib (as above), Bokeh, Plotly, Altair, or various text and image types.
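# As a small illustration (added here, not part of the original guide), `interact` is not
# limited to plots: a function that returns plain text works too. The helper below reuses
# the occupancy data loaded above.
# +
def count_outliers(variable='Temperature', window=30, sigma=10):
    avg = data[variable].rolling(window=window).mean()
    residual = data[variable] - avg
    std = residual.rolling(window=window).std()
    return f"{int((np.abs(residual) > std * sigma).sum())} outliers detected"

pn.interact(count_outliers, window=(1, 60), sigma=(1, 20))
# -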
#
# ## Components of Panels
#
# `interact` is convenient, but what if you want more control over how it looks or works? First, let's see what `interact` actually creates, by grabbing that object and displaying its representation:
kw = dict(window=(1, 60), variable=sorted(list(data.columns)), sigma=(1, 20))
i = pn.interact(find_outliers, **kw)
i.pprint()
# As you can see, the `interact` call created a `pn.Column` object consisting of a WidgetBox (with 3 widgets) and a `pn.Row` with one Matplotlib figure object. Panel is compositional, so you can mix and match these components any way you like, adding other objects as needed:
# +
text = "<br>\n# Room Occupancy\nSelect the variable, and the time window for smoothing"
p = pn.Row(i[1][0], pn.Column(text, i[0][0], i[0][1]))
p
# -
# Note that the widgets stay linked to their plot even if they are in a different notebook cell:
i[0][2]
# Also note that Panel widgets are reactive, so they will update even if you set the values by hand:
i[0][2].value = 5
# ## Composing new Panels
#
# You can use this compositional approach to combine different components such as widgets, plots, text, and other elements needed for an app or dashboard in arbitrary ways. The ``interact`` example builds on a reactive programming model, where an input to the function changes and Panel reactively updates the output of the function. ``interact`` is a convenient way to create widgets from the arguments to your function automatically, but Panel also provides a more explicit reactive API letting you specifically define connections between widgets and function arguments, and then lets you compose the resulting dashboard manually from scratch.
#
# In the example below we explicitly declare each of the components of an app: widgets, a function to return the plot, column and row containers, and the completed `occupancy` Panel app. Widget objects have multiple "parameters" (current value, allowed ranges, and so on), and here we will use Panel's ``bind`` function to declare that function's input values should come from the widgets' ``value`` parameters. Now when the function and the widgets are displayed, Panel will automatically update the displayed output whenever any of the inputs change:
# +
import panel.widgets as pnw
variable = pnw.RadioButtonGroup(name='variable', value='Temperature',
options=list(data.columns))
window = pnw.IntSlider(name='window', value=10, start=1, end=60)
reactive_outliers = pn.bind(find_outliers, variable, window, 10)
widgets = pn.Column("<br>\n# Room occupancy", variable, window)
occupancy = pn.Row(reactive_outliers, widgets)
occupancy
# -
# ## Deploying Panels
#
# The above panels all work in the notebook cell (if you have a live Jupyter kernel running), but unlike other approaches such as ipywidgets, Panel apps work just the same in a standalone server. For instance, the app above can be launched as its own web server on your machine by uncommenting and running the following cell:
# +
#occupancy.show()
# -
# Or, you can simply mark whatever you want to be in the separate web page with `.servable()`, and then run the shell command `panel serve --show Introduction.ipynb` to launch a server containing that object. (Here, we've also added a semicolon to avoid getting another copy of the occupancy app here in the notebook.)
occupancy.servable();
# During development, particularly when working with a raw script, using `panel serve --show --autoreload` can be very useful, as the application will automatically update whenever the script or notebook or any of its imports change.
# ## Declarative Panels
#
# The above compositional approach is very flexible, but it ties your domain-specific code (the parts about smoothing and outlier detection) to your widget display code. That's fine for small, quick projects or projects dominated by visualization code, but what about large-scale, long-lived projects, where the code is used in many different contexts over time, such as in large batch runs, one-off command-line usage, notebooks, and deployed dashboards? For larger projects like that, it's important to be able to separate the parts of the code that are about the underlying domain (i.e. application or research area) from those that are tied to specific display technologies (such as Jupyter notebooks or web servers).
#
# For such usages, Panel supports objects declared with the separate [Param](http://param.pyviz.org) library, which provides a GUI-independent way of capturing and declaring the parameters of your objects (and dependencies between your code and those parameters), in a way that's independent of any particular application or dashboard technology. For instance, the above code can be captured in an object that declares the ranges and values of all parameters, as well as how to generate the plot, independently of the Panel library or any other way of interacting with the object:
# +
import param
class RoomOccupancy(param.Parameterized):
variable = param.Selector(objects=list(data.columns))
window = param.Integer(default=10, bounds=(1, 20))
sigma = param.Number(default=10, bounds=(0, 20))
def view(self):
return find_outliers(self.variable, self.window, self.sigma)
obj = RoomOccupancy()
obj
# -
# The `RoomOccupancy` class and the `obj` instance have no dependency on Panel, Jupyter, or any other GUI or web toolkit; they simply declare facts about a certain domain (such as that smoothing requires window and sigma parameters, and that window is an integer greater than 0 and sigma is a positive real number). This information is then enough for Panel to create an editable and viewable representation for this object without having to specify anything that depends on the domain-specific details encapsulated in `obj`:
pn.Row(obj.param, obj.view)
# To support a particular domain, you can create hierarchies of such classes encapsulating all the parameters and functionality you need across different families of objects, with both parameters and code inheriting across the classes as appropriate, all without any dependency on a particular GUI library or even the presence of a GUI at all. This approach makes it practical to maintain a large codebase, all fully displayable and editable with Panel, in a way that can be maintained and adapted over time.
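# As a minimal illustrative sketch (added here; `ZoomedOccupancy` is a hypothetical class,
# not part of the original guide), a subclass inherits `variable`, `sigma` and `view` from
# `RoomOccupancy` and only overrides the allowed range of the smoothing window:
# +
class ZoomedOccupancy(RoomOccupancy):
    window = param.Integer(default=5, bounds=(1, 10))

zoomed = ZoomedOccupancy()
pn.Row(zoomed.param, zoomed.view)
# -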
# ## Linking plots and actions between panes
#
# The above approaches each work with a very wide variety of displayable objects, including images, equations, tables, and plots. In each case, Panel provides interactive functionality using widgets and updates the displayed objects accordingly, while making very few assumptions about what actually is being displayed. Panel also supports richer, more dynamic interactivity where the displayed object is itself interactive, such as the JavaScript-based plots from Bokeh and Plotly.
#
# For instance, if we substitute the [Bokeh](http://bokeh.pydata.org) wrapper [hvPlot](http://hvplot.pyviz.org) for the Matplotlib wrapper provided with Pandas, we automatically get interactive plots that allow zooming, panning and hovering:
# +
import hvplot.pandas
def hvplot(avg, highlight):
return avg.hvplot(height=200) * highlight.hvplot.scatter(color='orange', padding=0.1)
text2 = "## Room Occupancy\nSelect the variable and the smoothing values"
hvp = pn.interact(find_outliers, view_fn=hvplot, **kw)
pn.Column(pn.Row(pn.panel(text2, width=400), hvp[0]), hvp[1]).servable("Occupancy")
# -
# These interactive actions can be combined with more complex interactions with a plot (e.g. tap, hover) to make it easy to explore data more deeply and uncover connections. For instance, we can use HoloViews to make a more full-featured version of the hvPlot example that displays a table of the current measurement values at the hover position on the plot:
# +
import holoviews as hv
tap = hv.streams.PointerX(x=data.index.min())
def hvplot2(avg, highlight):
line = avg.hvplot(height=300, width=500)
outliers = highlight.hvplot.scatter(color='orange', padding=0.1)
tap.source = line
return (line * outliers).opts(legend_position='top_right')
@pn.depends(tap.param.x)
def table(x):
index = np.abs((data.index - x).astype(int)).argmin()
return data.iloc[index]
app = pn.interact(find_outliers, view_fn=hvplot2, **kw)
pn.Row(
pn.Column("## Room Occupancy\nHover over the plot for more information.", app[0]),
pn.Row(app[1], table)
)
# -
# ## Exploring further
#
# For a quick reference of different Panel functionality refer to the [overview](../user_guide/Overview.ipynb). If you want a more detailed description of different ways of using Panel, each appropriate for different applications see the following materials:
#
# - [APIs](../user_guide/APIs.ipynb): An overview of the different APIs offered by Panel.
# - [Interact](../user_guide/Interact.ipynb): Instant GUI, given a function with arguments
# - [Widgets](../user_guide/Widgets.ipynb): Explicitly instantiating widgets and linking them to actions
# - [Parameters](../user_guide/Param.ipynb): Capturing parameters and their links to actions declaratively
#
# Just pick the style that seems most appropriate for the task you want to do, then study that section of the user guide. Regardless of which approach you take, you'll want to learn more about Panel's panes and layouts:
#
# - [Components](../user_guide/Components.ipynb): An overview of the core components of Panel including Panes, Widgets and Layouts
# - [Customization](../user_guide/Customization.ipynb): How to set styles and sizes of Panel components
# - [Deploy & Export](../user_guide/Deploy_and_Export.ipynb): An overview on how to display, export and deploy Panel apps and dashboards
#
#
# Finally, if you are building a complex multi-stage application, you can consider our support for organizing workflows consisting of multiple stages:
#
# - [Pipelines](../user_guide/Pipelines.ipynb): Making multi-stage processing pipelines in notebooks and as deployed apps
#
# Or for more polished apps you can make use of Templates to achieve exactly the look and feel you want:
#
# - [Templates](../user_guide/Templates.ipynb): Composing one or more Panel objects into a Jinja2 template with full control over layout and styling.
| examples/getting_started/Introduction.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# !rm -rf output-*/
# ## Test: levels = [0]
# !mkdir -p output-0
# ! PYTHONPATH="$PYTHONPATH:../../" \
# python -m FIDDLE.run \
# --data_fname='./input/data.csv' \
# --population_fname='./input/pop.csv' \
# --config_fname='./input/config-0.yaml' \
# --output_dir='./output-0/' \
# --T=4 --dt=1.0 \
# --theta_1=0.001 --theta_2=0.001 --theta_freq=1 \
# --stats_functions 'min' 'max' 'mean'
# +
import numpy as np
import pandas as pd
import json
import sparse
S = sparse.load_npz('output-0/S_all.npz')
S_names = json.load(open('output-0/S_all.feature_names.json', 'r'))
S_index = pd.read_csv('output-0/S.ID.csv').set_index(['ID'])
df_S = pd.DataFrame(S.todense(), columns=S_names, index=S_index.index)
display(df_S)
# -
# ## Test: levels = [0,1]
# !mkdir -p output-1
# ! PYTHONPATH="$PYTHONPATH:../../" \
# python -m FIDDLE.run \
# --data_fname='./input/data.csv' \
# --population_fname='./input/pop.csv' \
# --config_fname='./input/config-1.yaml' \
# --output_dir='./output-1/' \
# --T=4 --dt=1.0 \
# --theta_1=0.001 --theta_2=0.001 --theta_freq=1 \
# --stats_functions 'min' 'max' 'mean'
# +
import numpy as np
import pandas as pd
import json
import sparse
S = sparse.load_npz('output-1/S_all.npz')
S_names = json.load(open('output-1/S_all.feature_names.json', 'r'))
S_index = pd.read_csv('output-1/S.ID.csv').set_index(['ID'])
df_S = pd.DataFrame(S.todense(), columns=S_names, index=S_index.index)
display(df_S)
# -
# ## Test: levels = [0,1,2]
# !mkdir -p output-2
# ! PYTHONPATH="$PYTHONPATH:../../" \
# python -m FIDDLE.run \
# --data_fname='./input/data.csv' \
# --population_fname='./input/pop.csv' \
# --config_fname='./input/config-2.yaml' \
# --output_dir='./output-2/' \
# --T=4 --dt=1.0 \
# --theta_1=0.001 --theta_2=0.001 --theta_freq=1 \
# --stats_functions 'min' 'max' 'mean'
# +
import numpy as np
import pandas as pd
import json
import sparse
S = sparse.load_npz('output-2/S_all.npz')
S_names = json.load(open('output-2/S_all.feature_names.json', 'r'))
S_index = pd.read_csv('output-2/S.ID.csv').set_index(['ID'])
df_S = pd.DataFrame(S.todense(), columns=S_names, index=S_index.index)
display(df_S)
# -
| tests/icd_test/Run.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + tags=["remove_input"]
from datascience import *
# %matplotlib inline
path_data = '../../data/'
import matplotlib.pyplot as plots
plots.style.use('fivethirtyeight')
import numpy as np
# -
# ### Deflategate ###
# On January 18, 2015, the Indianapolis Colts and the New England Patriots played the American Football Conference (AFC) championship game to determine which of those teams would play in the Super Bowl. After the game, there were allegations that the Patriots' footballs had not been inflated as much as the regulations required; they were softer. This could be an advantage, as softer balls might be easier to catch.
#
# For several weeks, the world of American football was consumed by accusations, denials, theories, and suspicions: the press labeled the topic Deflategate, after the Watergate political scandal of the 1970's. The National Football League (NFL) commissioned an independent analysis. In this example, we will perform our own analysis of the data.
#
# Pressure is often measured in pounds per square inch (psi). NFL rules stipulate that game balls must be inflated to have pressures in the range 12.5 psi and 13.5 psi. Each team plays with 12 balls. Teams have the responsibility of maintaining the pressure in their own footballs, but game officials inspect the balls. Before the start of the AFC game, all the Patriots' balls were at about 12.5 psi. Most of the Colts' balls were at about 13.0 psi. However, these pre-game data were not recorded.
#
# During the second quarter, the Colts intercepted a Patriots ball. On the sidelines, they measured the pressure of the ball and determined that it was below the 12.5 psi threshold. Promptly, they informed officials.
#
# At half-time, all the game balls were collected for inspection, and two officials measured the pressure in each of the balls.
#
# Here are the data. Each row corresponds to one football. Pressure is measured in psi. The Patriots ball that had been intercepted by the Colts was not inspected at half-time. Nor were most of the Colts' balls – the officials simply ran out of time and had to relinquish the balls for the start of second half play.
football = Table.read_table(path_data + 'deflategate.csv')
football.show()
# For each of the 15 balls that were inspected, the two officials got different results. It is not uncommon that repeated measurements on the same object yield different results, especially when the measurements are performed by different people. So we will assign to each ball the average of the two measurements made on that ball.
football = football.with_column(
'Combined', (football.column(1)+football.column(2))/2
).drop(1, 2)
football.show()
# At a glance, it seems apparent that the Patriots' footballs were at a lower pressure than the Colts' balls. Because some deflation is normal during the course of a game, the independent analysts decided to calculate the drop in pressure from the start of the game. Recall that the Patriots' balls had all started out at about 12.5 psi, and the Colts' balls at about 13.0 psi. Therefore the drop in pressure for the Patriots' balls was computed as 12.5 minus the pressure at half-time, and the drop in pressure for the Colts' balls was 13.0 minus the pressure at half-time.
#
# We can calculate the drop in pressure for each football by first setting up an array of the starting values. For this we will need an array consisting of 11 values each of which is 12.5, and another consisting of four values each of which is 13. We will use the NumPy function `np.ones`, which takes a count as its argument and returns an array of that many elements, each of which is 1.
np.ones(11)
patriots_start = 12.5 * np.ones(11)
colts_start = 13 * np.ones(4)
start = np.append(patriots_start, colts_start)
start
# The drop in pressure for each football is the difference between the starting pressure and the combined pressure measurement.
drop = start - football.column('Combined')
football = football.with_column('Pressure Drop', drop)
football.show()
# It looks as though the Patriots' drops were larger than the Colts'. Let's look at the average drop in each of the two groups. We no longer need the combined scores.
football = football.drop('Combined')
football.group('Team', np.average)
# The average drop for the Patriots was about 1.2 psi compared to about 0.47 psi for the Colts.
#
# The question now is why the Patriots' footballs had a larger drop in pressure, on average, than the Colts footballs. Could it be due to chance?
#
# ### The Hypotheses ###
# How does chance come in here? Nothing was being selected at random. But we can make a chance model by hypothesizing that the 11 Patriots' drops look like a random sample of 11 out of all the 15 drops, with the Colts' drops being the remaining four. That's a completely specified chance model under which we can simulate data. So it's the **null hypothesis**.
#
# For the alternative, we can take the position that the Patriots' drops are too large, on average, to resemble a random sample drawn from all the drops.
#
# ### Test Statistic ###
# A natural statistic is the difference between the two average drops, which we will compute as "average drop for Patriots - average drop for Colts". Large values of this statistic will favor the alternative hypothesis.
# +
observed_means = football.group('Team', np.average).column(1)
observed_difference = observed_means.item(1) - observed_means.item(0)
observed_difference
# -
# This positive difference reflects the fact that the average drop in pressure of the Patriots' footballs was greater than that of the Colts.
# The function `difference_of_means` takes three arguments:
#
# - the name of the table of data
# - the label of the column containing the numerical variable whose average is of interest
# - the label of the column containing the two group labels
#
# It returns the difference between the means of the two groups.
#
# We have defined this function in an earlier section. The definition is repeated here for ease of reference.
def difference_of_means(table, label, group_label):
reduced = table.select(label, group_label)
means_table = reduced.group(group_label, np.average)
means = means_table.column(1)
return means.item(1) - means.item(0)
difference_of_means(football, 'Pressure Drop', 'Team')
# Notice that the difference has been calculated as Patriots' drops minus Colts' drops as before.
# ### Predicting the Statistic Under the Null Hypothesis ###
# If the null hypothesis were true, then it shouldn't matter which footballs are labeled Patriots and which are labeled Colts. The distributions of the two sets of drops would be the same. We can simulate this by randomly shuffling the team labels.
shuffled_labels = football.sample(with_replacement=False).column(0)
original_and_shuffled = football.with_column('Shuffled Label', shuffled_labels)
original_and_shuffled.show()
# How do all the group averages compare?
difference_of_means(original_and_shuffled, 'Pressure Drop', 'Shuffled Label')
difference_of_means(original_and_shuffled, 'Pressure Drop', 'Team')
# The two teams' average drop values are closer when the team labels are randomly assigned to the footballs than they were for the two groups actually used in the game.
#
# ### Permutation Test ###
# It's time for a step that is now familiar. We will do repeated simulations of the test statistic under the null hypothesis, by repeatedly permuting the footballs and assigning random sets to the two teams.
#
# Once again, we will use the function `one_simulated_difference` defined in an earlier section as follows.
def one_simulated_difference(table, label, group_label):
shuffled_labels = table.sample(with_replacement = False
).column(group_label)
shuffled_table = table.select(label).with_column(
'Shuffled Label', shuffled_labels)
return difference_of_means(shuffled_table, label, 'Shuffled Label')
# We can now use this function to create an array `differences` that contains 10,000 values of the test statistic simulated under the null hypothesis.
# +
differences = make_array()
repetitions = 10000
for i in np.arange(repetitions):
new_difference = one_simulated_difference(football, 'Pressure Drop', 'Team')
differences = np.append(differences, new_difference)
# -
# ### Conclusion of the Test ###
# To calculate the empirical P-value, it's important to recall the alternative hypothesis, which is that the Patriots' drops are too large to be the result of chance variation alone.
#
# Larger drops for the Patriots favor the alternative hypothesis. So the P-value is the chance (computed under the null hypothesis) of getting a test statistic equal to our observed value of 0.733522727272728 or larger.
empirical_P = np.count_nonzero(differences >= observed_difference) / 10000
empirical_P
# That's a pretty small P-value. To visualize this, here is the empirical distribution of the test statistic under the null hypothesis, with the observed statistic marked on the horizontal axis.
Table().with_column('Difference Between Group Averages', differences).hist()
plots.scatter(observed_difference, 0, color='red', s=30)
plots.title('Prediction Under the Null Hypothesis')
print('Observed Difference:', observed_difference)
print('Empirical P-value:', empirical_P)
# As in previous examples of this test, the bulk of the distribution is centered around 0. Under the null hypothesis, the Patriots' drops are a random sample of all 15 drops, and therefore so are the Colts'. Therefore the two sets of drops should be about equal on average, and therefore their difference should be around 0.
#
# But the observed value of the test statistic is quite far away from the heart of the distribution. By any reasonable cutoff for what is "small", the empirical P-value is small. So we end up rejecting the null hypothesis of randomness, and conclude that the Patriots drops were too large to reflect chance variation alone.
#
# The independent investigative team analyzed the data in several different ways, taking into account the laws of physics. The final report said,
#
# > "[T]he average pressure drop of the Patriots game balls exceeded the average pressure drop of the Colts balls by 0.45 to 1.02 psi, depending on various possible assumptions regarding the gauges used, and assuming an initial pressure of 12.5 psi for the Patriots balls and 13.0 for the Colts balls."
# >
# > -- *Investigative report commissioned by the NFL regarding the AFC Championship game on January 18, 2015*
#
# Our analysis shows an average pressure drop of about 0.73 psi, which is close to the center of the interval "0.45 to 1.02 psi" and therefore consistent with the official analysis.
# Remember that our test of hypotheses does not establish the reason *why* the difference is not due to chance. Establishing causality is usually more complex than running a test of hypotheses.
#
# But the all-important question in the football world was about causation: the question was whether the excess drop of pressure in the Patriots' footballs was deliberate. If you are curious about the answer given by the investigators, here is the [full report](https://nfllabor.files.wordpress.com/2015/05/investigative-and-expert-reports-re-footballs-used-during-afc-championsh.pdf).
| interactivecontent/compare-two-samples-by-bootstrapping/deflategate.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# %matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
import os
import sys
import L96sim
from L96_emulator.util import dtype, dtype_np, device
res_dir = '/gpfs/work/nonnenma/results/emulators/L96/'
data_dir = '/gpfs/work/nonnenma/data/emulators/L96/'
# +
from L96_emulator.run import setup
from L96_emulator.run_DA import setup_4DVar
from L96_emulator.likelihood import ObsOp_identity, ObsOp_subsampleGaussian, ObsOp_rotsampleGaussian
from L96_emulator.data_assimilation import GenModel, get_model, as_tensor
from L96_emulator.util import sortL96fromChannels, sortL96intoChannels
import torch
clrs, lgnd = ['w', 'b', 'c', 'g', 'y', 'r', 'm', 'k'], []
def get_analysis_rmses_4DVar_exp(exp_ids, ifplot=False):
win_lens = np.zeros(len(exp_ids))
if ifplot:
plt.figure(figsize=(16,6))
plt.subplot(1,3,1)
for clr in clrs:
plt.plot(-100, -1, 'o-', color=clr, linewidth=2.5)
rmses_total = []
for eid, exp_id in enumerate(exp_ids):
exp_names = os.listdir('experiments_DA/')
conf_exp = exp_names[np.where(np.array([name.split('_')[0] for name in exp_names])==str(exp_id))[0][0]][:-4]
args = setup_4DVar(conf_exp=f'experiments_DA/{conf_exp}.yml')
args.pop('conf_exp')
save_dir = 'results/data_assimilation/' + args['exp_id'] + '/'
fn = save_dir + 'out.npy'
out = np.load(res_dir + fn, allow_pickle=True)[()]
J = args['J']
n_steps = args['n_steps']
T_win = args['T_win']
T_shift = args['T_shift'] if args['T_shift'] >= 0 else T_win
dt = args['dt']
data = out['out']
y, m = out['y'], out['m']
x_sols = out['x_sols']
losses, times = out['losses'], out['times']
assert T_win == out['T_win']
mses = np.zeros(((data.shape[0] - T_win) // T_shift + 1, data.shape[1]))
print()
for i in range(len(mses)):
mse = np.nanmean((x_sols[i:i+1] - data)**2, axis=(-2, -1))
mses[i] = mse[i *T_shift]
if ifplot:
xx = np.arange(0, data.shape[0] - T_win, T_shift)
plt.subplot(1,3,1)
plt.plot(xx, mses, 'o-', color=clrs[eid], linewidth=2.5)
plt.xlim(0, len(data))
plt.subplot(1,3,2)
plt.plot(xx, np.nanmean(mses, axis=1), 'o-', color=clrs[eid], linewidth=2.5)
plt.xlim(0, len(data))
plt.subplot(1,3,3)
plt.plot(xx, mses, 'o-', color=clrs[eid], linewidth=2.5)
plt.axis([0, len(data)-1, 0, 2])
print(np.nanmean(mses[1:]))
lgnd.append('window length='+str(T_win))
rmses_total.append(np.sqrt(mses))
win_lens[eid] = T_win
if ifplot:
plt.subplot(1,3,1)
        plt.title('individual trials')
plt.ylabel('initial state MSE')
plt.subplot(1,3,2)
plt.title('averages over trials')
plt.legend(lgnd[:3])
plt.xlabel('time t')
plt.subplot(1,3,3)
        plt.title('individual trials, zoom-in on small MSEs')
plt.show()
return win_lens, rmses_total
def get_pred_rmses_4DVar_exp(exp_id, forecast_len=120):
exp_names = os.listdir('experiments_DA/')
conf_exp = exp_names[np.where(np.array([name.split('_')[0] for name in exp_names])==str(exp_id))[0][0]][:-4]
args = setup_4DVar(conf_exp=f'experiments_DA/{conf_exp}.yml')
args.pop('conf_exp')
#assert args['T_win'] == 64 # we want 4d integration window here
K,J = args['K'], args['J']
T_win = args['T_win']
model_pars = {
'exp_id' : args['model_exp_id'],
'model_forwarder' : 'rk4_default',
'K_net' : args['K'],
'J_net' : args['J'],
'dt_net' : args['dt']
}
model, model_forwarder, _ = get_model(model_pars, res_dir=res_dir, exp_dir='')
obs_operator = args['obs_operator']
obs_pars = {}
if obs_operator=='ObsOp_subsampleGaussian':
obs_pars['obs_operator'] = ObsOp_subsampleGaussian
obs_pars['obs_operator_args'] = {'r' : args['obs_operator_r'], 'sigma2' : args['obs_operator_sig2']}
elif obs_operator=='ObsOp_identity':
obs_pars['obs_operator'] = ObsOp_identity
obs_pars['obs_operator_args'] = {}
elif obs_operator=='ObsOp_rotsampleGaussian':
obs_pars['obs_operator'] = ObsOp_rotsampleGaussian
obs_pars['obs_operator_args'] = {'frq' : args['obs_operator_frq'],
'sigma2' : args['obs_operator_sig2']}
else:
raise NotImplementedError()
model_observer = obs_pars['obs_operator'](**obs_pars['obs_operator_args'])
prior = torch.distributions.normal.Normal(loc=torch.zeros((1,J+1,K)),
scale=1.*torch.ones((1,J+1,K)))
# ### define generative model for observed data
gen = GenModel(model_forwarder, model_observer, prior, T=T_win, x_init=None)
forecast_win = int(forecast_len/1.5) # 5d forecast
eval_every = int(6/1.5) # every 6h
save_dir = 'results/data_assimilation/' + args['exp_id'] + '/'
fn = save_dir + 'out.npy'
out = np.load(res_dir + fn, allow_pickle=True)[()]
J = args['J']
n_steps = args['n_steps']
T_win = args['T_win']
T_shift = args['T_shift'] if args['T_shift'] >= 0 else T_win
dt = args['dt']
data = out['out']
y, m = out['y'], out['m']
x_sols = out['x_sols']
print('percent of NaN sols', str(np.mean(np.isnan(x_sols))))
losses, times = out['losses'], out['times']
assert T_win == out['T_win']
mses = np.zeros(((data.shape[0] - forecast_win - T_win) // T_shift + 1, forecast_win//eval_every+1, y.shape[1]))
for i in range(len(mses)):
forecasts = gen._forward(x=as_tensor(x_sols[i]), T_obs=T_win + np.arange(0,forecast_win+1,eval_every))
n = i * T_shift + T_win
for j in range(mses.shape[1]): # loop over integration windows
forecast = forecasts[j].detach().cpu().numpy()
if np.any(np.isnan(forecast)):
print('warning - had NaN in forecasts!')
y_obs = data[n+j*eval_every]
mses[i,j] = np.nanmean((forecast - y_obs)**2, axis=(-2, -1))
pred_lens = 1.5/24 * np.arange(0, forecast_win+1, eval_every)
return pred_lens, np.sqrt(mses)
# +
exp_ids_analyticNet = ['76', '72', '68', '64', '60', '56', '52', '48', '80']
exp_ids_deepNet = ['92', '91', '90', '89', '88', '87', '86', '84', '85']
win_lens_analyticNet, rmses_analysis_analyticNet = get_analysis_rmses_4DVar_exp(exp_ids=exp_ids_analyticNet)
win_lens_deepNet, rmses_analysis_deepNet = get_analysis_rmses_4DVar_exp(exp_ids=exp_ids_deepNet)
pred_lens_analyticNet, rmses_pred_analyticNet = get_pred_rmses_4DVar_exp(exp_id='48')
pred_lens_deepNet, rmses_pred_deepNet = get_pred_rmses_4DVar_exp(exp_id='84')
# -
exp_id = '48'
exp_names = os.listdir('experiments_DA/')
conf_exp = exp_names[np.where(np.array([name.split('_')[0] for name in exp_names])==str(exp_id))[0][0]][:-4]
args = setup_4DVar(conf_exp=f'experiments_DA/{conf_exp}.yml')
args['exp_id']
# +
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
from L96_emulator.networks import named_network
exp_id = '89'
exp_names = os.listdir('experiments_DA/')
conf_exp = exp_names[np.where(np.array([name.split('_')[0] for name in exp_names])==str(exp_id))[0][0]][:-4]
args = setup_4DVar(conf_exp=f'experiments_DA/{conf_exp}.yml')
args.pop('conf_exp')
K,J = args['K'], args['J']
T_win = args['T_win']
model_pars = {
'exp_id' : args['model_exp_id'],
'model_forwarder' : 'rk4_default',
'K_net' : args['K'],
'J_net' : args['J'],
'dt_net' : args['dt']
}
model, model_forwarder, _ = get_model(model_pars, res_dir=res_dir, exp_dir='')
obs_pars = {'obs_operator' : ObsOp_rotsampleGaussian,
'obs_operator_args' : {'frq' : args['obs_operator_frq'],
'sigma2' : args['obs_operator_sig2']}}
model_observer = obs_pars['obs_operator'](**obs_pars['obs_operator_args'])
prior = torch.distributions.normal.Normal(loc=torch.zeros((1,J+1,K)),
scale=1.*torch.ones((1,J+1,K)))
gen_emulator = GenModel(model_forwarder, model_observer, prior, T=T_win, x_init=None)
model_simulator, model_forwarder_simulator = named_network(
model_name='BilinearConvNetL96',
n_input_channels=model_pars['J_net']+1,
n_output_channels=model_pars['J_net']+1,
K_net=model_pars['K_net'],
J_net=model_pars['J_net'],
F_net=8.,
init_net='analytical',
dt_net=model_pars['dt_net'],
model_forwarder='rk4_default',
padding_mode='circular',
seq_length=1
)
gen_simulator = GenModel(model_forwarder_simulator, model_observer, prior, T=T_win, x_init=None)
save_dir = 'results/data_assimilation/' + args['exp_id'] + '/'
fn = save_dir + 'out.npy'
out = np.load(res_dir + fn, allow_pickle=True)[()]
nc =4
win_length, forecast_length = args['T_win'], 80
n = 50 # pick an integration window
t0 = n*args['T_shift'] - win_length//2
t1 = n*args['T_shift']
t2 = t1 + win_length
t3 = t2 + forecast_length
data = out['y'][t0:t3,nc].T
data[data==0] = np.nan # display missing values as missing
data[:,-t3+t2:] = np.nan # mask out future
recon_emulator = np.nan * np.zeros_like(data)
background = torch.stack(gen_emulator._forward(as_tensor(out['x_sols'][n-1,nc]),
T_obs=np.arange(win_length//2,win_length))).squeeze().detach().cpu().numpy().T
forecast = torch.stack(gen_emulator._forward(as_tensor(out['x_sols'][n,nc]),
T_obs=np.arange(win_length+forecast_length))).squeeze().detach().cpu().numpy().T
recon_emulator[:,:win_length//2] = np.nan * background
recon_emulator[:,win_length//2:] = forecast
recon_simulator = np.nan * np.zeros_like(data)
background = torch.stack(gen_simulator._forward(as_tensor(out['x_sols'][n-1,nc]),
T_obs=np.arange(win_length//2,win_length))).squeeze().detach().cpu().numpy().T
forecast = torch.stack(gen_simulator._forward(as_tensor(out['x_sols'][n,nc]),
T_obs=np.arange(win_length+forecast_length))).squeeze().detach().cpu().numpy().T
recon_simulator[:,:win_length//2] = np.nan * background
recon_simulator[:,win_length//2:] = forecast
fontsize=18
plt.figure(figsize=(16*4/3,16))
ax = plt.subplot(6,2,1)
true = sortL96fromChannels(out['out'][t0:t3,nc]).T
plt.imshow(true, aspect='auto')
plt.yticks([], fontsize=fontsize)
plt.axis([0, t3-t0, 0, K])
plt.xticks([win_length//2, int(0.5/args['dt']), int(1.0/args['dt']), int(1.5/args['dt'])],
[r'$t_0$', 0.5, 1.0, 1.5], fontsize=fontsize)
plt.xlabel('time [au]', fontsize=fontsize, x=0.46)
plt.ylabel('true simulation', fontsize=fontsize)
axins = inset_axes(ax, width="5%", height="100%", loc='lower left',
bbox_to_anchor=(1.05, 0., 1, 1), bbox_transform=ax.transAxes, borderpad=0)
plt.colorbar(cax=axins)
ax = plt.subplot(6,2,3)
plt.imshow(data, aspect='auto')
plt.yticks([], fontsize=fontsize)
plt.plot([t1-t0-0.5, t1-t0-0.5], [0,K], linewidth=2, color='orange', label='integration window')
plt.plot([t2-t0-0.5, t2-t0-0.5], [0,K], linewidth=2, color='orange')
plt.plot([t1-t0-0.5, t2-t0-0.5], [0,0], linewidth=2, color='orange')
plt.plot([t1-t0-0.5, t2-t0-0.5], [K,K], linewidth=2, color='orange')
plt.plot([t2-t0, t2-t0], [0,K], linewidth=2, color='purple', label='forecast window')
plt.plot([t3-t0, t3-t0], [0,K], linewidth=2, color='purple')
plt.plot([t2-t0, t3-t0], [0,0], linewidth=2, color='purple')
plt.plot([t2-t0, t3-t0], [K,K], linewidth=2, color='purple')
plt.legend(fontsize=fontsize, frameon=False, handlelength=1.0)
plt.xticks([win_length//2, int(0.5/args['dt']), int(1.0/args['dt']), int(1.5/args['dt'])],
[r'$t_0$', 0.5, 1.0, 1.5], fontsize=fontsize)
plt.ylabel('observations', fontsize=fontsize)
plt.axis([0, t3-t0, 0, K+0.5])
axins = inset_axes(ax, width="5%", height="100%", loc='lower left',
bbox_to_anchor=(1.05, 0., 1, 1), bbox_transform=ax.transAxes, borderpad=0)
plt.colorbar(cax=axins)
ax = plt.subplot(6,2,5)
plt.imshow(recon_simulator, aspect='auto')
plt.yticks([], fontsize=fontsize)
plt.plot([t1-t0-0.5, t1-t0-0.5], [0,K], linewidth=2, color='orange', label='integration window')
plt.plot([t2-t0-0.5, t2-t0-0.5], [0,K], linewidth=2, color='orange')
plt.plot([t1-t0-0.5, t2-t0-0.5], [0,0], linewidth=2, color='orange')
plt.plot([t1-t0-0.5, t2-t0-0.5], [K,K], linewidth=2, color='orange')
plt.plot([t2-t0, t2-t0], [0,K], linewidth=2, color='purple', label='forecast window')
plt.plot([t3-t0, t3-t0], [0,K], linewidth=2, color='purple')
plt.plot([t2-t0, t3-t0], [0,0], linewidth=2, color='purple')
plt.plot([t2-t0, t3-t0], [K,K], linewidth=2, color='purple')
plt.axis([0, t3-t0, 0, K])
plt.xticks([win_length//2, int(0.5/args['dt']), int(1.0/args['dt']), int(1.5/args['dt'])],
[r'$t_0$', 0.5, 1.0, 1.5], fontsize=fontsize)
plt.xlabel('time [au]', fontsize=fontsize, x=0.46)
plt.ylabel('simulator \n reconstruction', fontsize=fontsize)
axins = inset_axes(ax, width="5%", height="100%", loc='lower left',
bbox_to_anchor=(1.05, 0., 1, 1), bbox_transform=ax.transAxes, borderpad=0)
plt.colorbar(cax=axins)
ax = plt.subplot(6,2,7)
plt.imshow(recon_simulator-true, aspect='auto', cmap='bwr')
plt.yticks([], fontsize=fontsize)
plt.axis([0, t3-t0, 0, K])
plt.xticks([win_length//2, int(0.5/args['dt']), int(1.0/args['dt']), int(1.5/args['dt'])],
[r'$t_0$', 0.5, 1.0, 1.5], fontsize=fontsize)
plt.xlabel('time [au]', fontsize=fontsize, x=0.46)
plt.ylabel('simulator \n reconstr. - true', fontsize=fontsize)
axins = inset_axes(ax, width="5%", height="100%", loc='lower left',
bbox_to_anchor=(1.05, 0., 1, 1), bbox_transform=ax.transAxes, borderpad=0)
plt.colorbar(cax=axins)
ax = plt.subplot(6,2,9)
plt.imshow(recon_emulator, aspect='auto')
plt.yticks([], fontsize=fontsize)
plt.plot([t1-t0-0.5, t1-t0-0.5], [0,K], linewidth=2, color='orange', label='integration window')
plt.plot([t2-t0-0.5, t2-t0-0.5], [0,K], linewidth=2, color='orange')
plt.plot([t1-t0-0.5, t2-t0-0.5], [0,0], linewidth=2, color='orange')
plt.plot([t1-t0-0.5, t2-t0-0.5], [K,K], linewidth=2, color='orange')
plt.plot([t2-t0, t2-t0], [0,K], linewidth=2, color='purple', label='forecast window')
plt.plot([t3-t0, t3-t0], [0,K], linewidth=2, color='purple')
plt.plot([t2-t0, t3-t0], [0,0], linewidth=2, color='purple')
plt.plot([t2-t0, t3-t0], [K,K], linewidth=2, color='purple')
plt.axis([0, t3-t0, 0, K])
plt.xticks([win_length//2, int(0.5/args['dt']), int(1.0/args['dt']), int(1.5/args['dt'])],
[r'$t_0$', 0.5, 1.0, 1.5], fontsize=fontsize)
plt.xlabel('time [au]', fontsize=fontsize, x=0.46)
plt.ylabel('emulator \n reconstruction', fontsize=fontsize)
axins = inset_axes(ax, width="5%", height="100%", loc='lower left',
bbox_to_anchor=(1.05, 0., 1, 1), bbox_transform=ax.transAxes, borderpad=0)
plt.colorbar(cax=axins)
ax = plt.subplot(6,2,11)
plt.imshow(recon_emulator-true, aspect='auto', cmap='bwr')
plt.yticks([], fontsize=fontsize)
plt.axis([0, t3-t0, 0, K])
plt.xticks([win_length//2, int(0.5/args['dt']), int(1.0/args['dt']), int(1.5/args['dt'])],
[r'$t_0$', 0.5, 1.0, 1.5], fontsize=fontsize)
plt.xlabel('time [au]', fontsize=fontsize, x=0.46)
plt.ylabel('emulator \n reconstr. - true', fontsize=fontsize)
axins = inset_axes(ax, width="5%", height="100%", loc='lower left',
bbox_to_anchor=(1.05, 0., 1, 1), bbox_transform=ax.transAxes, borderpad=0)
plt.colorbar(cax=axins)
ax = plt.subplot(3,4,3)
plt.plot(sortL96fromChannels(out['out'][t1,nc]), color='blue', label = 'true state', linewidth=1.5)
plt.plot(background[:,-1], '--', color='black', label = 'background')
plt.plot(sortL96fromChannels(out['x_sols'][n,nc]), color='orange', label = 'analysis', linewidth=1.5)
plt.legend(fontsize=fontsize, bbox_to_anchor=(0.43, 0.61), handlelength=0.7, frameon=False)
plt.xlabel(r'position $k$', fontsize=fontsize)
plt.ylabel(r'initial state $x(t_0)$', fontsize=fontsize)
plt.xticks([10, 20, 30, 40], fontsize=fontsize)
plt.yticks([-5, 0, 5, 10], fontsize=fontsize)
#plt.axis([0,41,-7,22])
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
box = ax.get_position()
box.x0 += 0.3 * (box.x1-box.x0)
box.y0 += 0.05 * (box.y1-box.y0)
ax.set_position(box)
print(np.mean((sortL96fromChannels(out['out'][t1,nc])-background[:,-1])**2))
print(np.mean((sortL96fromChannels(out['out'][t1,nc])-sortL96fromChannels(out['x_sols'][n,nc]))**2))
print(np.mean((sortL96fromChannels(out['out'][t1,nc])-out['x_sols'][n,nc])**2))
ax = plt.subplot(3,4,7)
nanmean = np.array([np.nanmean(rmses) for rmses in rmses_analysis_analyticNet])
nanstd = np.array([np.nanstd(rmses) for rmses in rmses_analysis_analyticNet])
plt.plot(win_lens_analyticNet*1.5/24, nanmean,
'-', color='blue', linewidth=2.5, label='simulator')
plt.plot(win_lens_analyticNet*1.5/24, nanmean-nanstd, '--',
color='blue', linewidth=1.0)
plt.plot(win_lens_analyticNet*1.5/24, nanmean+nanstd, '--',
color='blue', linewidth=1.0)
nanmean = np.array([np.nanmean(rmses) for rmses in rmses_analysis_deepNet])
nanstd = np.array([np.nanstd(rmses) for rmses in rmses_analysis_deepNet])
plt.plot(win_lens_deepNet*1.5/24, nanmean,
'-', color='orange', linewidth=2.5, label='emulator')
plt.plot(win_lens_deepNet*1.5/24, nanmean-nanstd, '--',
color='orange', linewidth=1.0)
plt.plot(win_lens_deepNet*1.5/24, nanmean+nanstd, '--',
color='orange', linewidth=1.0)
plt.xlabel('integration window length [au]', fontsize=fontsize)
plt.ylabel('analysis RMSE', fontsize=fontsize)
plt.yticks([0.4, 0.6, 0.8], fontsize=fontsize)
plt.xticks(win_lens_deepNet[::4]*1.5/24, win_lens_deepNet[::4]*args['dt'], fontsize=fontsize)
plt.legend(fontsize=fontsize, frameon=False, handlelength=0.7)
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
box = ax.get_position()
box.x0 += 0.3 * (box.x1-box.x0)
box.y0 += 0.05 * (box.y1-box.y0)
ax.set_position(box)
ax = plt.subplot(3,4,11)
rmses_preds = [rmses_pred_analyticNet, rmses_pred_deepNet]
pred_lens = [pred_lens_analyticNet, pred_lens_deepNet]
clrs = ['b', 'orange']
labels = ['simulator', 'emulator']
for rmses_pred, pred_len, clr, lbl in zip(rmses_preds, pred_lens, clrs, labels):
nanmean, nanstd = np.nanmean(rmses_pred, axis=(0,-1)), np.nanstd(rmses_pred, axis=(0,-1))
#nanstd /= np.sqrt(rmses_pred.shape[0] * rmses_pred.shape[-1])
plt.plot(pred_len, nanmean, '-', color=clr, linewidth=2.5, label=lbl)
plt.plot(pred_len, nanmean-nanstd, '--', color=clr, linewidth=1.0)
plt.plot(pred_len, nanmean+nanstd, '--', color=clr, linewidth=1.0)
plt.xlabel('forecast time [au]', fontsize=fontsize)
plt.ylabel('prediction RMSE', fontsize=fontsize)
plt.yticks([0., 1, 2.0], fontsize=fontsize)
#plt.xticks(pred_len[::8], pred_len[::8]*16*args['dt'], fontsize=fontsize)
plt.xticks(np.linspace(0,5,3), np.linspace(0,5,3)*16*args['dt'], fontsize=fontsize)
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
plt.gcf().text(0.09, 0.88, 'a)', fontsize=fontsize, weight='bold')
plt.gcf().text(0.09, 0.75, 'b)', fontsize=fontsize, weight='bold')
plt.gcf().text(0.09, 0.62, 'c)', fontsize=fontsize, weight='bold')
plt.gcf().text(0.09, 0.36, 'd)', fontsize=fontsize, weight='bold')
plt.gcf().text(0.53, 0.88, 'e)', fontsize=fontsize, weight='bold')
plt.gcf().text(0.53, 0.64, 'f)', fontsize=fontsize, weight='bold')
plt.gcf().text(0.53, 0.33, 'g)', fontsize=fontsize, weight='bold')
box = ax.get_position()
box.x0 += 0.3 * (box.x1-box.x0)
box.y1 -= 0.05 * (box.y1-box.y0)
ax.set_position(box)
plt.savefig(res_dir + 'figs/4DVar.pdf', bbox_inches='tight', pad_inches=0, frameon=False)
plt.show()
# -
# # Illustration figure on best-possible analysis errors
exp_id = '92'
exp_ids_deepNet = [exp_id]
win_lens_deepNet, rmses_analysis_deepNet = get_analysis_rmses_4DVar_exp(exp_ids=exp_ids_deepNet)
plt.plot(np.array(rmses_analysis_deepNet).squeeze())
plt.show()
# +
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
exp_names = os.listdir('experiments_DA/')
conf_exp = exp_names[np.where(np.array([name.split('_')[0] for name in exp_names])==str(exp_id))[0][0]][:-4]
args = setup_4DVar(conf_exp=f'experiments_DA/{conf_exp}.yml')
args.pop('conf_exp')
K,J = args['K'], args['J']
T_win = args['T_win']
model_pars = {
'exp_id' : args['model_exp_id'],
'model_forwarder' : 'rk4_default',
'K_net' : args['K'],
'J_net' : args['J'],
'dt_net' : args['dt']
}
model, model_forwarder, _ = get_model(model_pars, res_dir=res_dir, exp_dir='')
obs_pars = {'obs_operator' : ObsOp_rotsampleGaussian,
'obs_operator_args' : {'frq' : args['obs_operator_frq'],
'sigma2' : args['obs_operator_sig2']}}
model_observer = obs_pars['obs_operator'](**obs_pars['obs_operator_args'])
prior = torch.distributions.normal.Normal(loc=torch.zeros((1,J+1,K)),
scale=1.*torch.ones((1,J+1,K)))
gen = GenModel(model_forwarder, model_observer, prior, T=T_win, x_init=None)
save_dir = 'results/data_assimilation/' + args['exp_id'] + '/'
fn = save_dir + 'out.npy'
out = np.load(res_dir + fn, allow_pickle=True)[()]
# +
def get_pred_rmses_4DVar_exp(exp_id, forecast_len=120):
exp_names = os.listdir('experiments_DA/')
conf_exp = exp_names[np.where(np.array([name.split('_')[0] for name in exp_names])==str(exp_id))[0][0]][:-4]
args = setup_4DVar(conf_exp=f'experiments_DA/{conf_exp}.yml')
args.pop('conf_exp')
#assert args['T_win'] == 64 # we want 4d integration window here
K,J = args['K'], args['J']
T_win = args['T_win']
model_pars = {
'exp_id' : args['model_exp_id'],
'model_forwarder' : 'rk4_default',
'K_net' : args['K'],
'J_net' : args['J'],
'dt_net' : args['dt']
}
model, model_forwarder, _ = get_model(model_pars, res_dir=res_dir, exp_dir='')
obs_operator = args['obs_operator']
obs_pars = {}
if obs_operator=='ObsOp_subsampleGaussian':
obs_pars['obs_operator'] = ObsOp_subsampleGaussian
obs_pars['obs_operator_args'] = {'r' : args['obs_operator_r'], 'sigma2' : args['obs_operator_sig2']}
elif obs_operator=='ObsOp_identity':
obs_pars['obs_operator'] = ObsOp_identity
obs_pars['obs_operator_args'] = {}
elif obs_operator=='ObsOp_rotsampleGaussian':
obs_pars['obs_operator'] = ObsOp_rotsampleGaussian
obs_pars['obs_operator_args'] = {'frq' : args['obs_operator_frq'],
'sigma2' : args['obs_operator_sig2']}
else:
raise NotImplementedError()
model_observer = obs_pars['obs_operator'](**obs_pars['obs_operator_args'])
prior = torch.distributions.normal.Normal(loc=torch.zeros((1,J+1,K)),
scale=1.*torch.ones((1,J+1,K)))
# ### define generative model for observed data
gen = GenModel(model_forwarder, model_observer, prior, T=T_win, x_init=None)
    forecast_win = int(forecast_len/1.5) # number of forecast steps (forecast_len given in hours, 1.5 h per step)
    eval_every = int(1.5/1.5) # evaluate every step (1.5 h)
save_dir = 'results/data_assimilation/' + args['exp_id'] + '/'
fn = save_dir + 'out.npy'
out = np.load(res_dir + fn, allow_pickle=True)[()]
J = args['J']
n_steps = args['n_steps']
T_win = args['T_win']
T_shift = args['T_shift'] if args['T_shift'] >= 0 else T_win
dt = args['dt']
data = out['out']
y, m = out['y'], out['m']
x_sols = out['x_sols']
print('percent of NaN sols', str(np.mean(np.isnan(x_sols))))
losses, times = out['losses'], out['times']
assert T_win == out['T_win']
mses = np.zeros(((data.shape[0] - forecast_win - T_win) // T_shift + 1, forecast_win//eval_every+1, y.shape[1]))
for i in range(len(mses)):
forecasts = gen._forward(x=as_tensor(x_sols[i]), T_obs=np.arange(0,forecast_win+1,eval_every))
n = i * T_shift
for j in range(mses.shape[1]): # loop over integration windows
forecast = forecasts[j].detach().cpu().numpy()
if np.any(np.isnan(forecast)):
print('warning - had NaN in forecasts!')
y_obs = data[n+j*eval_every]
mses[i,j] = np.nanmean((forecast - y_obs)**2, axis=(-2, -1))
pred_lens = 1.5/24 * np.arange(0, forecast_win+1, eval_every)
return pred_lens, np.sqrt(mses)
pred_lens_deepNet, rmses_pred_deepNet = get_pred_rmses_4DVar_exp(exp_id=exp_id, forecast_len=int(T_win*1.5))
# -
np.mean(rmses_pred_deepNet[:,:,:].mean(axis=(0,2)))
# +
i = 0
i_plot = 1
plt.figure(figsize=(12, 8))
plt.subplot(2,3,1)
plt.plot(rmses_pred_deepNet[:,:,:].mean(axis=(0,2)))
plt.ylabel('analysis RMSE')
plt.xlabel('position within integration window')
for offset in [0, np.argmin(rmses_pred_deepNet[:,:,:].mean(axis=(0,2)))]:
x_true = sortL96fromChannels(out['out'])[offset:out['x_sols'].shape[0]+offset,i,:].T
x_sols = sortL96fromChannels(out['x_sols'])[:,i,:].T
x_pred = gen._forward(sortL96intoChannels(as_tensor(x_sols.T),J=0) , T_obs=[offset])[0].detach().cpu().numpy()
x_pred = sortL96fromChannels(x_pred).T
plt.subplot(3,3,i_plot+1)
plt.imshow(x_true, aspect='auto')
plt.colorbar()
if i_plot == 1:
plt.ylabel('true state')
plt.yticks([])
plt.title(f'offset={offset}')
plt.subplot(3,3,i_plot+4)
plt.imshow(x_pred, aspect='auto')
plt.colorbar()
if i_plot == 1:
plt.ylabel('4D-Var analysis')
plt.yticks([])
plt.subplot(3,3,i_plot+7)
plt.imshow(x_true - x_pred, cmap='bwr', aspect='auto')
plt.colorbar()
if i_plot == 1:
plt.ylabel('difference')
plt.yticks([])
i_plot += 1
plt.show()
# -
| figure_DA.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # United States - Crime Rates - 1960 - 2014
# ### Introduction:
#
# This time you will work with a dataset of US crime rates from 1960 to 2014 and apply time-based resampling to it.
#
# Special thanks to: https://github.com/justmarkham for sharing the dataset and materials.
#
# ### Step 1. Import the necessary libraries
import pandas as pd
import numpy as np
# ### Step 2. Import the dataset from this [address](https://raw.githubusercontent.com/guipsamora/pandas_exercises/master/04_Apply/US_Crime_Rates/US_Crime_Rates_1960_2014.csv).
# ### Step 3. Assign it to a variable called crime.
url = "https://raw.githubusercontent.com/guipsamora/pandas_exercises/master/04_Apply/US_Crime_Rates/US_Crime_Rates_1960_2014.csv"
crime = pd.read_csv(url)
crime.head()
# ### Step 4. What is the type of the columns?
crime.info()
# ##### Have you noticed that the type of Year is int64? Pandas has a dedicated type for working with time series. Let's see it now.
#
# ### Step 5. Convert the type of the column Year to datetime64
crime.Year = pd.to_datetime(crime.Year, format='%Y')
crime.info()
# ### Step 6. Set the Year column as the index of the dataframe
crime = crime.set_index('Year', drop = True)
crime
# ### Step 7. Delete the Total column
del crime['Total']
crime.head()
# ### Step 8. Group the year by decades and sum the values
#
# #### Pay attention to the Population column number, summing this column is a mistake
crimes = crime.resample('10AS').sum()
# Population is a stock, not a yearly flow, so take each decade's maximum instead of its sum
crimes['Population'] = crime['Population'].resample('10AS').max()
crimes.head()
# ### Step 9. What is the most dangerous decade to live in the US?
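# One possible answer (a sketch using the decade totals computed above): look up the decade with the peak value in each crime column. Population trivially peaks in the most recent decade, so focus on the crime columns.
crimes.idxmax(0)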
| 04_Apply/US_Crime_Rates/Exercises.ipynb |
# ---
# jupyter:
# jupytext:
# split_at_heading: true
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
#export
import io,sys,json,glob
from fastscript import call_parse,Param
from nbdev.imports import Config
from pathlib import Path
# +
# default_exp clean
# -
#hide
#For tests only
from nbdev.imports import *
# # Clean notebooks
#
# > Strip notebooks from superfluous metadata
# To avoid pointless conflicts while working with jupyter notebooks (with different execution counts or cell metadata), it is recommended to clean the notebooks before committing anything (done automatically if you install the git hooks with `nbdev_install_git_hooks`). The following functions are used to do that.
# ## Utils
# export
def rm_execution_count(o):
"Remove execution count in `o`"
if 'execution_count' in o: o['execution_count'] = None
# export
def clean_cell_output(cell):
"Remove execution count in `cell`"
if 'outputs' in cell:
for o in cell['outputs']: rm_execution_count(o)
#export
cell_metadata_keep = ["hide_input"]
nb_metadata_keep = ["kernelspec", "jekyll", "jupytext", "doc"]
# export
def clean_cell(cell, clear_all=False):
    "Clean `cell` by removing superfluous metadata, or everything except the input if `clear_all`"
rm_execution_count(cell)
if 'outputs' in cell:
if clear_all: cell['outputs'] = []
else: clean_cell_output(cell)
cell['metadata'] = {} if clear_all else {k:v for k,v in cell['metadata'].items() if k in cell_metadata_keep}
# +
tst = {'cell_type': 'code',
'execution_count': 26,
'metadata': {'hide_input': True, 'meta': 23},
'outputs': [{'execution_count': 2, 'output': 'super'}],
'source': 'awesome_code'}
tst1 = tst.copy()
clean_cell(tst)
test_eq(tst, {'cell_type': 'code',
'execution_count': None,
'metadata': {'hide_input': True},
'outputs': [{'execution_count': None, 'output': 'super'}],
'source': 'awesome_code'})
clean_cell(tst1, clear_all=True)
test_eq(tst1, {'cell_type': 'code',
'execution_count': None,
'metadata': {},
'outputs': [],
'source': 'awesome_code'})
# -
# export
def clean_nb(nb, clear_all=False):
    "Clean `nb` from superfluous metadata, passing `clear_all` to `clean_cell`"
for c in nb['cells']: clean_cell(c, clear_all=clear_all)
nb['metadata'] = {k:v for k,v in nb['metadata'].items() if k in nb_metadata_keep }
# +
tst = {'cell_type': 'code',
'execution_count': 26,
'metadata': {'hide_input': True, 'meta': 23},
'outputs': [{'execution_count': 2, 'output': 'super'}],
'source': 'awesome_code'}
nb = {'metadata': {'kernelspec': 'some_spec', 'jekyll': 'some_meta', 'meta': 37},
'cells': [tst]}
clean_nb(nb)
test_eq(nb['cells'][0], {'cell_type': 'code',
'execution_count': None,
'metadata': {'hide_input': True},
'outputs': [{'execution_count': None, 'output': 'super'}],
'source': 'awesome_code'})
test_eq(nb['metadata'], {'kernelspec': 'some_spec', 'jekyll': 'some_meta'})
# -
#export
import io,sys,json
# export
def _print_output(nb):
"Print `nb` in stdout for git things"
_output_stream = io.TextIOWrapper(sys.stdout.buffer, encoding='utf-8')
x = json.dumps(nb, sort_keys=True, indent=1, ensure_ascii=False)
_output_stream.write(x)
_output_stream.write("\n")
_output_stream.flush()
# ## Main function
# export
@call_parse
def nbdev_clean_nbs(fname:Param("A notebook name or glob to convert", str)=None,
clear_all:Param("Clean all metadata and outputs", bool)=False,
disp:Param("Print the cleaned outputs", bool)=False,
                    read_input_stream:Param("Read the input stream instead of the notebook folder")=False):
"Clean all notebooks in `fname` to avoid merge conflicts"
#Git hooks will pass the notebooks in the stdin
if read_input_stream and sys.stdin:
input_stream = io.TextIOWrapper(sys.stdin.buffer, encoding='utf-8')
nb = json.load(input_stream)
clean_nb(nb, clear_all=clear_all)
_print_output(nb)
return
if fname is None:
try: path = Config().nbs_path
except Exception as e: path = Path.cwd()
files = path.glob('**/*.ipynb') if fname is None else glob.glob(fname)
for f in files:
if not str(f).endswith('.ipynb'): continue
nb = json.load(open(f, 'r', encoding='utf-8'))
clean_nb(nb, clear_all=clear_all)
if disp: _print_output(nb)
else:
x = json.dumps(nb, sort_keys=True, indent=1, ensure_ascii=False)
with io.open(f, 'w', encoding='utf-8') as f:
f.write(x)
f.write("\n")
# By default (`fname` left to `None`), all the notebooks in `lib_folder` are cleaned. You can opt in to fully clean the notebook, removing every bit of metadata and the cell outputs, by passing `clear_all=True`. `disp` is only used internally with git hooks and will print the cleaned notebook instead of saving it. Same for `read_input_stream`, which will read the notebook from the input stream instead of the file names.
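# For example, to clean a single notebook in place from the shell (assuming the `nbdev_clean_nbs` console script is installed and the command is run from the notebooks folder):
# !nbdev_clean_nbs --fname 07_clean.ipynb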
# ## Export -
#hide
from nbdev.export import *
notebook2script()
| test/07_clean.ipynb |
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .r
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: R
# language: R
# name: ir
# ---
# # Day 1. Chapter 1. Basics
#
# ## Lesson 7. Indexing and Slicing Vectors
#
# In R, square brackets are used for vector indexing.
#
# Suppose you are given the following 2 vectors:
v1 <- c(1,10,100,1000)
v2 <- c('o','l','H','l','e')
v1
v2
# Indexing works as follows: you pass the position of the element as a number inside the square brackets. *Indexing in R starts at 1* (whereas in many other languages, such as Python or Java, 0 is the index of the first element).
# The third element of the v1 vector
v1[3]
# The first element of the v2 vector
v2[1]
# ## Indexing multiple elements
#
# Here, instead of a single index number, you pass a vector of indices. For example:
v1[c(1,3)]
v2[c(3,5,4,2,1)]
# ## Slicing out a sub-vector
#
# You use a colon (`:`) to slice out a piece of a vector. The format looks like this:
#
# vector[start_index:stop_index]
#
# This returns a "piece" of the vector as output. For example:
v <- 100:110
v
v[2:7]
v[1:3]
v[8:10]
# As you can see, both `start_index` and `stop_index` are included in the output.
# ## Indexing by name
#
# As an example, let's take the temperature measurements named after weekdays from Lesson 4.
temp_week_pateint_1 <- c(36.8,37.2,37.9,37.1,36.6,36.6,36.6)
names(temp_week_pateint_1) <- c('Mo','Di','Mi','Do','Fr','Sa','So')
temp_week_pateint_1
# Then you can access the temperature measured for a patient directly via the weekday abbreviation.
temp_week_pateint_1['Mo']
temp_week_pateint_1['Mi']
temp_week_pateint_1['Fr']
# Or pass a whole vector of names!
# Note: not in the original order
temp_week_pateint_1[c('Fr','Mi','Mo')]
# ## Selection with comparison operators and filters
#
# You can use comparison operators to select particular vector elements. This is also known as "boolean masking", because a logical vector is created and then used to select the elements. Here is an example:
temp_week_pateint_1
# Let's find out on which days the patient had an elevated temperature:
# #### Step 1.
#
# First we compute a "logical mask" (the boolean vector temp_week_pateint_1 > 36.6)
temp_week_pateint_1 > 36.6
# #### Step 2.
#
# Now we pass this "logical" vector inside the index brackets. Only the values whose mask value is *TRUE* are returned.
temp_week_pateint_1[ temp_week_pateint_1 > 36.6 ]
# For readability and/or reuse, you can assign the logical vector to a filter variable and then pass that variable for indexing. For example:
# Filter for days with elevated temperature
filter_high_temp <- temp_week_pateint_1 > 36.6
filter_high_temp
# Days with elevated temperature
temp_week_pateint_1[ filter_high_temp ]
# Filter for days with normal temperature
filter_norm_temp <- temp_week_pateint_1 == 36.6
filter_norm_temp
# Days with normal temperature
temp_week_pateint_1[filter_norm_temp]
# Congratulations! You have finished Lesson 7.
| 1.1 R Basics/de-DE/.ipynb_checkpoints/1.1.07 R - Vector Indexing-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: conda_python3
# language: python
# name: conda_python3
# ---
# # Bring Your Own Model with SageMaker Script Mode
# ### Overview
# This notebook will demonstrate how you can bring your own model by using custom training and inference scripts, similar to those you would use outside of SageMaker, with SageMaker's prebuilt containers for various frameworks like Scikit-learn, PyTorch, and XGBoost.
#
# SageMaker Script Mode is flexible so you'll also be seeing examples of how to include your own dependencies, such as a custom Python library, in your training and inference.
#
# The following diagram provides a solution overview:
#
# <img title="SageMaker Script Mode" alt="Solution diagram" src="solution-diagram.jpg">
# ### Prerequisites
# To follow along, you need to create an IAM role, a SageMaker Notebook instance, and an S3 bucket. You may click on the [Launch Stack](https://console.aws.amazon.com/cloudformation/home#/stacks/new?stackName=ScriptModeDemo&templateURL=https://script-mode-blog.s3.amazonaws.com/script-mode-blog-cfn.yml) button, which will create the aforementioned resources and clone the `amazon-sagemaker-examples` GitHub repo into the notebook instance. Give the S3 bucket a unique name; you can also give the CloudFormation stack and notebook unique names such as "script mode". You can leave the other default settings in the CloudFormation template.
#
# Once the SageMaker Notebook instance is created, choose `conda_python3` as the kernel.
# ### Imports
import sagemaker
import subprocess
import sys
import random
import math
import pandas as pd
import os
import boto3
import numpy as np
from sklearn.preprocessing import StandardScaler
from sagemaker.pytorch import PyTorch
from sagemaker.xgboost import XGBoost
from sagemaker.sklearn.estimator import SKLearn
from sagemaker.serializers import NumpySerializer, JSONSerializer, CSVSerializer
from sagemaker.deserializers import NumpyDeserializer, JSONDeserializer
from sagemaker.predictor import Predictor
from generate_synthetic_housing_data import *
# Make sure your SageMaker version is updated.
# SageMaker Python SDK version 2.x is required
original_version = sagemaker.__version__
if sagemaker.__version__ != "2.24.1":
subprocess.check_call([sys.executable, "-m", "pip", "install", "sagemaker==2.24.1"])
import importlib
importlib.reload(sagemaker)
# ### Parameters
# +
random.seed(42)
# Useful SageMaker variables
try:
# You're using a SageMaker notebook
sess = sagemaker.Session()
bucket = sess.default_bucket()
role = sagemaker.get_execution_role()
except ValueError:
# You're using a notebook somewhere else
print("Setting role and SageMaker session manually...")
bucket = "bobby-demo"
region = "us-west-2"
iam = boto3.client("iam")
sagemaker_client = boto3.client("sagemaker")
sagemaker_execution_role_name = (
"AmazonSageMaker-ExecutionRole-20200630T141851" # Change this to your role name
)
role = iam.get_role(RoleName=sagemaker_execution_role_name)["Role"]["Arn"]
boto3.setup_default_session(region_name=region, profile_name="default")
sess = sagemaker.Session(sagemaker_client=sagemaker_client, default_bucket=bucket)
# Local data paths
train_dir = os.path.join(os.getcwd(), "data/train")
test_dir = os.path.join(os.getcwd(), "data/test")
os.makedirs(train_dir, exist_ok=True)
os.makedirs(test_dir, exist_ok=True)
# Data paths in S3
s3_prefix = "script-mode-workflow"
csv_s3_prefix = f"{s3_prefix}/csv"
csv_s3_uri = f"s3://{bucket}/{s3_prefix}/csv"
numpy_train_s3_prefix = f"{s3_prefix}/numpy/train"
numpy_train_s3_uri = f"s3://{bucket}/{numpy_train_s3_prefix}"
numpy_test_s3_prefix = f"{s3_prefix}/numpy/test"
numpy_test_s3_uri = f"s3://{bucket}/{numpy_test_s3_prefix}"
csv_train_s3_uri = f"{csv_s3_uri}/train"
csv_test_s3_uri = f"{csv_s3_uri}/test"
# Enable Local Mode training
enable_local_mode_training = False
# Endpoint names
sklearn_endpoint_name = "randomforestregressor-endpoint"
pytorch_endpoint_name = "pytorch-endpoint"
xgboost_endpoint_name = "xgboost-endpoint"
# -
# !wget -q https://raw.githubusercontent.com/aws-samples/amazon-sagemaker-script-mode/master/local_mode_setup.sh
# !wget -q https://raw.githubusercontent.com/aws-samples/amazon-sagemaker-script-mode/master/daemon.json
# !/bin/bash ./local_mode_setup.sh
# ### Prepare Synthetic Housing Data
# For all the examples below, we'll be generating a synthetic housing dataset.
# +
df = generate_houses(1506)
# Get training columns
train_cols = list(df.columns)
del train_cols[-1]
train_cols
# Split data
training_index = math.floor(0.8 * df.shape[0])
x_train, y_train = df[train_cols][:training_index], df.PRICE[:training_index]
x_test, y_test = df[train_cols][training_index:], df.PRICE[training_index:]
# Scale price
y_train = y_train / 100000
y_test = y_test / 100000
# Standardize data
x_train_np = StandardScaler().fit_transform(x_train)
x_test_np = StandardScaler().fit_transform(x_test)
# -
x_train.head()
# Rearrange the dataframe for SageMaker training, placing the (already scaled) price in the first column.
# +
train_df = pd.DataFrame(data=x_train_np)
train_df.columns = x_train.columns
train_df["PRICE"] = y_train  # price was already scaled above
first_col = train_df.pop("PRICE")
train_df.insert(0, "PRICE", first_col)
test_df = pd.DataFrame(data=x_test_np)
test_df.columns = x_test.columns
test_df["PRICE"] = y_test.reset_index(drop=True)  # price was already scaled above
first_col = test_df.pop("PRICE")
test_df.insert(0, "PRICE", first_col)
# -
# Save as both CSV and Numpy data types to demonstrate data type flexibility in model training.
# +
# Save as CSV
train_df.to_csv(f"{train_dir}/train.csv", header=False, index=False)
test_df.to_csv(f"{test_dir}/test.csv", header=False, index=False)
# Save as Numpy
np.save(os.path.join(train_dir, "x_train.npy"), x_train_np)
np.save(os.path.join(test_dir, "x_test.npy"), x_test_np)
np.save(os.path.join(train_dir, "y_train.npy"), y_train)
np.save(os.path.join(test_dir, "y_test.npy"), y_test)
# -
# Upload the data to S3
s3_resource_bucket = boto3.Session().resource("s3").Bucket(bucket)
s3_resource_bucket.Object(os.path.join(csv_s3_prefix, "train.csv")).upload_file(
"data/train/train.csv"
)
s3_resource_bucket.Object(os.path.join(csv_s3_prefix, "test.csv")).upload_file("data/test/test.csv")
s3_resource_bucket.Object(os.path.join(numpy_train_s3_prefix, "x_train.npy")).upload_file(
"data/train/x_train.npy"
)
s3_resource_bucket.Object(os.path.join(numpy_train_s3_prefix, "y_train.npy")).upload_file(
"data/train/y_train.npy"
)
s3_resource_bucket.Object(os.path.join(numpy_test_s3_prefix, "x_test.npy")).upload_file(
"data/test/x_test.npy"
)
s3_resource_bucket.Object(os.path.join(numpy_test_s3_prefix, "y_test.npy")).upload_file(
"data/test/y_test.npy"
)
# ### Scikit-learn
# The first "level" of script mode is the ability to define your own training job, model, and inference process without any dependencies. This is done using a customized Python script and pointing to that script as the "entry point" when defining your SageMaker training estimator. There is no "out-of-the-box" random forest algorithm on SageMaker, but there is support for scikit-learn containers, and scikit-learn does have random forest implementations, including regressors and classifiers. Here, we demonstrate the implementation of a custom random forest regressor to predict housing prices using our synthetic housing dataset.
#
# Script Mode in SageMaker allows you to take control of the training and inference process without having to go through the trouble of creating and maintaining your own docker containers. For example, if you want to use a scikit-learn algorithm, just use the AWS-provided scikit-learn container and pass it your own training and inference code. On your behalf, the SageMaker Python SDK will package this entry point script (which can be your training and/or inference code), upload it to S3, and set two environment variables that are read at runtime and load the custom training and inference functions from the entry point script. These two environment variables are `SAGEMAKER_SUBMIT_DIRECTORY` which is set to the S3 path of the package and `SAGEMAKER_PROGRAM` which is set to the name of the script (which in our case is `train_deploy_scikitlearn_without_dependencies.py`).
#
# The process is the same if you want to use an XGBoost model (use the XGBoost container) or a custom PyTorch model (use the PyTorch container). Since you're passing in your own script (which is why we call it "script mode"), you get to define the model, the training process, and the inference process as well.
#
# Below we include an entry point script called `train_deploy_scikitlearn_without_dependencies.py` which contains our custom training and inference code.
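# Here is a minimal sketch of how such an entry point is typically structured (illustrative only; the actual `train_deploy_scikitlearn_without_dependencies.py` in `scikitlearn_script/` may differ). Hyperparameters arrive as command-line arguments, data and output locations come from SageMaker's `SM_*` environment variables, and `model_fn` is the hook the Scikit-learn serving container calls to load the fitted model.
# +
import argparse
import os
import joblib
import pandas as pd
from sklearn.ensemble import RandomForestRegressor
def model_fn(model_dir):
    """Deserialize the fitted model for the inference container."""
    return joblib.load(os.path.join(model_dir, "model.joblib"))
if __name__ == "__main__" and "SM_CHANNEL_TRAIN" in os.environ:
    # This branch only runs inside the training container, where SageMaker sets these variables
    parser = argparse.ArgumentParser()
    parser.add_argument("--max_depth", type=int, default=20)
    parser.add_argument("--n_jobs", type=int, default=4)
    parser.add_argument("--n_estimators", type=int, default=120)
    parser.add_argument("--model_dir", type=str, default=os.environ["SM_MODEL_DIR"])
    parser.add_argument("--train", type=str, default=os.environ["SM_CHANNEL_TRAIN"])
    args, _ = parser.parse_known_args()
    # PRICE was written as the first column of train.csv above
    train_data = pd.read_csv(os.path.join(args.train, "train.csv"), header=None)
    y, X = train_data.iloc[:, 0], train_data.iloc[:, 1:]
    model = RandomForestRegressor(max_depth=args.max_depth, n_jobs=args.n_jobs, n_estimators=args.n_estimators)
    model.fit(X, y)
    joblib.dump(model, os.path.join(args.model_dir, "model.joblib"))
# -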
# +
hyperparameters = {"max_depth": 20, "n_jobs": 4, "n_estimators": 120}
if enable_local_mode_training:
train_instance_type = "local"
inputs = {"train": f"file://{train_dir}", "test": f"file://{test_dir}"}
else:
train_instance_type = "ml.c5.xlarge"
inputs = {"train": csv_train_s3_uri, "test": csv_test_s3_uri}
estimator_parameters = {
"entry_point": "train_deploy_scikitlearn_without_dependencies.py",
"source_dir": "scikitlearn_script",
"framework_version": "0.23-1",
"py_version": "py3",
"instance_type": train_instance_type,
"instance_count": 1,
"hyperparameters": hyperparameters,
"role": role,
"base_job_name": "randomforestregressor-model",
}
estimator = SKLearn(**estimator_parameters)
estimator.fit(inputs)
# -
# After the estimator finishes training, we can deploy it to a SageMaker endpoint.
existing_endpoints = sess.sagemaker_client.list_endpoints(
NameContains=sklearn_endpoint_name, MaxResults=30
)["Endpoints"]
if not existing_endpoints:
sklearn_predictor = estimator.deploy(
initial_instance_count=1, instance_type="ml.m5.xlarge", endpoint_name=sklearn_endpoint_name
)
else:
sklearn_predictor = Predictor(
endpoint_name="randomforestregressor-endpoint",
sagemaker_session=sess,
serializer=NumpySerializer(),
deserializer=NumpyDeserializer(),
)
# Then we can use the SageMaker endpoint to make predictions.
sklearn_predictor.predict(x_test)
# ### PyTorch
# The second "level" of script mode is the ability to modularize and logically organize your custom training jobs, models, and inference processes.
#
# Sometimes keeping all your code in one Python file can be unwieldy. Script Mode gives you the flexibility to split your code into multiple Python files. To illustrate this feature, we build a custom PyTorch model and logically separate the model definition from the training and inference logic. This is done by stipulating the source directory when defining your SageMaker training estimator (illustrated below). Once again, the model is not supported "out-of-the-box", but the PyTorch framework is and can be leveraged in the same manner as scikit-learn was in the previous example.
#
# In this PyTorch example, we want to separate the actual neural network definition from the rest of the code by putting it into its own file as demonstrated in the `pytorch_script/` folder.
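# As a sketch, the resulting layout looks like this (the module name `model_def.py` is hypothetical; the entry point name comes from the estimator definition below):
#
#     pytorch_script/
#     ├── model_def.py                                   <- holds the custom nn.Module subclass
#     └── train_deploy_pytorch_without_dependencies.py   <- entry point, which can simply `from model_def import ...`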
# +
hyperparameters = {"epochs": 25, "batch_size": 128, "learning_rate": 0.01}
if enable_local_mode_training:
train_instance_type = "local"
inputs = {"train": f"file://{train_dir}", "test": f"file://{test_dir}"}
else:
train_instance_type = "ml.c5.xlarge"
inputs = {"train": numpy_train_s3_uri, "test": numpy_test_s3_uri}
estimator_parameters = {
"entry_point": "train_deploy_pytorch_without_dependencies.py",
"source_dir": "pytorch_script",
"instance_type": train_instance_type,
"instance_count": 1,
"hyperparameters": hyperparameters,
"role": role,
"base_job_name": "pytorch-model",
"framework_version": "1.5",
"py_version": "py3",
}
estimator = PyTorch(**estimator_parameters)
estimator.fit(inputs)
# -
# Again, after the estimator finishes training, we can deploy it to a SageMaker endpoint.
existing_endpoints = sess.sagemaker_client.list_endpoints(
NameContains=pytorch_endpoint_name, MaxResults=30
)["Endpoints"]
if not existing_endpoints:
pytorch_predictor = estimator.deploy(
initial_instance_count=1, instance_type="ml.m5.xlarge", endpoint_name=pytorch_endpoint_name
)
else:
pytorch_predictor = Predictor(
endpoint_name="pytorch-endpoint",
sagemaker_session=sess,
serializer=JSONSerializer(),
deserializer=JSONDeserializer(),
)
# Then we can use the endpoint to make predictions.
# +
pytorch_predictor.serializer = JSONSerializer()
pytorch_predictor.deserializer = JSONDeserializer()
pytorch_predictor.predict(x_test.values[0])
# -
# ### XGBoost
# The third "level" of script mode is the ability to bring your own libraries and dependencies to support custom functionality within your models, training jobs, and inference processes. This supercharges your customization options, and allows you to import libraries you have created yourself or Python packages hosted on PyPi.
#
# Perhaps the number of Python files you have is becoming unwieldy now or you want more organization. In this scenario, you might be tempted to create your own Python library. Or maybe you wish to implement a function not currently supported by SageMaker in the training phase (such as k-fold cross validation).
#
# Script Mode supports adding custom libraries and those libraries don't have to be in the same directory as your entry point Python script. You simply need to stipulate the custom library or other dependencies when defining your SageMaker training estimator (illustrated below). SageMaker will copy the library folder to the same folder where the entry point script is located when the training job is kicked off.
#
# In this example, we implement k-fold cross validation for an XGBoost model using a custom built library called `my_custom_library`. While XGBoost is supported "out-of-the-box" on SageMaker, that version does not support k-fold cross validation for training. Thus we use script mode to leverage the supported XGBoost container and the concomitant flexibility to include our custom libraries and dependencies.
#
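# Conceptually, `my_custom_library` provides something like the following k-fold splitting helper (an illustrative sketch only; the library's real contents are not shown in this notebook and may differ):
# +
import numpy as np
def kfold_indices(n_rows, k):
    "Yield (train_idx, val_idx) index pairs for k-fold cross validation."
    folds = np.array_split(np.arange(n_rows), k)
    for i in range(k):
        val_idx = folds[i]
        train_idx = np.concatenate([f for j, f in enumerate(folds) if j != i])
        yield train_idx, val_idx
# e.g. K=5 folds over the synthetic training set created earlier; the entry point
# would train one XGBoost booster per fold and average the validation scores
[(len(tr), len(va)) for tr, va in kfold_indices(len(train_df), k=5)]
# -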
# +
hyperparameters = {"num_round": 6, "K": 5}
if enable_local_mode_training:
train_instance_type = "local"
inputs = {"train": f"file://{train_dir}"}
else:
train_instance_type = "ml.c5.xlarge"
inputs = {"train": csv_s3_uri}
estimator_parameters = {
"entry_point": "train_deploy_xgboost_with_dependencies.py",
"source_dir": "xgboost_script",
"dependencies": ["my_custom_library"],
"instance_type": train_instance_type,
"instance_count": 1,
"hyperparameters": hyperparameters,
"role": role,
"base_job_name": "xgboost-model",
"framework_version": "1.0-1",
"py_version": "py3",
}
estimator = XGBoost(**estimator_parameters)
estimator.fit(inputs)
# -
# After we train the model with k-fold cross validation, we can deploy it to a SageMaker endpoint.
existing_endpoints = sess.sagemaker_client.list_endpoints(
NameContains=xgboost_endpoint_name, MaxResults=30
)["Endpoints"]
if not existing_endpoints:
xgboost_predictor = estimator.deploy(
initial_instance_count=1, instance_type="ml.m5.xlarge", endpoint_name=xgboost_endpoint_name
)
else:
xgboost_predictor = Predictor(
endpoint_name="xgboost-endpoint",
sagemaker_session=sess,
serializer=CSVSerializer(),
deserializer=JSONDeserializer(),
)
# Then you can use the endpoint to make predictions.
xgboost_predictor.serializer = CSVSerializer()
xgboost_predictor.deserializer = JSONDeserializer()
xgboost_predictor.predict(x_test.values[0])[0]
# ### Cleanup
# +
resources = (
[sklearn_endpoint_name, sklearn_predictor],
[pytorch_endpoint_name, pytorch_predictor],
[xgboost_endpoint_name, xgboost_predictor],
)
for resource in resources:
existing_endpoints = sess.sagemaker_client.list_endpoints(
NameContains=resource[0], MaxResults=30
)["Endpoints"]
if existing_endpoints:
resource[1].delete_endpoint(delete_endpoint_config=True)
| sagemaker-script-mode/sagemaker-script-mode.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # 3. Generating Multiple Samples using MS1 Controller
# In this notebook, we demonstrate how ViMMS can be used to generate multiple samples (sets of chemicals) that are biological and technical replicates. The MS1 controller is then used to produce mass spectral data in the form of .mzML files for the multiple samples.
# %matplotlib inline
# %load_ext autoreload
# %autoreload 2
import pandas as pd
from collections import defaultdict
import os
from pathlib import Path
import sys
sys.path.append('../..')
from vimms.Chemicals import ChemicalCreator, MultiSampleCreator
from vimms.MassSpec import IndependentMassSpectrometer
from vimms.Controller import SimpleMs1Controller
from vimms.Environment import Environment
from vimms.Common import *
# Load previously trained KDEs in `PeakSampler` and the list of extracted metabolites, created in **01. Download Data.ipynb**.
base_dir = os.path.abspath('example_data')
ps = load_obj(Path(base_dir, 'peak_sampler_mz_rt_int_19_beers_fullscan.p'))
hmdb = load_obj(Path(base_dir, 'hmdb_compounds.p'))
# Set ViMMS logging level
set_log_level_warning()
# set_log_level_info()
# set_log_level_debug()
# ## Create Initial Chemical
# Define an output folder containing our results
out_dir = Path(base_dir, 'results', 'MS1_multiple')
# Here we generate multiple chemical objects that will be used across samples. The chemical objects are generated by sampling from metabolites in the HMDB database.
# +
# the list of ROI sources created in the previous notebook '01. Download Data.ipynb'
ROI_Sources = [str(Path(base_dir,'DsDA', 'DsDA_Beer', 'beer_t10_simulator_files'))]
# minimum MS1 intensity of chemicals
min_ms1_intensity = 1.75E5
# m/z and RT range of chemicals
rt_range = [(400, 800)]
mz_range = [(100, 400)]
# the number of chemicals in the sample
n_chems = 1000
# maximum MS level (we do not generate fragmentation peaks when this value is 1)
ms_level = 1
# for this experiment, we restrict the sampled chromatograms to be within 20 - 40s in length
# so they are not too big and too small
roi_rt_range = [20, 40]
# -
chems = ChemicalCreator(ps, ROI_Sources, hmdb)
dataset = chems.sample(mz_range, rt_range, min_ms1_intensity, n_chems, ms_level, roi_rt_range=roi_rt_range)
save_obj(dataset, Path(out_dir, 'BaseDataset', 'dataset.p'))
for chem in dataset[0:10]:
print(chem)
# ## Create Multiple Samples
# The next section allows us to define classes of biological replicates, each having multiple technical replicates.
#
# Below we create two biological classes ('class0', 'class1'), each having 10 technical replicates with some noise on the chemical's intensity.
n_samples = [10, 10] # number of files per class
classes = ["class%d" % i for i in range(len(n_samples))] # creates default list of classes
intensity_noise_sd = [1000] # noise on max intensity
classes
# Add intensity changes between different classes
change_probabilities = [0 for i in range(len(n_samples))] # probability of intensity changes between different classes
change_differences_means = [0 for i in range(len(n_samples))] # mean of those intensity changes
change_differences_sds = [0 for i in range(len(n_samples))] # SD of those intensity changes
# Add experimental variables (examples in comments)
experimental_classes = None # [["male","female"],["Positive","Negative","Unknown"]]
experimental_probabilitities = None # [[0.5,0.5],[0.33,0.33,0.34]]
experimental_sds = None # [[250],[250]]
# Dropout chemicals in different classes
# +
# drop-out chemicals by their probabilities
dropout_probability = 0.2
dropout_probabilities = [dropout_probability for i in range(len(n_samples))]
dropout_numbers = None # drop-out chemicals by an absolute number
# dropout_probabilities = None
# dropout_numbers = 2 # number of chemicals dropped out in each class
# -
# Generate multiple samples
save_location = os.path.join(out_dir, 'ChemicalFiles')
multiple_samples = MultiSampleCreator(dataset, n_samples, classes, intensity_noise_sd,
change_probabilities, change_differences_means, change_differences_sds, dropout_probabilities, dropout_numbers,
experimental_classes, experimental_probabilitities, experimental_sds, save_location=save_location)
total_samples = np.sum(multiple_samples.n_samples)
total_samples
# We can also print the chemicals that are missing (removed by drop-out) in each class.
save_obj(multiple_samples.missing_chemicals, Path(out_dir, 'MissingChemicals', 'missing_chemicals.p'))
multiple_samples.missing_chemicals
# ## Run MS1 controller on the samples and generate .mzML files
# We can now take the multiple samples created above and generate mass spectral data (.mzML files) using the MS1 controller in ViMMS.
# +
min_rt = rt_range[0][0]
max_rt = rt_range[0][1]
controllers = defaultdict(list)
controller_to_mzml = {}
mzml_dir = Path(out_dir, 'mzMLFiles')
num_classes = len(n_samples)
sample_idx = 0
for j in range(num_classes): # loop over classes
num_samples = n_samples[j]
for i in range(num_samples): # loop over samples for each class
# load the sample
fname = Path(save_location, 'sample_%d.p' % sample_idx)
sample = load_obj(fname)
sample_idx += 1
# define output .mzML filename
out_file = 'number_%d_class_%d.mzML' % (i, j)
out_path = Path(mzml_dir, out_file)
# run it through the MS1 controller
mass_spec = IndependentMassSpectrometer(POSITIVE, sample, ps)
controller = SimpleMs1Controller()
# create an environment to run both the mass spec and controller
env = Environment(mass_spec, controller, min_rt, max_rt, progress_bar=True)
# set the log level to WARNING so we don't see too many messages when environment is running
set_log_level_warning()
# run the simulation
env.run()
set_log_level_debug()
env.write_mzML(mzml_dir, out_file)
# save the resulting controller
controllers[j].append(controller)
controller_to_mzml[controller] = (j, out_file, )
# -
# ## Print out the missing peaks
# The controller object contains all the information about the state of the mass spectrometry process over time. Below we demonstrate this by generating a report of peaks corresponding to chemicals that are present in one class but missing from another class. This can be useful for benchmarking peak picking or alignment algorithms.
def get_chem_to_peaks(controller):
chem_to_peaks = defaultdict(list)
frag_events = controller.environment.mass_spec.fragmentation_events
for frag_event in frag_events:
chem = frag_event.chem
peaks = frag_event.peaks
chem_to_peaks[chem].extend(peaks)
return chem_to_peaks
for controller, (current_class, mzml_filename) in controller_to_mzml.items():
controller_peaks = get_chem_to_peaks(controller)
basename = os.path.basename(mzml_filename)
front, back = os.path.splitext(mzml_filename)
outfile = front + '.csv'
missing_peaks = []
for other_class in range(num_classes):
if current_class == other_class:
continue
# get the peaks that are present in current_class but missing in other_class
missing_chems = multiple_samples.missing_chemicals[other_class]
for chem in missing_chems:
peaks = controller_peaks[chem]
for peak in peaks:
row = (chem.formula.formula_string, current_class, other_class, peak.mz, peak.rt, peak.intensity)
missing_peaks.append(row)
# convert to dataframe
columns = ['formula', 'present_in', 'missing_in', 'mz', 'RT', 'intensity']
missing_df = pd.DataFrame(missing_peaks, columns=columns)
missing_df.to_csv(os.path.join(out_dir, 'MissingChemicals', os.path.basename(outfile)))
missing_df.head(20)
| examples/01. vimms (Wandy et al 2019)/03. Multiple Samples Example.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Dynamical Power Spectra (on real data)
# %matplotlib inline
# +
# load auxiliary libraries
import numpy as np
import matplotlib.pyplot as plt
from astropy.io import fits
# import stingray
import stingray
plt.style.use('seaborn-talk')
# -
# # It all starts with a lightcurve...
# Open the event file with astropy.io.fits
f = fits.open('emr_cleaned.fits')
# The time resolution is stored in the header of the first extension under the Keyword `TIMEDEL`
dt = f[1].header['TIMEDEL']
# The column `TIME` of the first extension stores the time of each event
toa = f[1].data['Time']
# Let's create a Lightcurve from the events' times of arrival with a given time resolution
lc = stingray.Lightcurve.make_lightcurve(toa=toa, dt=dt)
lc.plot()
# ---
# # DynamicPowerspectrum
# Let's create a dynamical powerspectrum with a segment size of 16 s and the powers with the "leahy" normalization
dynspec = stingray.DynamicalPowerspectrum(lc=lc, segment_size=16, norm='leahy')
# The `dyn_ps` attribute stores the power matrix; each column corresponds to the powerspectrum of one segment of the light curve
dynspec.dyn_ps
# To plot the DynamicalPowerspectrum matrix, we use the attributes `time` and `freq` to set the extent of the image axes. Have a look at the documentation of matplotlib's `imshow()`.
# +
extent = min(dynspec.time), max(dynspec.time), max(dynspec.freq), min(dynspec.freq)
plt.imshow(dynspec.dyn_ps, origin="lower", aspect="auto", vmin=1.98, vmax=3.0,
interpolation="none", extent=extent)
plt.colorbar()
plt.ylim(700,850)
# -
print("The dynamical powerspectrum has {} frequency bins and {} time bins".format(len(dynspec.freq), len(dynspec.time)))
# ---
# # Rebinning in Frequency
print("The current frequency resolution is {}".format(dynspec.df))
# Let's rebin to a frequency resolution of 2 Hz and using the average of the power
dynspec.rebin_frequency(df_new=2.0, method="average")
print("The new frequency resolution is {}".format(dynspec.df))
# Let's see how the Dynamical Powerspectrum looks now
extent = min(dynspec.time), max(dynspec.time), min(dynspec.freq), max(dynspec.freq)
plt.imshow(dynspec.dyn_ps, origin="lower", aspect="auto", vmin=1.98, vmax=3.0,
interpolation="none", extent=extent)
plt.colorbar()
plt.ylim(500, 1000)
extent = min(dynspec.time), max(dynspec.time), min(dynspec.freq), max(dynspec.freq)
plt.imshow(dynspec.dyn_ps, origin="lower", aspect="auto", vmin=2.0, vmax=3.0,
interpolation="none", extent=extent)
plt.colorbar()
plt.ylim(700,850)
# # Rebin time
# Let's try to improve the visualization by rebinnin our matrix in the time axis
print("The current time resolution is {}".format(dynspec.dt))
# Let's rebin to a time resolution of 64 s
dynspec.rebin_time(dt_new=64.0, method="average")
print("The new time resolution is {}".format(dynspec.dt))
extent = min(dynspec.time), max(dynspec.time), min(dynspec.freq), max(dynspec.freq)
plt.imshow(dynspec.dyn_ps, origin="lower", aspect="auto", vmin=2.0, vmax=3.0,
interpolation="none", extent=extent)
plt.colorbar()
plt.ylim(700,850)
# # Trace maximum
# Let's use the method `trace_maximum()` to find the index of the maximum of each powerspectrum in a certain frequency range, for example between 755 and 782 Hz.
tracing = dynspec.trace_maximum(min_freq=755, max_freq=782)
# This is what the trace looks like
plt.plot(dynspec.time, dynspec.freq[tracing], color='red', alpha=1)
plt.show()
# Let's plot it on top of the dynamic spectrum
extent = min(dynspec.time), max(dynspec.time), min(dynspec.freq), max(dynspec.freq)
plt.imshow(dynspec.dyn_ps, origin="lower", aspect="auto", vmin=2.0, vmax=3.0,
interpolation="none", extent=extent, alpha=0.7)
plt.colorbar()
plt.ylim(740,800)
plt.plot(dynspec.time, dynspec.freq[tracing], color='red', lw=3, alpha=1)
plt.show()
# The spike at 400 Hz is probably a statistical fluctuation; tracing by the maximum power can be dangerous!
#
# We will implement better methods in the future, stay tuned ;)
| DynamicalPowerspectrum/DynamicalPowerspectrum_tutorial_[real_data].ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] slideshow={"slide_type": "slide"}
# # Trends in Delayed Flights Dataset
# ## by <NAME>
# + [markdown] slideshow={"slide_type": "slide"}
# ## Investigation Overview
#
# In exploring this dataset, I set out to find any observable trends in the arrival delay time and delay causes, in relation to the other variables in the dataset.
#
# ## Dataset Overview
#
# The dataset contains information on approximately 1,936,756 domestic flights within the United States during the year 2008. Additional datasets for interpreting carrier codes and IATA codes were downloaded from the Bureau of Transportation Statistics website. Two duplicate rows were removed. The dataset was otherwise left intact.
#
# + slideshow={"slide_type": "skip"}
# import all packages and set plots to be embedded inline
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sb
# %matplotlib inline
# suppress warnings from final output
import warnings
warnings.simplefilter("ignore")
# + slideshow={"slide_type": "skip"}
# load in the dataset into a pandas dataframe
# Import dataset csv file
df = pd.read_csv('DelayedFlights.csv', index_col=0)
# import additional files to assist in column formatting
df_carriers = pd.read_csv('carriers.csv')
df_airports = pd.read_csv('airports.csv')
# + slideshow={"slide_type": "skip"}
# data wrangling, changing datatype, tidying date columns, et cetera
df['CRSDepTime'] = pd.to_datetime(df['CRSDepTime'].apply(int).apply(str).apply(lambda x: x.zfill(4)), format='%H%M').dt.time
df.rename(columns={'DayofMonth':'Day'}, inplace=True)
df['FlightDate'] = pd.to_datetime(df[['Year','Month','Day']])
df['FlightDate'] = pd.to_datetime(df.FlightDate.astype(str) + ' ' + df.CRSDepTime.astype(str))
df.loc[df['ArrDelay'] < 15.0, 'FlightStatus'] = 1
df.loc[df['ArrDelay'] >= 15.0, 'FlightStatus'] = 2
df.loc[df['Diverted'] == 1, 'FlightStatus'] = 3
df.loc[df['Cancelled'] == 1, 'FlightStatus'] = 4
df['FlightStatus'] = df['FlightStatus'].astype(int)
status_dict = {1:'On Time',2:'Late',3:'Diverted',4:'Cancelled'}
df[['FlightStatus','CancellationCode']] = df[['FlightStatus','CancellationCode']].astype('category')
# + slideshow={"slide_type": "skip"}
# drop duplicated rows
df.drop_duplicates(inplace=True)
# + slideshow={"slide_type": "skip"}
# drop unwanted columns
df = df.drop(columns=['Year','Month','Day','DayOfWeek','DepTime','ArrTime','CRSArrTime','FlightNum',
'TailNum','ActualElapsedTime','CRSElapsedTime','AirTime','DepDelay','Distance','TaxiIn',
'TaxiOut','Cancelled','Diverted'], axis=1)
# -
# Setting base parameters for plots
plt.rcParams.update({'figure.figsize': (14, 7), 'figure.dpi': 150})
base_color = sb.color_palette()[0]
# calculate appropriate bin size for ArrDelay using Doane's formula and numpy
bins = len(np.histogram_bin_edges(df.loc[~df.ArrDelay.isna()].ArrDelay, bins='doane'))
#bins=int(np.around(np.sqrt(df.shape[0])))
# set whitegrid as default figure style
sb.set_style('whitegrid')
# + [markdown] slideshow={"slide_type": "skip"}
# > Note that the above cells have been set as "Skip"-type slides. That means
# that when the notebook is rendered as http slides, those cells won't show up.
# + [markdown] slideshow={"slide_type": "slide"}
# ## Distribution of Arrival Delay
#
# A histogram of arrival delay with extreme high outliers removed gives it a less skewed appearance. The highest (2461 minutes) and lowest (-109 minutes) values in the modified distribution are marked with the blue and red vertical lines.
#
# + slideshow={"slide_type": "subslide"}
base_color = sb.color_palette()[0]
df_a = df.loc[df['ArrDelay'] < df.ArrDelay.quantile(0.995)]
bins_a = len(np.histogram_bin_edges(df_a.loc[~df_a.ArrDelay.isna()].ArrDelay, bins='doane'))
g = sb.distplot(df_a['ArrDelay'], bins=bins_a, kde=False, color=base_color);
g.axvline(x=df_a['ArrDelay'].max(), color='b', label='Max');
g.axvline(x=df_a['ArrDelay'].min(), color='r', label='Min');
plt.title('Distribution of Arrival Delay below the 99.5th percentile');
# + [markdown] slideshow={"slide_type": "slide"}
# ## Mean Duration of each cause
#
# Carrier, National Air System, and Late Aircraft were the 3 delay causes with the highest mean delay. Weather and Security caused mean delays of less than 5 minutes in 2008.
#
# + slideshow={"slide_type": "subslide"}
delay_list = ['CarrierDelay','WeatherDelay','NASDelay','SecurityDelay','LateAircraftDelay']
g = df[delay_list].mean().plot(kind='bar')
for p in g.patches:
x=p.get_bbox().get_points()[:,0]
y=p.get_bbox().get_points()[1,1]
g.annotate('{:.2f}'.format(y), (x.mean(), y),
ha='center', va='bottom')
plt.ylabel('Mean Duration')
plt.xticks(rotation=0)
plt.title('Mean Duration of Delay per Cause');
plt.xlabel('Cause of Delay');
# + [markdown] slideshow={"slide_type": "slide"}
# ## Mean Arrival Delay by Carrier
#
# In this bar chart, arranged in order of descending total flights operated, we see that carriers with fewer flights operated have a wider distribution of arrival delays. This is shown by the length of the vertical black lines above each bar.
# The 8 carriers that operated the fewest flights also show progressively lower mean arrival delays.
#
# The carrier codes and corresponding full carrier names are displayed above the chart
# + slideshow={"slide_type": "skip"}
carrier_dict = dict(zip(df_carriers.Code, df_carriers.Description))
# + slideshow={"slide_type": "subslide"}
sb.barplot(data=df, x='UniqueCarrier', y='ArrDelay', order=df.UniqueCarrier.value_counts().index, color=sb.color_palette()[0]);
plt.title('Mean Arrival Delay by Carrier');
plt.xticks(rotation=0);
print([(i, carrier_dict[i]) for i in list(df.groupby('UniqueCarrier')['ArrDelay'].mean().index)])
# + [markdown] slideshow={"slide_type": "slide"}
# ## Total Delay by Cause per hour
#
# This 15-minute rolling-window line chart of total delay duration per hour for each delay cause shows that Carrier delays account for the majority of delays between 6am and 11am, while Late Aircraft delays account for the majority between 11am and 11pm. There are nearly zero flights between 11pm and 6am, hence the near-zero total delay in that section of the chart.
#
# + slideshow={"slide_type": "subslide"}
import datetime as dt
g = df.groupby(df.FlightDate.dt.time)['CarrierDelay','NASDelay','LateAircraftDelay'].sum().rolling(15).mean().shift(-7).plot()
g.legend(loc='upper center', ncol=3, fancybox=True, shadow=True)
hours = [dt.time(i).strftime('%H:%M') for i in range(24)]
plt.xticks(hours, hours, rotation=20);
plt.title('Total Delay per Cause per hour in 2008 (15 minute rolling window)');
plt.xlabel('Scheduled Flight Time');
plt.ylabel('Total Delay (minutes)');
| 5 - communicate data findings project/slide_delayed_flights.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # Lesson 03 Simple metrics and grid size
# *This lesson was made by <NAME> and last updated 22/11/2021*
# In this lesson we are going to look at some simple topographic metrics and how the grid size affects these metrics.
#
# **Side note**: The grid cell size in a geospatial raster is the length of the edges of each pixel. This is often referred to as the "resolution" but technically the resolution is defined as the minimum distance between two objects that can be separated in an image, so the resolution of a raster is larger than the grid cell size. [You can read about that here](https://www.semanticscholar.org/paper/The-differentiation-between-grid-spacing-and-and-to-Grasso/23ed9f85a67dea01eb6501701478555721b6af13).
#
# You will need to have data files from the previous lessons, so if you have not done those lessons please open them and execute the cells (the quick way to do that is to use the `Restart & run all` button in the `Kernel` menu above).
# ## First import some stuff we need
# First we make sure lsdviztools version is updated (it needs to be > 0.4.7):
# !pip install lsdviztools --upgrade
# Now import stuff we need.
import lsdviztools.lsdbasemaptools as bmt
from lsdviztools.lsdplottingtools import lsdmap_gdalio as gio
import lsdviztools.lsdmapwrappers as lsdmw
import rasterio as rio
from rasterio.plot import show
import matplotlib.pyplot as plt
# ## Grab some (more) data
# We are going to grab a few more datasets. These will be in the same area, but they will be a 90 m dataset and a different 30 m dataset. They require 2 different calls to the `ot_scraper` in `lsdviztools`.
lower_left = [36.990554387425014, -2.318307057720176]
upper_right = [37.23367133834253, -1.8425313329873874]
# This downloads 90m (3 arcsecond) SRTM
Aguas_DEM = bmt.ot_scraper(source = "SRTMGL3",
lower_left_coordinates = lower_left,
upper_right_coordinates = upper_right,
prefix = "rio_aguas")
Aguas_DEM.print_parameters()
Aguas_DEM.download_pythonic()
# This downloads ALOS World 3D 30m
Aguas_DEM = bmt.ot_scraper(source = "AW3D30",
lower_left_coordinates = lower_left,
upper_right_coordinates = upper_right,
prefix = "rio_aguas")
Aguas_DEM.print_parameters()
Aguas_DEM.download_pythonic()
# ## Warp these three datasets into UTM coordinates with 30 and 90 m pixel spacing
# We are going to use gdal to warp the three DEMs. You can use the syntax from lesson 2. I've looked up the UTM zone for you.
# !gdalwarp -t_srs EPSG:32630 rio_aguas_SRTMGL1.tif RA_SRTM_UTM.tif -r cubic -tr 30 30
# !gdalwarp -t_srs EPSG:32630 rio_aguas_AW3D30.tif RA_AW3D30_UTM.tif -r cubic -tr 30 30
# !gdalwarp -t_srs EPSG:32630 rio_aguas_SRTMGL3.tif RA_SRTM3_UTM.tif -r cubic -tr 90 90
# ## Get the hillshades
# We will use gdal to get the hillshades as well.
# !gdaldem hillshade RA_SRTM_UTM.tif RA_SRTM_UTM_HS.tif -alg ZevenbergenThorne
# !gdaldem hillshade RA_SRTM3_UTM.tif RA_SRTM3_UTM_HS.tif -alg ZevenbergenThorne
# !gdaldem hillshade RA_AW3D30_UTM.tif RA_AW3D30_UTM_HS.tif -alg ZevenbergenThorne
# ## Plot some data with rasterio
# Lets look at the hillshades. This time we will plot with the `rasterio show`. First load the datasets
SA_SRTM_hs = rio.open("RA_SRTM_UTM_HS.tif")
SA_SRTM3_hs = rio.open("RA_SRTM3_UTM_HS.tif")
SA_AW3D_hs = rio.open("RA_AW3D30_UTM_HS.tif")
# +
# %matplotlib inline
f, (ax1,ax2,ax3) = plt.subplots(3, 1)
f.set_size_inches(10.5, 18)
show(SA_SRTM_hs, ax=ax1, cmap='gray', title = "SRTM 30m")
show(SA_SRTM3_hs,ax=ax2, cmap='gray', title = "SRTM 90m")
show(SA_AW3D_hs, ax=ax3, cmap='gray', title = "AW3D 30m")
# -
# Right, what can you see here? Hopefully the difference between SRTM1 and SRTM3 is obvious. Less obvious is the difference between SRTM1 and AW3D. We will need to zoom in to see that. To do that we need to use the subscripting functionality of `rasterio` (which allows us to subsample the underlying array)
# +
f, (ax1,ax2) = plt.subplots(1, 2)
f.set_size_inches(18.5, 10)
show(SA_SRTM_hs.read(1)[300:700,600:900], ax=ax1, transform=SA_SRTM_hs.transform, cmap='gray', title = "SRTM 30m")
show(SA_AW3D_hs.read(1)[300:700,600:900], ax=ax2, transform=SA_AW3D_hs.transform, cmap='gray', title = "AW3D 30m")
# -
# Okay, so hopefully you can see from the above images that not all DEMs are created equal. SRTM was a total breakthrough in terms of producing global topographic data. But the radar only imaged the surface over a very short period, so the accuracy is not as good as that of DEMs based on many years of stacked images (such as AW3D 30 or the Copernicus DEM). SRTM, however, is unique in that it is a snapshot of the Earth's surface in 2000, so comparisons to much later topographic data (AW3D and Copernicus) should be able to detect large changes, such as big landslides. The accuracy of these DEMs is not sufficient to find small changes, however.
#
# *If you sign in to OpenTopography and get an api key you can try to compare AW3D 30 to the Copernicus and NASADEM topographic data.*
# # Grid cell spacing on derivative data
# Lets turn our attention to derived datasets. Hillshade is derived from the DEM. But we can also take the slope, or curvature, or other metrics. We will use some more **gdal** and we will also use the histogram function in `rasterio`.
#
# One thing to note: if you take gradient or curvature using gdal or a GIS you will get a local result: the gradient in a pixel is based only on the local pixel. This can lead to noisy results, and we think taking a neighbourhood gradient is better. But that can come in the next lesson where we show you specialised software. For now we will stick to basics.
#
# We are going to use the two SRTM datasets since they come from the same underlying data but are at different grid spacings.
# !gdaldem slope RA_SRTM_UTM.tif RA_SRTM_UTM_S.tif -alg ZevenbergenThorne -p
# !gdaldem slope RA_SRTM3_UTM.tif RA_SRTM3_UTM_S.tif -alg ZevenbergenThorne -p
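# Before comparing the two, here is a quick illustration of what a purely *local* gradient looks like (a numpy finite-difference sketch, not gdal's algorithm): each pixel's slope depends only on its immediate neighbours.
# +
import numpy as np
import rasterio as rio
with rio.open("RA_SRTM_UTM.tif") as src:
    z = src.read(1).astype(float)
    if src.nodata is not None:
        z[z == src.nodata] = np.nan   # mask any nodata cells
dzdy, dzdx = np.gradient(z, 30.0)              # 30 m grid spacing in both directions
slope_percent = 100.0 * np.hypot(dzdx, dzdy)   # same units as gdaldem's -p (percent) option
print("median local slope (%):", np.nanmedian(slope_percent))
# -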
# Now we use the `rasterio` histogram function to look at the data.
# +
from rasterio.plot import show_hist
fig, (ax1,ax2) = plt.subplots(1, 2, figsize=(14,7))
RA_SRTM_S = rio.open("RA_SRTM_UTM_S.tif")
RA_SRTM3_S = rio.open("RA_SRTM3_UTM_S.tif")
show_hist(RA_SRTM_S, bins=100, histtype='stepfilled',lw=0.0, stacked=False, alpha=0.3,ax=ax1, title = "SRTM 30m")
ax1.set_xlabel("% slope")
ax1.set_xlim([0,100])
ax1.get_legend().remove()
show_hist(RA_SRTM3_S, bins=100, histtype='stepfilled',lw=0.0, stacked=False, alpha=0.3,ax=ax2, title = "SRTM 90m")
ax2.set_xlabel("% slope")
ax2.set_xlim([0,100])
ax2.get_legend().remove()
# -
# If you look at this data, you will see that the slope is gentler in the 90 m data. This is systematic. Coarse DEMs underestimate topographic gradients. They also give systematic biases in other derivative datasets (like curvature). *This is important because topographic gradient has been associated with (amongst other things) landslide risk and curvature has been associated with erosion rate.*
#
# You can read all about this in the following paper: [Grieve et al., 2016](https://esurf.copernicus.org/articles/4/627/2016/)
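# We can also check this numerically (a quick sketch): compare the medians of the two slope rasters directly, filtering out any negative nodata values.
# +
import numpy as np
slope_30 = RA_SRTM_S.read(1)
slope_90 = RA_SRTM3_S.read(1)
print("median slope, 30 m grid (%):", np.median(slope_30[slope_30 >= 0]))
print("median slope, 90 m grid (%):", np.median(slope_90[slope_90 >= 0]))
# -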
# ## What you should have learned and potential modifications
# * You will have more experience downloading and warping raster data.
# * You have seen that not all datasets are of the same quality.
# * You will have seen that coarser grid spacing can lead to bias in derivative datasets, like topographic gradient.
#
# Further steps:
# * Try downloading the Copernicus DEM and comparing that to the ALOS world 3D data.
# * Try to zoom in on a different parts of the DEM (to test if you understand how that was done).
| Basic_topography/Lesson_03_simple_metrics_and_resolution.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: autoeq
# language: python
# name: autoeq
# ---
# %load_ext autoreload
# %autoreload 2
import sys
from pathlib import Path
ROOT_PATH = Path().resolve().parent
if str(ROOT_PATH) not in sys.path:
sys.path.insert(1, str(ROOT_PATH))
import ipywidgets as widgets
from IPython.display import display
from measurements.rtings.rtings_crawler import RtingsCrawler
from measurements.crinacle.crinacle_crawler import CrinacleCrawler
from measurements.referenceaudioanalyzer.reference_audio_analyzer_crawler import ReferenceAudioAnalyzerCrawler
from measurements.oratory1990.oratory1990_crawler import Oratory1990Crawler
from measurements.average import average_measurements
# ## Crinacle
# Download measurement data from the Drive folder to `measurements/crinacle/raw_data/FR Data (CSV)` and `measurements/crinacle/raw_data/IEM Measurements (TSV)`, then run this code block
crawler = CrinacleCrawler()
crawler.process_new()
display(crawler.widget)
#average_measurements(input_dir=ROOT_PATH.joinpath('measurements', 'crinacle', 'data', 'onear', 'Ears-711'))
average_measurements(input_dir=ROOT_PATH.joinpath('measurements', 'crinacle', 'data', 'onear', 'GRAS 43AG-7'))
average_measurements(input_dir=ROOT_PATH.joinpath('measurements', 'crinacle', 'data', 'inear'))
# ## Reference Audio Analyzer
crawler = ReferenceAudioAnalyzerCrawler()
crawler.process_new()
display(crawler.widget)
# ## Rtings
crawler = RtingsCrawler()
crawler.process_new()
display(crawler.widget)
# ## oratory1990
# Parsing oratory1990's PDFs requires [Ghostscript](https://www.ghostscript.com/download/gsdnld.html) to be installed on the system
crawler = Oratory1990Crawler()
crawler.process_new()
display(crawler.widget)
#
| measurements/crawl.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <img src="https://www.microsoft.com/en-us/research/uploads/prod/2020/05/Attribution.png" width="400">
#
# <h1 align="left">Multi-investment Attribution: Distinguish the Effects of Multiple Outreach Efforts</h1>
#
# A startup that sells software would like to know whether its multiple outreach efforts were successful in attracting new customers or boosting consumption among existing customers. They would also like to distinguish the effects of several incentives on different kinds of customers. In other words, they would like to learn the **heterogeneous treatment effect** of each investment on customers' software usage.
#
# In an ideal world, the startup would run several randomized experiments where each customer would receive a random assortment of investments. However, this can be logistically prohibitive or strategically unsound: the startup might not have the resources to design such experiments or they might not want to risk losing out on big opportunities due to lack of incentives.
#
# In this customer scenario walkthrough, we show how tools from the [EconML](https://aka.ms/econml) library can use historical investment data to learn the effects of multiple investments.
# ### Summary
#
# 1. [Background](#Background)
# 2. [Data](#Data)
# 3. [Get Causal Effects with EconML](#Get-Causal-Effects-with-EconML)
# 4. [Understand Treatment Effects with EconML](#Understand-Treatment-Effects-with-EconML)
# 5. [Make Policy Decisions with EconML](#Make-Policy-Decisions-with-EconML)
# 6. [Conclusions](#Conclusions)
# # Background
#
# <img src="https://get.pxhere.com/photo/update-software-upgrade-laptop-computer-install-program-screen-system-repair-data-development-electronic-load-pc-process-progress-support-technical-load-1565823.jpg" width="400">
#
# In this scenario, a startup that sells software provides two types of incentives to its customers: technical support and discounts. A customer might be given one, both or none of these incentives.
#
# The startup has historical data on these two investments for 2,000 customers, as well as how much revenue these customers generated in the year after the investments were made. They would like to use this data to learn the optimal incentive policy for each existing or new customer in order to maximize the return on investment (ROI).
#
# The startup faces two challenges: 1) the dataset is biased because historically the larger customers received the most incentives and 2) the observed outcome combines effects from two different investments. Thus, they need a causal model that can accommodate multiple concurrent interventions.
#
# **Solution:** EconML’s `Doubly Robust Learner` model jointly estimates the effects of multiple discrete treatments. The model uses flexible functions of observed customer features to filter out spurious correlations in existing data and deliver the causal effect of each intervention on revenue.
#
# +
# Some imports to get us started
import warnings
warnings.simplefilter('ignore')
# Utilities
import os
import urllib.request
import numpy as np
import pandas as pd
# Generic ML imports
from xgboost import XGBRegressor, XGBClassifier
# EconML imports
from econml.dr import LinearDRLearner
import matplotlib.pyplot as plt
import seaborn as sns
# %matplotlib inline
# -
# # Data
#
# The data* contains ~2,000 customers and is comprised of:
#
# * Customer features: details about the industry, size, revenue, and technology profile of each customer.
# * Interventions: information about which incentive was given to a customer.
# * Outcome: the amount of product the customer bought in the year after the incentives were given.
#
# Feature Name | Type | Details
# :--- |:--- |:---
# **Global Flag** | W | whether the customer has global offices
# **Major Flag** | W | whether the customer is a large consumer in their industry (as opposed to SMC - Small Medium Corporation - or SMB - Small Medium Business)
# **SMC Flag** | W | whether the customer is a Small Medium Corporation (SMC, as opposed to major and SMB)
# **Commercial Flag** | W | whether the customer's business is commercial (as opposed to public sector)
# **IT Spend** | W | \\$ spent on IT-related purchases
# **Employee Count** | W | number of employees
# **PC Count** | W | number of PCs used by the customer
# **Size** | X | customer's size given by their yearly total revenue
# **Tech Support** | T | whether the customer received tech support (binary)
# **Discount** | T | whether the customer was given a discount (binary)
# **Revenue** | Y | \\$ Revenue from customer given by the amount of software purchased
#
# **To protect the privacy of the startup's customers, the data used in this scenario is synthetically generated and the feature distributions don't correspond to real distributions. However, the feature names have preserved their names and meaning.*
# Import the sample multi-attribution data
file_url = "https://msalicedatapublic.blob.core.windows.net/datasets/ROI/multi_attribution_sample.csv"
multi_data = pd.read_csv(file_url)
# Data sample
multi_data.head()
# Define estimator inputs
T_bin = multi_data[
["Tech Support", "Discount"]
] # multiple interventions, or treatments
Y = multi_data["Revenue"] # amount of product purchased, or outcome
X = multi_data[["Size"]] # heterogeneity feature
W = multi_data.drop(
columns=["Tech Support", "Discount", "Revenue", "Size"]
) # controls
# We investigate below whether the number of investments given is correlated with the size of the customer. We note that the average customer size is larger for more incentives given.
# Average customer size per incentive combination
multi_data[["Size", "Tech Support", "Discount"]].groupby(
by=["Tech Support", "Discount"], as_index=False
).mean().astype(int)
# The data was generated using the following underlying treatment effect function:
#
# $$
# \text{treatment\_effect(Size)} = (5,000 + 2\% \cdot \text{Size}) \cdot I_\text{Tech Support} + (5\% \cdot \text{Size}) \cdot I_\text{Discount}
# $$
#
# Therefore, the treatment effect depends on the customer's size as follows: tech support provides a consumption boost of \$5,000 + 2\% Size and a discount provides a consumption boost of 5\% Size. **This is the relationship we seek to learn from the data.**
# +
# Define underlying treatment effect function
TE_fn = lambda X: np.hstack([5000 + 2 / 100 * X, 5 / 100 * X])
true_TE = TE_fn(X)
# Define true coefficients for the three treatments
# The third coefficient is just the sum of the first two since we assume an additive effect
true_coefs = [2 / 100, 5 / 100, 7 / 100]
true_intercepts = [5000, 0, 5000]
treatment_names = ["Tech Support", "Discount", "Tech Support & Discount"]
# -
# # Get Causal Effects with EconML
# To get causal effects, we use EconML's `LinearDRLearner`* estimator. This estimator requires a set of discrete treatments $T$ that corresponds to different types of interventions. Thus, we first map the binary interventions tech support and discount into one categorical variable:
#
# Tech support| Discount| Treatment encoding| Details
# :--- |:--- |:--- |:---
# 0 | 0 | 0 | no incentive
# 1 | 0 | 1 | tech support only
# 0 | 1 | 2 | discount only
# 1 | 1 | 3 | both incentives
#
# The estimator takes as input the outcome of interest $Y$ (amount of product purchased), a discrete treatment $T$ (interventions given), heterogeneity features $X$ (here, customer's size) and controls $W$ (all other customer features).
#
#
# The LinearDRLearner also requires two auxiliary models to model the relationships $T\sim (W, X)$ (`model_propensity`) and $Y \sim (W, X)$ (`model_regression`). These can be generic, flexible classification and regression models, respectively.
#
#
# **This estimator assumes a linear relationship between the treatment effect and a transformation of the features $X$ (e.g. a polynomial basis expansion). For more generic forms of the treatment effect, see the `DRLearner` estimator.*
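# A minimal illustrative sketch of that more flexible alternative (the `GradientBoostingRegressor` final stage here is an assumed choice for the sketch, not the notebook's model):
# +
from econml.dr import DRLearner
from sklearn.ensemble import GradientBoostingRegressor
flexible_model = DRLearner(
    model_regression=XGBRegressor(learning_rate=0.1, max_depth=3),
    model_propensity=XGBClassifier(learning_rate=0.1, max_depth=3, objective="multi:softmax"),
    model_final=GradientBoostingRegressor(),  # non-linear final-stage CATE model
)
# flexible_model.fit(Y=Y, T=T, X=X, W=W)  # T is the integer-encoded treatment built in the next cell
# -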
# +
# Transform T to a one-dimensional array with consecutive integer encoding
def treat_map(t):
    # Binary-to-integer encoding: [tech support, discount] -> tech*1 + discount*2, matching the table above
    return np.dot(t, 2 ** np.arange(t.shape[0]))
T = np.apply_along_axis(treat_map, 1, T_bin).astype(int)
# -
# Train EconML model with generic helper models
model = LinearDRLearner(
model_regression=XGBRegressor(learning_rate=0.1, max_depth=3),
model_propensity=XGBClassifier(learning_rate=0.1, max_depth=3, objective="multi:softmax"),
random_state=1,
)
# Specify final stage inference type and fit model
model.fit(Y=Y, T=T, X=X, W=W, inference="statsmodels")
# # Understand Treatment Effects with EconML
#
# We can obtain a summary of the coefficient values as well as confidence intervals by calling the `summary` function on the fitted model for each treatment.
for i in range(model._d_t[0]):
print(f"Investment: {treatment_names[i]}")
print(f"True treatment effect: {true_intercepts[i]} + {true_coefs[i]}*Size")
display(model.summary(T=i + 1))
# From the summary panels, we see that the learned coefficients/intercepts are close to the true coefficients/intercepts and the p-values are small for most of these.
#
# We further use the `coef_, coef__interval` and the `intercept_, intercept__interval` methods to obtain the learned coefficient values and build confidence intervals. We compare the true and the learned coefficients through the plots below.
# +
# Compare learned coefficients with true model coefficients
# Aggregate data
coef_indices = np.arange(model._d_t[0])
coefs = np.hstack([model.coef_(T=i) for i in 1 + coef_indices])
intercepts = np.hstack([model.intercept_(T=i) for i in 1 + coef_indices])
# Calculate coefficient error bars for 90% confidence interval
coef_error = np.hstack([model.coef__interval(T=i) for i in 1 + coef_indices])
coef_error[0, :] = coefs - coef_error[0, :]
coef_error[1, :] = coef_error[1, :] - coefs
# Calculate intercept error bars for 90% confidence interval
intercept_error = np.vstack(
[model.intercept__interval(T=i) for i in 1 + coef_indices]
).T
intercept_error[0, :] = intercepts - intercept_error[0, :]
intercept_error[1, :] = intercept_error[1, :] - intercepts
# +
# Plot coefficients
plt.figure(figsize=(6, 5))
ax1 = plt.subplot(2, 1, 1)
plt.errorbar(
coef_indices,
coefs,
coef_error,
fmt="o",
label="Learned values\nand 90% confidence interval",
)
plt.scatter(coef_indices, true_coefs, color="C1", label="True values", zorder=3)
plt.xticks(coef_indices, treatment_names)
plt.setp(ax1.get_xticklabels(), visible=False)
plt.title("Coefficients")
plt.legend(loc=(1.05, 0.65))
plt.grid()
# Plot intercepts
plt.subplot(2, 1, 2)
plt.errorbar(coef_indices, intercepts, intercept_error, fmt="o")
plt.scatter(coef_indices, true_intercepts, color="C1", zorder=3)
plt.xticks(coef_indices, treatment_names)
plt.title("Intercepts")
plt.grid()
plt.show()
# -
# # Make Policy Decisions with EconML
#
# Investments such as tech support and discounts come with an associated cost. Thus, we would like to know what incentives to give to each customer to maximize the profit from their increased engagement. This is the **treatment policy**.
#
# In this scenario, we define a cost function as follows:
# * The cost of `tech support` scales with the number of PCs a customer has. You can imagine that if the software product needs tech support to be installed on each machine, there is a cost (\\$100 here) per machine.
# * The cost of `discount` is a fixed \\$7,000. Think of this as giving the customer the first \\$7,000 worth of product for free.
# * The cost of `tech support` and `discount` is the sum of the cost of each of these. Note that this might not be the case in every business application: it is possible that managing multiple incentive programs can add overhead.
# Define cost function
def cost_fn(multi_data):
t1_cost = multi_data[["PC Count"]].values * 100
t2_cost = np.ones((multi_data.shape[0], 1)) * 7000
return np.hstack([t1_cost, t2_cost, t1_cost + t2_cost])
# We use the model's `const_marginal_effect` method to find the counterfactual treatment effect for each possible treatment. We then subtract the treatment cost and choose the treatment with the highest return. That is the recommended policy.
# Get roi for each customer and possible treatment
potential_roi = model.const_marginal_effect(X=X.values) - cost_fn(multi_data)
# Add a column of 0s for no treatment
potential_roi = np.hstack([np.zeros(X.shape), potential_roi])
all_treatments = np.array(["None"] + treatment_names)
recommended_T = np.argmax(potential_roi, axis=1)
ax1 = sns.scatterplot(
x=X.values.flatten(),
y=multi_data["PC Count"].values,
hue=all_treatments[recommended_T],
hue_order=all_treatments,
cmap="Dark2",
s=40,
)
plt.legend(title="Investment Policy")
plt.setp(
ax1,
xlabel="Customer Size",
ylabel="PC Count",
title="Optimal Investment Policy by Customer",
)
plt.show()
# We compare different policies: the optimal policy we learned, the current policy, and the policy under which each customer is given all incentives. We note that the optimal policy has a much higher ROI than the alternatives.
roi_current = potential_roi[np.arange(X.shape[0]), T].sum()
roi_optimal = potential_roi[np.arange(X.shape[0]), recommended_T].sum()
roi_bothT = potential_roi[:, -1].sum()
all_rois = np.array([roi_optimal, roi_current, roi_bothT])
Y_baseline = (Y - model.effect(X=X.values, T1=T)).sum()
pd.DataFrame(
{
"Policy": ["Optimal", "Current", "All Investments"],
"ROI ($)": all_rois,
"ROI (% of baseline Y)": np.round(all_rois / Y_baseline * 100, 1),
}
)
# # Conclusions
#
# In this notebook, we have demonstrated the power of using EconML to:
#
# * Learn the effects of multiple concurrent interventions
# * Interpret the resulting individual-level treatment effects
# * Build investment policies around the learned effects
#
# To learn more about what EconML can do for you, visit our [website](https://aka.ms/econml), our [GitHub page](https://github.com/microsoft/EconML) or our [documentation](https://econml.azurewebsites.net/).
| notebooks/CustomerScenarios/Case Study - Multi-investment Attribution at A Software Company.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# # Machine Learning Real World Projects in Python
# ## SONAR Rock vs Mine Prediction
# ### Predict whether an object is either Rock or Mine with SONAR Data
# [](https://www.python.org/)
#
# [](https://jupyter.org/try)
#
# [](https://www.linkedin.com/in/chus-santana/)
#
# [](https://github.com/jesussantana)
# + [markdown] id="mYSLvRgB3Sel"
# Importing the Dependencies
# + id="cbE3ZjDb23el"
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
# + [markdown] id="fCLGacZR4UZx"
# Data Collection and Data Processing
# + id="7ymxgj2i3RwO"
#loading the dataset to a pandas Dataframe
sonar_data = pd.read_csv('../../data/raw/sonar data.csv', header=None)
# + colab={"base_uri": "https://localhost:8080/", "height": 218} id="I5iWxSnM42fl" outputId="1b2221d0-dd71-40b2-c7fc-272017ea53f7"
sonar_data.head()
# + colab={"base_uri": "https://localhost:8080/"} id="WN_FI_eN48V_" outputId="5d4d105c-657c-4eec-df90-e2f4a0cbf837"
# number of rows and columns
sonar_data.shape
# + colab={"base_uri": "https://localhost:8080/", "height": 308} id="q6A1r9J-5aOJ" outputId="9efbb9de-570b-4d9f-a92a-d6835f3acb2d"
sonar_data.describe() #describe --> statistical measures of the data
# + colab={"base_uri": "https://localhost:8080/"} id="XFlxfDyk5o00" outputId="4ca928a6-de7b-439e-8a94-bc89a4449189"
sonar_data[60].value_counts()
# + [markdown] id="_6RDFTc26aBI"
# M --> Mine
#
# R --> Rock
# + colab={"base_uri": "https://localhost:8080/", "height": 158} id="Uis1XlFs6M09" outputId="e7c33de6-9384-44d6-a66d-e60cbf0d8662"
sonar_data.groupby(60).mean()
# + id="qRShuFc46jLd"
# separating data and Labels
X = sonar_data.drop(columns=60, axis=1)
Y = sonar_data[60]
# + colab={"base_uri": "https://localhost:8080/"} id="mkRRrxIe7D7l" outputId="4dbba519-c5cf-4721-dafa-3c2c31439206"
print(X)
print(Y)
# + [markdown] id="j912DrKe7L03"
# Training and Test data
# + id="bTnEFld87GIr"
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size = 0.1, stratify=Y, random_state=1)
# + colab={"base_uri": "https://localhost:8080/"} id="ww4D1Ps379_h" outputId="5a7bbea6-aafb-4978-c7ef-0e28506f5ee8"
print(X.shape, X_train.shape, X_test.shape)
# + colab={"base_uri": "https://localhost:8080/"} id="KBvcm4eR8enA" outputId="4878f4d0-12c2-4d69-8f07-1b23b1b69555"
print(X_train)
print(Y_train)
# + [markdown] id="rKLgrLOx8LQx"
# Model Training --> Logistic Regression
# + id="UoM3FhQS8FAw"
model = LogisticRegression()
# + colab={"base_uri": "https://localhost:8080/"} id="GGomegF-8TPv" outputId="688846eb-1b29-4e7f-9905-7956d013dc72"
#training the Logistic Regression model with training data
model.fit(X_train, Y_train)
# + [markdown] id="585vgP7b8vBn"
# Model Evaluation
# + id="kCBykEtO8pLi"
#accuracy on training data
X_train_prediction = model.predict(X_train)
training_data_accuracy = accuracy_score(X_train_prediction, Y_train)
# + colab={"base_uri": "https://localhost:8080/"} id="50Wqy2Rc9nL1" outputId="cc9d8c2e-92ee-4047-e6c6-3bee7fbb359c"
print('Accuracy on training data : ', training_data_accuracy)
# + id="RCUZ6MuR9tOV"
#accuracy on test data
X_test_prediction = model.predict(X_test)
test_data_accuracy = accuracy_score(X_test_prediction, Y_test)
# + colab={"base_uri": "https://localhost:8080/"} id="04AsqCrz99vU" outputId="41e1fed1-ef90-4937-9d9a-a7d23a16504e"
print('Accuracy on test data : ', test_data_accuracy)
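# +
# A quick complementary check on the same held-out split: a minimal sketch using scikit-learn's confusion_matrix (rows are actual classes, columns are predicted classes, ordered ['M', 'R'])
from sklearn.metrics import confusion_matrix
print(confusion_matrix(Y_test, X_test_prediction, labels=['M', 'R']))
# -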
# + [markdown] id="RKrIzmr8-K9s"
# Making a Predictive System
# + colab={"base_uri": "https://localhost:8080/"} id="NMp-UfOd-B7B" outputId="a7aaeda1-f37b-4719-ab0a-01571db68419"
input_data = (0.0307,0.0523,0.0653,0.0521,0.0611,0.0577,0.0665,0.0664,0.1460,0.2792,0.3877,0.4992,0.4981,0.4972,0.5607,0.7339,0.8230,0.9173,0.9975,0.9911,0.8240,0.6498,0.5980,0.4862,0.3150,0.1543,0.0989,0.0284,0.1008,0.2636,0.2694,0.2930,0.2925,0.3998,0.3660,0.3172,0.4609,0.4374,0.1820,0.3376,0.6202,0.4448,0.1863,0.1420,0.0589,0.0576,0.0672,0.0269,0.0245,0.0190,0.0063,0.0321,0.0189,0.0137,0.0277,0.0152,0.0052,0.0121,0.0124,0.0055)
# changing the input_data to a numpy array
input_data_as_numpy_array = np.asarray(input_data)
# reshape the np array as we are predicting for one instance
input_data_reshaped = input_data_as_numpy_array.reshape(1,-1)
prediction = model.predict(input_data_reshaped)
print(prediction)
if (prediction[0]=='R'):
print('The object is a Rock')
else:
print('The object is a mine')
# + id="tcg9Er11_TSv"
| notebooks/AI in Technology - SONAR Rock vs Mine Prediction/ML_Rock_vs_Mine_Prediction.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from sklearn.decomposition import LatentDirichletAllocation
import pandas as pd
data = pd.read_csv('reddit-data/train_sw.csv')
data.head()
to_drop = ['Unnamed: 1', 'Unnamed: 2']
data_f = data.drop(to_drop, axis = 'columns')
data_f.head()
# +
# data
# -
# # Input
text = 'Most hotlines are at least partially staffed by trained volunteers, so check out agencies in your area.'
pred = {'body': text}
data = data_f.append(pred, ignore_index = True);
data
data.head()
from sklearn.feature_extraction.text import CountVectorizer
data.shape
cv = CountVectorizer(max_df=0.9,min_df=2,stop_words='english')
dtm = cv.fit_transform(data['body'])
dtm
from sklearn.decomposition import LatentDirichletAllocation
LDA = LatentDirichletAllocation(n_components=3,random_state=32)
LDA.fit(dtm)
len(cv.get_feature_names())
type(cv.get_feature_names())
import random
random_word_id = random.randint(0,1610)
cv.get_feature_names()[random_word_id]
len(LDA.components_)
type(LDA.components_)
LDA.components_.shape
single_topic = LDA.components_[0]
single_topic.argsort()
import numpy as np
single_topic.argsort()[-10:]
top_ten_words = single_topic.argsort()[-20:]
for index in top_ten_words:
print(cv.get_feature_names()[index])
for i,topic in enumerate(LDA.components_):
print(f"top 30 words for topic #{i}")
print([cv.get_feature_names()[index] for index in topic.argsort()[-30:]])
print('\n')
print('\n')
topic_results = LDA.transform(dtm)
topic_results.shape
data['Topic'] = topic_results.argmax(axis=1)
data
len(data)
# # Output
def predict(val):
if val == 0:
print('Suicidal')
elif val == 1:
print('Abusive/Threatening')
else:
print('Completely Normal')
val = data['Topic'][len(data)-1]
predict(val)
| models/Model.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# ---
# +
def init(context):
context.s1 = "AG1612"
context.fired_now = 0
subscribe(context.s1)
logger.info("RunInfo: {}".format(context.run_info))
def handle_tick(context, tick):
    # Every 300 ticks (while fired_now < 300,000), append the last 300 tick prices
    # and volumes as one CSV row each to last_file.csv and volume.csv
    if context.fired_now < 300000 and context.fired_now % 300 == 0:
        last_price = []
        num = []
        recent_ticks = history_ticks(context.s1, 300)
        for item in recent_ticks:
            last_price.append(str(item.last))
            num.append(str(item.volume))
        str_last = ",".join(last_price) + "\n"
        str_volume = ",".join(num) + "\n"
        put_file('last_file.csv', str_last, append=True)
        put_file('volume.csv', str_volume, append=True)
    context.fired_now += 1
| data_ai/comp3035/rqbasic/fetch.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd, requests, numpy as np
import urllib.request
import bar_chart_race as bcr
import matplotlib as mpl
import matplotlib.pyplot as plt
##LOAD Town COVIDATA [MOH-IL]
url = 'https://data.gov.il/api/3/action/datastore_search?resource_id=8a21d39d-91e3-40db-aca1-f73f7ab1df69&limit=150000'
json = requests.get(url).json()
df = pd.DataFrame(json['result']['records'])
##load town data [CBS-IL]
url='https://www.cbs.gov.il/he/publications/doclib/2019/ishuvim/bycode2019.xlsx'
cbs_town = pd.read_excel(url)
# +
###DOWNLOAD corona town data from odata.org.il
url = 'https://www.odata.org.il/dataset/93c6fa7d-c976-4a24-b8e9-f3bf466d6cf5/resource/83ab3d8b-ec7b-4929-a3b3-bfe83cfe3cb0/download/.xlsx'
r = requests.get(url, allow_redirects=True)
file_loc = 'data/opendata/corona_towns.xlsx'
open (file_loc, 'wb').write(r.content)
# -
##Load full town data
df_town_full = pd.read_excel(file_loc)
#rename_col
df_town_full.rename(columns={'city_desc':'city_name'}, inplace=True)
df_town_full.columns
df_town_full.city_name.dropna().drop_duplicates()
towns_pop = df_town_full[['city_name','total_pop']].dropna().drop_duplicates().reset_index(drop=True)
towns_pop.loc[towns_pop.total_pop>=100000]
# +
merged_towns = towns_pop.loc[towns_pop.total_pop>=100000].merge(covidf_town[['city_name','pop']],
on='city_name')
merged_towns.rename(columns={'total_pop':'pop_moh','pop':'pop_cbs19'},inplace=True)
merged_towns['pop_dif'] = merged_towns['pop_moh'] - merged_towns['pop_cbs19']
merged_towns['pop_dif_pct'] = np.round(merged_towns['pop_dif'] / merged_towns['pop_moh'] * 100,2)
merged_towns
# +
##lower case column name
df = df.rename(columns=lambda x:x.lower())
## use 10 instead of less than
df = df.replace('<15','10')
## cols data types
cols = ['city_code','cumulative_verified_cases',
'cumulated_recovered', 'cumulated_deaths', 'cumulated_number_of_tests',
'cumulated_number_of_diagnostic_tests']
for x in cols:
df[x] = df[x].astype('int')
df['date']=pd.to_datetime(df.date)
# df.dtypes
# df.head()
# +
## town_df
town_df = cbs_town[['סמל יישוב', 'סך הכל אוכלוסייה 2019']].rename(columns={'סמל יישוב' : 'city_code',
'סך הכל אוכלוסייה 2019' : 'pop'})
##convert population na to 0
town_df['pop'] = town_df['pop'].fillna(0).astype('int')
##merge to main COVIDF
covidf = df.merge(town_df, on='city_code')
# +
# covidf_town
covidf_town = covidf[['city_code','city_name','pop']].drop_duplicates().sort_values(by='pop', ascending=False).reset_index(drop=True)
# remove tribes (0 population)
tribes = covidf_town.loc[covidf_town['pop']==0,'city_name'].unique()## len == 10
covidf_town = covidf_town.loc[~covidf_town.city_name.isin(tribes)]
covidf_town['pop'] = covidf_town['pop'].astype('int')
##SET City_Type
## City >= 100,000; Town >= 10,000; Village < 10,000
covidf_town['city_type'] = ['City' if x >=100000 else 'Town' if x >=10000 else 'Village' for x in covidf_town['pop']]
# covidf_town[['pop']].plot.hist(bins=10)
# norm coefficients: population expressed per 100k / 10k / 1k residents
norm_by_x = lambda x: covidf_town['pop']/x
covidf_town = covidf_town.assign(norm100k=norm_by_x(10**5), norm10k=norm_by_x(10**4), norm1k=norm_by_x(10**3))
##add city type to main COVIDF
covidf = covidf.merge(covidf_town[['city_code','city_type', 'norm100k', 'norm10k', 'norm1k']], on='city_code', how='left')
# len(covidf_town) ##261
# -
covidf.columns
##TEST
# col = 'cumulated_deaths'; city_type ='Town'
covidf.loc[covidf.city_name=='ירושלים'].iloc[-1]
# +
##normalize column for city_type
def barChartDFandNameTotal(covidf=covidf, col='cumulated_deaths', city_type='City'):
df_val = covidf.query("city_type == '{}'".format(city_type))
value_name = '{}_total_{}'.format(city_type.lower(), col.replace('cumulated_',''))
#revert city name (hebrew)
df_val['city_name'] = df_val['city_name'].apply(lambda x:x[::-1])
#WIDE format
df_bar = df_val.pivot(index='date',columns='city_name', values=col)
# #clean and remove all zeros
df_bar.columns.name = ''
df_bar = df_bar.loc[df_bar.apply(lambda x: x.sum(), axis=1) > 0]
print(value_name)
print("number of {}: {}".format(city_type, len(df_bar.columns)))
return df_bar, value_name
# df_c = df_p.dropna(how='all').dropna(how='all', axis=1).bfill().ffill()
# df_c.head()
# +
##normalize column for city_type
def barChartDFandNameNorm(covidf=covidf, col='cumulated_deaths', city_type='City'):
df_val = covidf.query("city_type == '{}'".format(city_type))
k_col = 'norm100k' if city_type == 'City' else 'norm10k' if city_type == 'Town' else 'norm1k'
df_val= df_val.assign(value=df_val[col] / df_val[k_col])
value_name = '{}_{}_per_{}'.format(city_type.lower(), col.replace('cumulated_',''),k_col.replace('norm',''))
#revert city name (hebrew)
df_val['city_name'] = df_val['city_name'].apply(lambda x:x[::-1])
#WIDE format
df_bar = df_val.pivot(index='date',columns='city_name', values='value')
# #clean and remove all zeros
df_bar.columns.name = ''
df_bar = df_bar.loc[df_bar.apply(lambda x: x.sum(), axis=1) > 0]
print(value_name)
print("number of {}: {}".format(city_type, len(df_bar.columns)))
return df_bar, value_name
# df_c = df_p.dropna(how='all').dropna(how='all', axis=1).bfill().ffill()
# df_c.head()
# -
wnorm100k = (covidf_town.norm100k * covidf_town['pop']).sum() / covidf_town['pop'].sum()
wnorm100k
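# `wnorm100k` above is the population-weighted mean of the cities' per-100k normalization factors; the bar chart summary below multiplies a summed per-100k value by it to show an approximate absolute total next to the normalized figure.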
def barChartRaceCreator(df_bar, value_name, city_type, n_bars=16, wnorm100k=wnorm100k):
title_name = lambda x: x.title().replace('_', ' ')
total_title = value_name.title().replace('_',' ').replace('{} '.format(city_type), '')
##summary func
if 'per' in value_name and city_type=='City':
sum_func = lambda v, r: {'x': .98, 'y': .13,
's': '{}: {:,.0f} (TOTAL: {})'.format(total_title, v.nlargest(n_bars).sum(),
int(np.round(v.nlargest(n_bars).sum()*wnorm100k))),
'family':'Lato', 'ha': 'right', 'size': 10}
else:
sum_func = lambda v, r: {'x': .98, 'y': .13,
's': '{}: {:,.0f}'.format(total_title, v.nlargest(n_bars).sum()),
'family':'Lato', 'ha': 'right', 'size': 10}
bcr.bar_chart_race(df_bar,
n_bars=n_bars,
figsize=(7,5),
filter_column_colors=True,
period_fmt='%B %d, %Y',
period_label={'x': .98, 'y': .2, 'ha': 'right', 'va': 'center',
'family':'Lato','weight':'bold', 'size':14},
period_summary_func=sum_func,
filename='barace/{}.mp4'.format(value_name),
shared_fontdict={'family' : 'Open Sans Hebrew','weight':'normal'},
period_length=200,
title = 'COVID in Israel - {}'.format(title_name(value_name)),
title_size=11,
tick_label_size=10,
bar_label_size=11,
cmap='tab20')
##rcParams - mpl style
mpl.rcParams['font.serif'] = ['Lato']
mpl.rcParams['axes.titleweight'] ='bold'
# +
# ###TEST
# df_bar, value_name = barChartDFandNameNorm(city_type='City')
# df_bar_test = df_bar.loc[df_bar.index.month.isin([4])]
# barChartRaceCreator(df_bar_test, value_name, city_type='City')
# -
## CREATE NORM AND TOTAL BAR CHARTS for City, Village and Town - this may take some time...
for city_type in ['City']:# ,'Town','Village']:
print(city_type)
df_bar, value_name = barChartDFandNameNorm(city_type=city_type)
barChartRaceCreator(df_bar, value_name, city_type)
# df_bar, value_name = barChartDFandNameTotal(city_type=city_type)
# barChartRaceCreator(df_bar, value_name, city_type)
# ### Current Situation
latest_date = covidf.date.max()
print(latest_date)
df_latest = covidf.loc[covidf.date==latest_date]
df_city = df_latest.loc[df_latest.city_type=='City']
df_city = df_city.assign(norm_death_100k=df_city.cumulated_deaths/df_city.norm100k)
df_city.sort_values(by='norm_death_100k',inplace=True)
df_city.head()
# +
fig, ax = plt.subplots(figsize=(7,5), dpi=300)
ax.barh(df_city['city_name'].apply(lambda x:x[::-1]), df_city.norm_death_100k)
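# The Hebrew labels below are reversed with [::-1] for right-to-left rendering; the x-label reads "Deaths per 100,000 residents" and the title reads "Number of deaths per 100,000 residents in large cities"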
ax.set_xlabel('הרוגים פר מאה אלף תושבים'[::-1], weight='bold', fontsize=14)
plt.grid(True, axis='x')
ax.set_axisbelow(True)
ax.text(x=.95,y=.1,s=latest_date.strftime("%B %d, %Y"),weight='bold',fontsize=16,ha='right', transform=ax.transAxes)
ax.set_title("מספר הרוגים פר מאה אלף תושבים בערים גדולות"[::-1], weight='bold', fontsize=16)
rects = ax.patches
# Annotate each bar with its integer value
for i, rect in enumerate(rects):
    width = rect.get_width()
    ax.text(width, i, int(width),
            ha='left', va='center')
plt.show()
fig.savefig('graphs/town/latest_deaths_norm_city.jpg')
# -
| CoronaTowns.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
from IPython.html import widgets
from IPython.display import display
from d3networkx import ForceDirectedGraph, EventfulGraph, empty_eventfulgraph_hook
# Hook into the random_graphs NetworkX code.
# +
from networkx.generators import random_graphs
from networkx.generators import classic
# Add a listener to the eventful graph's construction method.
# If an eventful graph is created, build and show a widget
# for the graph.
def handle_graph(graph):
print(graph.graph._sleep)
graph_widget = ForceDirectedGraph(graph)
display(graph_widget)
EventfulGraph.on_constructed(handle_graph)
# Replace the empty graph of the networkx classic module with
# the eventful graph type.
random_graphs.empty_graph = empty_eventfulgraph_hook(sleep=0.2)
# -
# ## Barabási-Albert
random_graphs.barabasi_albert_graph(15, 1)
random_graphs.barabasi_albert_graph(15, 2)
random_graphs.barabasi_albert_graph(10, 5)
# ## Newman-Watts-Strogatz
random_graphs.newman_watts_strogatz_graph(15, 3, 0.25)
# ## Barbell
classic.barbell_graph(5,0,create_using=EventfulGraph(sleep=0.1))
# ## Circular Ladder
classic.circular_ladder_graph(5,create_using=EventfulGraph(sleep=0.1))
classic.circular_ladder_graph(10,create_using=EventfulGraph(sleep=0.1))
# ## Ladder
classic.ladder_graph(10,create_using=EventfulGraph(sleep=0.1))
# ## Star
classic.star_graph(10,create_using=EventfulGraph(sleep=0.1))
| d3networkx_psctb_files/examples/demo generators.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Codebook
# **Authors:** <NAME>
# Documenting existing data files of DaanMatch with information about location, owner, "version", source etc.
import boto3
import numpy as np
import pandas as pd
pd.plotting.register_matplotlib_converters()
import matplotlib.pyplot as plt
# %matplotlib inline
from collections import Counter
import statistics
client = boto3.client('s3')
resource = boto3.resource('s3')
my_bucket = resource.Bucket('my-bucket')
# # Districts--.csv
# ## TOC:
# * [About this dataset](#1)
# * [What's in this dataset](#2)
# * [Codebook](#3)
# * [Missing values](#3.1)
# * [Summary statistics](#3.2)
# * [Columns](#4)
# * [Name](#4.1)
# * [Value](#4.2)
# **About this dataset** <a class="anchor" id="1"></a>
# Data provided by: Unknown.
# Source: https://daanmatchdatafiles.s3-us-west-1.amazonaws.com/DaanMatch_DataFiles/Districts--.csv
# Type: csv
# Last Modified: May 29, 2021, 19:54:25 (UTC-07:00)
# Size: 11.6 KB
path = "s3://daanmatchdatafiles/DaanMatch_DataFiles/Districts--.csv"
districts = pd.read_csv(path)
districts
# **What's in this dataset?** <a class="anchor" id="2"></a>
print("Shape:", districts.shape)
print("Rows:", districts.shape[0])
print("Columns:", districts.shape[1])
print("Each row is a district in India.")
# **Codebook** <a class="anchor" id="3"></a>
# +
districts_columns = [column for column in districts.columns]
districts_description = ["Same as the Value column.",
"Name of District in India. There are 28 states in India and 8 union territories, all of which have territories within them. This column represents the names of those territories.",
"This value column has no real meaning, it is meant purely to count the districts."]
districts_dtypes = [dtype for dtype in districts.dtypes]
data = {"Column Name": districts_columns, "Description": districts_description, "Type": districts_dtypes}
districts_codebook = pd.DataFrame(data)
districts_codebook.style.set_properties(subset=['Description'], **{'width': '600px'})
# -
# **Missing values** <a class="anchor" id="3.1"></a>
districts.isnull().sum()
# There are 739 districts in India. There are only 674 rows in this dataset, so there are at least 65 districts missing from this list.
total_districts = np.arange(0, 738)
included_districts = np.array(districts['Value'])
missing_districts = []
for i in total_districts:
if (i not in included_districts):
missing_districts.append(i)
print("Number of Missing Districts:", len(missing_districts))
# **Summary statistics** <a class="anchor" id="3.2"></a>
districts.describe()
# ## Columns
# <a class="anchor" id="4"></a>
# ### Name
# <a class="anchor" id="4.1"></a>
# Name of a district in India. There are 28 states and 8 union territories in India, each of which contains districts. This column represents the names of those districts.
column = districts["Name"]
column
# +
print("No. of unique values:", len(column.unique()))
# Check for duplicates
counter = dict(Counter(column))
duplicates = { key:value for key, value in counter.items() if value > 1}
print("Duplicates:", duplicates)
if len(duplicates) > 0:
print("No. of duplicates:", len(duplicates))
# -
districts.loc[districts['Name'].isin(duplicates.keys())].sort_values("Name")
# ### Value
# <a class="anchor" id="4.2"></a>
# This value column has no real meaning, it is meant purely to count the districts.
column = districts["Value"]
column
# +
print("No. of unique values:", len(column.unique()))
# Check for duplicates
counter = dict(Counter(column))
duplicates = { key:value for key, value in counter.items() if value > 1}
print("Duplicates:", duplicates)
if len(duplicates) > 0:
print("No. of duplicates:", len(duplicates))
# -
districts.loc[districts['Value'].isin(duplicates.keys())]
| [DIR] archive/Districts--/Districts--.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Libraries
import pandas as pd
import numpy as np
from scipy import stats
import scipy.io
from scipy.spatial.distance import pdist
from scipy.linalg import cholesky
import matlab.engine as engi
import matlab as mat
import math
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import classification_report
from scipy.io import loadmat
import SMOTE
# # Start matlab service
eng = engi.start_matlab()
eng.addpath(r'matlab_CTKCCA/',nargout=0)
eng.addpath(r'matlab_KS/',nargout=0)
# # variables
# +
source_data_path = 'data/1385/converted/bzbyte.csv'
target_data_path = 'data/1385/converted/scite-ru.csv'
result_path = 'result/result.csv'
repeats = 20
ratio = 0.1
lrank = 70
reg = 1E-5
# -
# # Data Loading and Normalization
def load_data(path,source):
df = pd.read_csv(path)
df = df.drop(labels = ['Host','Vcs','Project','File','PL','IssueTracking'],axis=1)
df = df.dropna()
df = df[['TLOC', 'TNF', 'TNC', 'TND', 'LOC', 'CL', 'NStmt', 'NFunc',
'RCC', 'MNL', 'avg_WMC', 'max_WMC', 'total_WMC', 'avg_DIT', 'max_DIT',
'total_DIT', 'avg_RFC', 'max_RFC', 'total_RFC', 'avg_NOC', 'max_NOC',
'total_NOC', 'avg_CBO', 'max_CBO', 'total_CBO', 'avg_DIT.1',
'max_DIT.1', 'total_DIT.1', 'avg_NIV', 'max_NIV', 'total_NIV',
'avg_NIM', 'max_NIM', 'total_NIM', 'avg_NOM', 'max_NOM', 'total_NOM',
'avg_NPBM', 'max_NPBM', 'total_NPBM', 'avg_NPM', 'max_NPM', 'total_NPM',
'avg_NPRM', 'max_NPRM', 'total_NPRM', 'avg_CC', 'max_CC', 'total_CC',
'avg_FANIN', 'max_FANIN', 'total_FANIN', 'avg_FANOUT', 'max_FANOUT',
'total_FANOUT', 'NRev', 'NFix', 'avg_AddedLOC', 'max_AddedLOC',
'total_AddedLOC', 'avg_DeletedLOC', 'max_DeletedLOC',
'total_DeletedLOC', 'avg_ModifiedLOC', 'max_ModifiedLOC',
'total_ModifiedLOC','Buggy']]
d = {'buggy': True, 'clean': False}
df['Buggy'] = df['Buggy'].map(d)
if source:
df = apply_smote(df)
return df
def apply_smote(df):
cols = df.columns
smt = SMOTE.smote(df)
df = smt.run()
df.columns = cols
return df
source_df = load_data(source_data_path,False)
target_df = load_data(target_data_path,False)
# # Matlab integration
# ## Matlab integration - CTKCCA
def transform_data(source_df,target_df):
mat_source_df = mat.double(source_df.values.T.tolist())
mat_target_df = mat.double(target_df.values.T.tolist())
X = eng.CTKCCA(mat_source_df,mat_target_df,nargout=4)
train_X,train_y = np.array(X[0]),np.array(X[1]).tolist()[0]
test_X,test_y = np.array(X[2]),np.array(X[3]).tolist()[0]
return train_X,train_y,test_X,test_y
trasformed_train_X,trasformed_train_y,trasformed_test_X,trasformed_test_y = transform_data(source_df,target_df)
train_df = pd.DataFrame(trasformed_train_X)
train_df['Buggy'] = trasformed_train_y
train_df = apply_smote(train_df)
trasformed_train_y = train_df.Buggy
trasformed_train_X = train_df.drop('Buggy',axis = 1)
clf = LogisticRegression()
clf.fit(trasformed_train_X,trasformed_train_y)
predicted = clf.predict(trasformed_test_X)
print(classification_report(trasformed_test_y, predicted))
# ## Matlab integration - KS
def transform_data(source_df,target_df):
mat_source_df = mat.double(source_df.values.T.tolist())
mat_target_df = mat.double(target_df.values.T.tolist())
X = eng.HDP_KS(mat_source_df,mat_target_df,nargout=4)
train_X,train_y = np.array(X[0]),np.array(X[1]).tolist()[0]
test_X,test_y = np.array(X[2]),np.array(X[3]).tolist()[0]
return train_X,train_y,test_X,test_y
trasformed_train_X,trasformed_train_y,trasformed_test_X,trasformed_test_y = transform_data(source_df,target_df)
train_df = pd.DataFrame(trasformed_train_X)
train_df['Buggy'] = trasformed_train_y
train_df = apply_smote(train_df)
trasformed_train_y = train_df.Buggy
trasformed_train_X = train_df.drop('Buggy',axis = 1)
clf = LogisticRegression()
clf.fit(trasformed_train_X,trasformed_train_y)
predicted = clf.predict(trasformed_test_X)
print(classification_report(trasformed_test_y, predicted))
# # Testing using original data
# ## get train test data
def get_train_test_data(source_df,target_df):
train_y = source_df.Buggy
train_X = source_df.drop('Buggy',axis = 1)
test_y = target_df.Buggy
test_X = target_df.drop('Buggy',axis = 1)
return train_X,train_y,test_X,test_y
train_X,train_y,test_X,test_y = get_train_test_data(source_df,target_df)
clf = LogisticRegression()
clf.fit(train_X,train_y)
predicted = clf.predict(test_X)
print(classification_report(test_y, predicted))
train_y[train_y == True].shape,train_y[train_y == False].shape
test_y[test_y == True].shape,test_y[test_y == False].shape
| random_notebook_Experiments/CTKCCA_matlab_integrated_code-new-data.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import os
import data.units as parameters_names
import transporters.transporters_factory as transporters_factory
from transporters.approximator.configuration import ApproximatorConfiguration
from data.grid_configuration import CanonicalCoordinatesGridConfiguration
from data.parameters_names import ParametersNames as Parameters
sns.set_style("whitegrid")
# -
path_to_serialized_approximator = "parametrization_6500GeV_0p4_185_reco.root"
approximator_name = "ip5_to_station_220_h_2_lhcb1"
approximator_configuration = ApproximatorConfiguration(path_to_serialized_approximator, approximator_name)
transporter = transporters_factory.get_transporter(approximator_configuration)
# +
# Parameters of bunch
x_mean = 0.0
delta_x = 10e-6
theta_x_mean = 0
delta_theta_x = 10e-6
y_mean = 0.0
delta_y = 10e-6
theta_y_mean = 0.00
delta_theta_y = 10e-6
pt_mean = -0.01
delta_pt = 0.001
particles_number = 1000
grid_configuration = CanonicalCoordinatesGridConfiguration.from_delta(x_mean, delta_x, particles_number,
theta_x_mean, delta_theta_x, 1,
y_mean, delta_y, 1,
theta_y_mean, delta_theta_y, 1,
pt_mean, delta_pt, 1)
particles = grid_configuration.generate_randomly()
# -
output_segments = transporter(particles)
# +
# Names of segments, like MQXA.3R5
start_segment_name = "start"
end_segment_name = "end"
if start_segment_name != "start":
output_segments[start_segment_name].T[1] -= 1
# -
output_matrix = output_segments[end_segment_name]
input_matrix = output_segments[start_segment_name]
def plot_histogram(x_name, y_name, x_name_prefix, y_name_prefix,
x_matrix, y_matrix,
plot_axes, plot_x_pos, plot_y_pos):
title = "Histogram " + x_name_prefix + parameters_names.alternative_version[x_name] + " and " + \
y_name_prefix + parameters_names.alternative_version[y_name] + " parameter"
# Get vectors and their properties
vector_x = pd.Series(x_matrix.get_canonical_coordinates_of(x_name).reshape((-1,)), name=x_name_prefix + parameters_names.alternative_version[x_name])
vector_y = pd.Series(y_matrix.get_canonical_coordinates_of(y_name).reshape((-1,)), name=y_name_prefix + parameters_names.alternative_version[y_name])
bins_number = 50
sns.distplot(vector_x, bins=bins_number, ax=axes[plot_x_pos][plot_y_pos], color='y').set_title(title)
sns.distplot(vector_y, bins=bins_number, ax=axes[plot_x_pos][plot_y_pos], color='b').set_title(title)
# +
f, axes = plt.subplots(2, 2)
fig = plt.gcf()
fig.set_size_inches(20,20)
plot_histogram(Parameters.X, Parameters.X, "In ", "Out ", input_matrix, output_matrix, axes, 0, 0)
plot_histogram(Parameters.Y, Parameters.Y, "In ", "Out ", input_matrix, output_matrix, axes, 0, 1)
plot_histogram(Parameters.THETA_X, Parameters.THETA_X, "In ", "Out ", input_matrix, output_matrix, axes, 1, 0)
plot_histogram(Parameters.THETA_Y, Parameters.THETA_Y, "In ", "Out ", input_matrix, output_matrix, axes, 1, 1)
# -
| notebooks/Histograms of transport- Approximator.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Refactored As Operational Pipeline (with MLRun)
# Install prerequisites
# %pip install mlrun lightgbm shapely
# ## Create an MLRun project and configuration
# +
from os import path
import mlrun
project_name_base = 'nyc-taxi'
project_name, artifact_path = mlrun.set_environment(project=project_name_base, user_project=True)
print(f'Project name: {project_name}')
print(f'Artifact path: {artifact_path}')
# -
# ## Define Nuclio and MLRun Functions
# +
# nuclio: start-code
# -
from os import path
import numpy as np
import pandas as pd
import datetime as dt
from sklearn.model_selection import train_test_split
import lightgbm as lgbm
from mlrun.execution import MLClientCtx
from mlrun.datastore import DataItem
from pickle import dumps
import shapely.wkt
def get_zones_dict(zones_url):
zones_df = pd.read_csv(zones_url)
# Remove unecessary fields
zones_df.drop(['Shape_Leng', 'Shape_Area', 'zone', 'LocationID', 'borough'], axis=1, inplace=True)
# Convert DF to dictionary
zones_dict = zones_df.set_index('OBJECTID').to_dict('index')
# Add lat/long to each zone
for zone in zones_dict:
shape = shapely.wkt.loads(zones_dict[zone]['the_geom'])
zones_dict[zone]['long'] = shape.centroid.x
zones_dict[zone]['lat'] = shape.centroid.y
return zones_dict
def get_zone_lat(zones_dict, zone_id):
return zones_dict[zone_id]['lat']
def get_zone_long(zones_dict, zone_id):
return zones_dict[zone_id]['long']
def clean_df(df):
return df[(df.fare_amount > 0) & (df.fare_amount <= 500) &
(df.PULocationID > 0) & (df.PULocationID <= 263) &
(df.DOLocationID > 0) & (df.DOLocationID <= 263)]
# To Compute Haversine distance
def sphere_dist(pickup_lat, pickup_lon, dropoff_lat, dropoff_lon):
"""
    Return the great-circle distance (km) between pickup and dropoff coordinates.
"""
#Define earth radius (km)
R_earth = 6371
#Convert degrees to radians
pickup_lat, pickup_lon, dropoff_lat, dropoff_lon = map(np.radians,
[pickup_lat, pickup_lon,
dropoff_lat, dropoff_lon])
#Compute distances along lat, lon dimensions
dlat = dropoff_lat - pickup_lat
dlon = dropoff_lon - pickup_lon
#Compute haversine distance
a = np.sin(dlat/2.0)**2 + np.cos(pickup_lat) * np.cos(dropoff_lat) * np.sin(dlon/2.0)**2
return 2 * R_earth * np.arcsin(np.sqrt(a))
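# For reference, the great-circle (haversine) distance computed by `sphere_dist` above is
#
# $$ d = 2R \arcsin\left(\sqrt{\sin^2\tfrac{\Delta\phi}{2} + \cos\phi_1 \cos\phi_2 \sin^2\tfrac{\Delta\lambda}{2}}\right) $$
#
# with Earth radius $R \approx 6371$ km and latitudes $\phi$ / longitudes $\lambda$ in radians.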
def radian_conv(degree):
"""
Return radian.
"""
return np.radians(degree)
def add_airport_dist(dataset):
"""
    Add the combined (pickup + dropoff) great-circle distance to several landmarks as new columns.
    JFK: John F. Kennedy International Airport
    EWR: Newark Liberty International Airport
    LGA: LaGuardia Airport
    SOL: Statue of Liberty
    NYC: New York city center
"""
jfk_coord = (40.639722, -73.778889)
ewr_coord = (40.6925, -74.168611)
lga_coord = (40.77725, -73.872611)
sol_coord = (40.6892,-74.0445) # Statue of Liberty
nyc_coord = (40.7141667,-74.0063889)
pickup_lat = dataset['pickup_latitude']
dropoff_lat = dataset['dropoff_latitude']
pickup_lon = dataset['pickup_longitude']
dropoff_lon = dataset['dropoff_longitude']
pickup_jfk = sphere_dist(pickup_lat, pickup_lon, jfk_coord[0], jfk_coord[1])
dropoff_jfk = sphere_dist(jfk_coord[0], jfk_coord[1], dropoff_lat, dropoff_lon)
pickup_ewr = sphere_dist(pickup_lat, pickup_lon, ewr_coord[0], ewr_coord[1])
dropoff_ewr = sphere_dist(ewr_coord[0], ewr_coord[1], dropoff_lat, dropoff_lon)
pickup_lga = sphere_dist(pickup_lat, pickup_lon, lga_coord[0], lga_coord[1])
dropoff_lga = sphere_dist(lga_coord[0], lga_coord[1], dropoff_lat, dropoff_lon)
pickup_sol = sphere_dist(pickup_lat, pickup_lon, sol_coord[0], sol_coord[1])
dropoff_sol = sphere_dist(sol_coord[0], sol_coord[1], dropoff_lat, dropoff_lon)
pickup_nyc = sphere_dist(pickup_lat, pickup_lon, nyc_coord[0], nyc_coord[1])
dropoff_nyc = sphere_dist(nyc_coord[0], nyc_coord[1], dropoff_lat, dropoff_lon)
dataset['jfk_dist'] = pickup_jfk + dropoff_jfk
dataset['ewr_dist'] = pickup_ewr + dropoff_ewr
dataset['lga_dist'] = pickup_lga + dropoff_lga
dataset['sol_dist'] = pickup_sol + dropoff_sol
dataset['nyc_dist'] = pickup_nyc + dropoff_nyc
return dataset
def add_datetime_info(dataset):
#Convert to datetime format
dataset['pickup_datetime'] = pd.to_datetime(dataset['tpep_pickup_datetime'],format="%Y-%m-%d %H:%M:%S")
dataset['hour'] = dataset.pickup_datetime.dt.hour
dataset['day'] = dataset.pickup_datetime.dt.day
dataset['month'] = dataset.pickup_datetime.dt.month
dataset['weekday'] = dataset.pickup_datetime.dt.weekday
dataset['year'] = dataset.pickup_datetime.dt.year
return dataset
def fetch_data(context : MLClientCtx, taxi_records_csv_path: DataItem, zones_csv_path: DataItem):
context.logger.info('Reading taxi records data from {}'.format(taxi_records_csv_path))
taxi_records_dataset = taxi_records_csv_path.as_df()
context.logger.info('Reading zones data from {}'.format(zones_csv_path))
zones_dataset = zones_csv_path.as_df()
target_path = path.join(context.artifact_path, 'data')
context.logger.info('Saving datasets to {} ...'.format(target_path))
# Store the data sets in your artifacts database
context.log_dataset('nyc-taxi-dataset', df=taxi_records_dataset, format='csv',
index=False, artifact_path=target_path)
context.log_dataset('zones-dataset', df=zones_dataset, format='csv',
index=False, artifact_path=target_path)
def get_zones_dict(zones_df):
    # Remove unnecessary fields
zones_df.drop(['Shape_Leng', 'Shape_Area', 'zone', 'LocationID', 'borough'], axis=1, inplace=True)
# Convert DF to dictionary
zones_dict = zones_df.set_index('OBJECTID').to_dict('index')
# Add lat/long to each zone
for zone in zones_dict:
shape = shapely.wkt.loads(zones_dict[zone]['the_geom'])
zones_dict[zone]['long'] = shape.centroid.x
zones_dict[zone]['lat'] = shape.centroid.y
return zones_dict
def get_zone_lat(zones_dict, zone_id):
return zones_dict[zone_id]['lat']
def get_zone_long(zones_dict, zone_id):
return zones_dict[zone_id]['long']
def transform_dataset(context : MLClientCtx, taxi_records_csv_path: DataItem, zones_csv_path: DataItem):
context.logger.info('Begin datasets transform')
context.logger.info('zones_csv_path: ' + str(zones_csv_path))
zones_df = zones_csv_path.as_df()
# Get zones dictionary
zones_dict = get_zones_dict(zones_df)
train_df = taxi_records_csv_path.as_df()
# Clean DF
train_df = clean_df(train_df)
# Enrich DF
train_df['pickup_latitude'] = train_df.apply(lambda x: get_zone_lat(zones_dict, x['PULocationID']), axis=1 )
train_df['pickup_longitude'] = train_df.apply(lambda x: get_zone_long(zones_dict, x['PULocationID']), axis=1 )
train_df['dropoff_latitude'] = train_df.apply(lambda x: get_zone_lat(zones_dict, x['DOLocationID']), axis=1 )
train_df['dropoff_longitude'] = train_df.apply(lambda x: get_zone_long(zones_dict, x['DOLocationID']), axis=1 )
train_df = add_datetime_info(train_df)
train_df = add_airport_dist(train_df)
train_df['pickup_latitude'] = radian_conv(train_df['pickup_latitude'])
train_df['pickup_longitude'] = radian_conv(train_df['pickup_longitude'])
train_df['dropoff_latitude'] = radian_conv(train_df['dropoff_latitude'])
train_df['dropoff_longitude'] = radian_conv(train_df['dropoff_longitude'])
train_df.drop(['VendorID', 'tpep_pickup_datetime', 'tpep_dropoff_datetime', 'congestion_surcharge', 'improvement_surcharge', 'pickup_datetime',
'extra', 'mta_tax', 'tip_amount', 'tolls_amount', 'total_amount', 'RatecodeID', 'store_and_fwd_flag',
'PULocationID', 'DOLocationID', 'payment_type'],
axis=1, inplace=True, errors='ignore')
# Save dataset to artifact
target_path = path.join(context.artifact_path, 'data')
context.log_dataset('nyc-taxi-dataset-transformed', df=train_df, artifact_path=target_path, format='csv')
context.logger.info('End dataset transform')
params = {
'boosting_type':'gbdt',
'objective': 'regression',
'nthread': 4,
'num_leaves': 31,
'learning_rate': 0.05,
'max_depth': -1,
'subsample': 0.8,
'bagging_fraction' : 1,
'max_bin' : 5000 ,
'bagging_freq': 20,
'colsample_bytree': 0.6,
'metric': 'rmse',
'min_split_gain': 0.5,
'min_child_weight': 1,
'min_child_samples': 10,
'scale_pos_weight':1,
'zero_as_missing': True,
'seed':0,
'num_rounds':50000
}
def train_model(context: MLClientCtx, input_ds: DataItem):
context.logger.info('Begin training')
context.logger.info('LGBM version is ' + str(lgbm.__version__))
train_df = input_ds.as_df()
y = train_df['fare_amount']
train_df = train_df.drop(columns=['fare_amount'])
train_df = train_df.drop(train_df.columns[[0]], axis=1)
x_train,x_test,y_train,y_test = train_test_split(train_df,y,random_state=123,test_size=0.10)
train_set = lgbm.Dataset(x_train, y_train, silent=False,categorical_feature=['year','month','day','weekday'])
valid_set = lgbm.Dataset(x_test, y_test, silent=False,categorical_feature=['year','month','day','weekday'])
model = lgbm.train(params, train_set = train_set, num_boost_round=10000,early_stopping_rounds=500,verbose_eval=500, valid_sets=valid_set)
context.log_model('FareModel',
body=dumps(model),
artifact_path=context.artifact_subpath("models"),
model_file="FareModel.pkl")
context.logger.info('End training')
# +
# nuclio: end-code
# -
# ## Set Input Paths
taxi_records_csv_path = 'https://s3.wasabisys.com/iguazio/data/Taxi/yellow_tripdata_2019-01_subset.csv'
zones_csv_path = 'https://s3.wasabisys.com/iguazio/data/Taxi/taxi_zones.csv'
# ## Convert Code to a Function
taxi_func = mlrun.code_to_function(name='taxi',
kind='job',
image='mlrun/mlrun',
requirements=['lightgbm', 'shapely'])
# ## Run `fetch_data` Locally
#
# We can test our code locally by calling the function with the `local` parameter set to `True`
fetch_data_run = taxi_func.run(handler='fetch_data',
inputs={'taxi_records_csv_path': taxi_records_csv_path,
'zones_csv_path': zones_csv_path},
local=True)
fetch_data_run.outputs
# ## Run on the Cluster
# ### Prepare Cluster Function
# Create an MLRun function and create a custom image for it (that uses shapely).
from mlrun.platforms import auto_mount
taxi_func.apply(auto_mount())
taxi_func.deploy()
fetch_data_run = taxi_func.run(name='fetch_data',
handler='fetch_data',
inputs={'taxi_records_csv_path': taxi_records_csv_path,
'zones_csv_path': zones_csv_path})
fetch_data_run.outputs
# ## Transform the Dataset
transform_dataset_run = taxi_func.run(name='transform_dataset',
handler='transform_dataset',
inputs={'taxi_records_csv_path': fetch_data_run.outputs['nyc-taxi-dataset'],
'zones_csv_path': fetch_data_run.outputs['zones-dataset']})
transform_dataset_run.outputs
# ## Train Model
train_model_run = taxi_func.run(name='train_model',
handler='train_model',
inputs={'input_ds': transform_dataset_run.outputs['nyc-taxi-dataset-transformed']})
train_model_run.outputs
# ## Serving
# The model serving class is in model-serving.ipynb.
# +
serving = mlrun.code_to_function(filename=path.abspath('model-serving.ipynb')).apply(auto_mount())
serving.spec.default_class = 'LGBMModel'
serving.add_model('taxi-serving', train_model_run.outputs['FareModel'])
serving_address = serving.deploy()
# -
my_data = '''{"inputs":[[1,0.80,0.711950,-1.291073,0.712059,1.290988,13,1,1,1,2019,47.274013,40.386065,16.975747,26.587155,18.925788]]}'''
serving.invoke('/v2/models/taxi-serving/predict', my_data)
# ## Kubeflow Pipeline
# ### Create Project Object
# +
project_path = path.abspath('conf')
project = mlrun.new_project(project_name_base,
context=project_path,
init_git=True,
user_project=True)
project.set_function(f'db://{project.name}/taxi')
project.set_function(f'db://{project.name}/model-serving')
# -
# ### Create the Workflow
# +
# %%writefile {path.join(project_path, 'workflow.py')}
from kfp import dsl
from mlrun.platforms import auto_mount
funcs = {}
taxi_records_csv_path = 'https://s3.wasabisys.com/iguazio/data/Taxi/yellow_tripdata_2019-01_subset.csv'
zones_csv_path = 'https://s3.wasabisys.com/iguazio/data/Taxi/taxi_zones.csv'
# init functions is used to configure function resources and local settings
def init_functions(functions: dict, project=None, secrets=None):
for f in functions.values():
f.apply(auto_mount())
@dsl.pipeline(
name="NYC Taxi Demo",
description="Convert ML script to MLRun"
)
def kfpipeline():
# build our ingestion function (container image)
builder = funcs['taxi'].deploy_step(skip_deployed=True)
# run the ingestion function with the new image and params
ingest = funcs['taxi'].as_step(
name="fetch_data",
handler='fetch_data',
image=builder.outputs['image'],
inputs={'taxi_records_csv_path': taxi_records_csv_path,
'zones_csv_path': zones_csv_path},
outputs=['nyc-taxi-dataset', 'zones-dataset'])
# Join and transform the data sets
transform = funcs["taxi"].as_step(
name="transform_dataset",
handler='transform_dataset',
inputs={"taxi_records_csv_path": ingest.outputs['nyc-taxi-dataset'],
"zones_csv_path" : ingest.outputs['zones-dataset']},
outputs=['nyc-taxi-dataset-transformed'])
# Train the model
train = funcs["taxi"].as_step(
name="train",
handler="train_model",
inputs={"input_ds" : transform.outputs['nyc-taxi-dataset-transformed']},
outputs=['FareModel'])
# Deploy the model
deploy = funcs["model-serving"].deploy_step(models={"taxi-serving_v1": train.outputs['FareModel']}, tag='v2')
# -
project.set_workflow('main', 'workflow.py', embed=True)
project.save()
# ### Run the Workflow
artifact_path = path.abspath('./pipe/{{workflow.uid}}')
run_id = project.run(
'main',
arguments={},
artifact_path=artifact_path,
dirty=True,
watch=True)
| docs/howto/convert-mlrun-code.ipynb |
// ---
// jupyter:
// jupytext:
// text_representation:
// extension: .scala
// format_name: light
// format_version: '1.5'
// jupytext_version: 1.14.4
// kernelspec:
// display_name: Scala
// language: scala
// name: scala
// ---
// # Scala API to EasyForm
// +
val form : EasyForm = new com.twosigma.beakerx.scala.easyform.EasyForm("Form and Run")
form.addTextField("first")
form.put("first", "First")
form.addTextField("last")
form.put("last", "Last")
form.addButton("Go!", "run")
form
// -
// You can access the values from the form with get method:
// + tags=["run"]
"Good morning " + form.get("first") + " " + form.get("last")
// -
// You can set default values on the fields with put method:
form.put("first", "Beaker")
form.put("last", "Berzelius")
// ## Event Handlers for Smarter Forms
//
// You can use `onInit` and `onChange` to handle component events. For button events use `actionPerformed` or `addAction`.
// +
// You can use onInit and onChange to handle component events. For button events use actionPerformed or addAction.
val f1 = new EasyForm("Form and Run")
val first = f1.addTextField("first", 15)
val last = f1.addTextField("last", 15).onInit(new EasyFormListener {
override def execute(value: String): Unit = {
f1.put("last", "setinit1");
}
}).onChange(new EasyFormListener {
override def execute(value: String): Unit = {
first.setValue(f1.get("last") + " extra");
}
});
val button = f1.addButton("action", "action_button")
button.actionPerformed = new EasyFormListener {
override def execute(value: String): Unit = f1.put("last", "action done");
}
f1
// + tags=["action_button"]
f1.get("last") + ", " + f1.get("first")
// -
f1.put("last", "new Value")
f1.put("first", "new Value2")
// ## All Kinds of Fields
// +
val g = new EasyForm("Field Types")
g.addTextField("Short Text Field", 10)
g.addTextField("Text Field")
g.addPasswordField("Password Field", 10)
g.addTextArea("Text Area")
g.addTextArea("Tall Text Area", 10, 5)
g.addCheckBox("Check Box")
val options = Seq("a", "b", "c", "d")
g.addComboBox("Combo Box", options)
g.addComboBox("Editable Combo", options, true)
g.addList("List", options)
g.addList("List Single", options, false)
g.addList("List Two Row", options, 2)
g.addCheckBoxes("Check Boxes", options)
g.addCheckBoxes("Check Boxes H", options, EasyForm.HORIZONTAL)
g.addRadioButtons("Radio Buttons", options)
g.addRadioButtons("Radio Buttons H", options, EasyForm.HORIZONTAL)
g.addDatePicker("Date")
g.addButton("Go!", "run2")
g
// + tags=["run2"]
import java.util.HashMap
import java.util.function.Consumer
val result = new HashMap[String, Object]()
g.keySet().forEach(
new Consumer[String] {
override def accept(key: String): Unit = {
result.put(key, g.get(key))
}
}
)
result
// -
// ### Dates
val gdp = new EasyForm("Field Types")
val date = gdp.addDatePicker("Date")
gdp
gdp.get("Date")
// ### SetDate
// +
import java.util.Date
val easyForm = new EasyForm("Field Types")
// Setup date via String (format is yyyyMMdd) also works
easyForm.addDatePicker("Date").setDate(new Date())
easyForm
// -
// ### Default Values
val h = new EasyForm("Default Values")
h.addTextArea("Default Value", "Initial value")
h.addCheckBox("Default Checked", true)
h.addButton("Press", "check")
h
// + tags=["check"]
val result = new HashMap[String, Object]()
h.keySet().forEach(
new Consumer[String] {
override def accept(key: String): Unit = {
result.put(key, h.get(key))
}
}
)
result
// -
// ## JupyterJSWidgets work with EasyForm
//
// The widgets from [JupyterJSWidgets](JavaWidgets.ipynb) are compatible and can appear in forms.
// +
import com.twosigma.beakerx.widget.IntSlider
val w = new IntSlider()
val widgetForm = new EasyForm("python widgets")
widgetForm.addButton("Press", "widget_test")
widgetForm.addWidget("IntSlider",w)
widgetForm
// + tags=["widget_test"]
widgetForm.get("IntSlider")
// -
| doc/scala/EasyForm.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
from captcha_utils.icp_generator import *
from concurrent.futures import ThreadPoolExecutor
def create(*args):
    # The positional args passed in by executor.map are unused
    username = ''
    password = ''
    cr = CaptchaRequest(username, password)
    cr.run(1000)

# Run create() concurrently across up to 10 worker threads
with ThreadPoolExecutor(max_workers=10) as executor:
    executor.map(create, range(10))
# -
| image/qrcode/generate_data.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# Libraries
import numpy as np
import pandas as pd
from statsmodels.tsa.seasonal import seasonal_decompose
from statsmodels.graphics.tsaplots import plot_acf, plot_pacf
import matplotlib.pyplot as plt
import matplotlib.colors as mc
import seaborn as sns
# Register converters to plot datetime axis
pd.plotting.register_matplotlib_converters()
# Setup matplotlib plotting
# %matplotlib inline
# -
# Load dataset
ds = pd.read_csv('data/ready.csv', parse_dates=['datetime'])
# Sort dataset
ds.sort_values(by='datetime', ascending=True, inplace=True)
# Show dataset
ds.head()
# +
# Add year, month, day (of the week) and hour, separately
ds['year'] = ds.datetime.dt.year
ds['month'] = ds.datetime.dt.month
ds['day'] = ds.datetime.dt.dayofweek
ds['hour'] = ds.datetime.dt.hour
# Check dataset
ds.head()
# +
# Hourly energy consumption, by year
# Make plot
fig, ax = plt.subplots(figsize=(15, 5))
_ = ax.set_title('Hourly energy consumption, by year')
_ = sns.violinplot(data=ds, x='year',y='AEP', ax=ax)
_ = plt.show()
# +
# Hourly energy consumption, by month of the year
# Make plot
fig, ax = plt.subplots(figsize=(15, 7))
_ = ax.set_title('Hourly energy consumption, by month')
_ = sns.violinplot(data=ds, x='month',y='AEP', ax=ax)
_ = plt.savefig('images/aep_by_month.png')
_ = plt.show()
# +
# Hourly energy consumption, by day of the week
# Make plot
fig, ax = plt.subplots(figsize=(15, 5))
_ = ax.set_title('Hourly energy consumption, by day of the week')
_ = sns.violinplot(data=ds, x='day',y='AEP', ax=ax)
_ = plt.show()
# +
# Hourly energy consumption, by hour of the day
# Make plot
fig, ax = plt.subplots(figsize=(15, 7))
_ = ax.set_title('Hourly energy consumption, by hour')
_ = sns.violinplot(data=ds, x='hour',y='AEP', ax=ax)
_ = plt.savefig('images/aep_by_hour.png')
_ = plt.show()
# -
| eda.ipynb |
// -*- coding: utf-8 -*-
// ---
// jupyter:
// jupytext:
// text_representation:
// extension: .scala
// format_name: light
// format_version: '1.5'
// jupytext_version: 1.14.4
// kernelspec:
// display_name: Apache Toree - Scala
// language: scala
// name: apache_toree_scala
// ---
// authors = ["mageswaran"]
// categories = ["Apache Spark", "Distributed Computing"]
// date = "2019-04-07T14:30:00+05:30"
// description = "Apache Spark Dataset API Usage"
// tags = ["SQL", "Dataset", "DataFrame", "Spark"]
// title = "Apache Spark Dataset API Usage"
// # [Apache Spark Dataset](https://spark.apache.org/docs/latest/sql-programming-guide.html)
// - Java @ https://linuxize.com/post/install-java-on-ubuntu-18-04/
// - Toree
// - This notebook uses the Toree Spark kernel to run. Please find the installation guide @ https://toree.apache.org/docs/current/user/installation/
//
// ```
// #Use the Java 8 version of OpenJDK
// sudo update-alternatives --config java
// sudo update-alternatives --config javac
//
// export JAVA_HOME=/usr/lib/jvm/java-8-openjdk-amd64/
// pip install toree
// jupyter toree install --interpreters=Scala,PySpark,SQL --spark_home=/opt/data/spark-2.4.0-bin-hadoop2.7/ --user
// ```
//
// This notebook is about exploring the Dataset APIs.
// ## Some Caveats
// - The types of the columns disappear when running untyped transformations
// - The names of the columns sometimes disappear partially or completely when typed transformations are used
// - Missing value handling
//
//
// DataFrame + Data Types (`case class`) = Dataset
//
// ## Untyped transformations
// - For example, when adding a new column to your Dataset, the result will be a DataFrame, even if you define the type of the new column.
//
// If you want to keep working within a Dataset environment, the steps for an untyped transformation are the following (see the sketch after this list):
//
// * Have a Dataset
// * Apply the function to it
// * The result is a DataFrame
// * Convert the result to Dataset by defining the types of the columns in a case class
//
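// A minimal sketch of this round trip, using a toy `Person` case class and in-memory data invented purely for illustration (not the notebook's Star Wars data; in Toree, also add the `addOuterScope` line used elsewhere in this notebook when defining case classes in a cell):
//
// ```
// case class Person(name: String, age: Int)
// case class PersonWithFlag(name: String, age: Int, adult: Boolean)
//
// val people_ds = Seq(Person("Ann", 34), Person("Bob", 15)).toDS()
// // untyped transformation: withColumn returns a DataFrame
// val people_df = people_ds.withColumn("adult", $"age" >= 18)
// // convert back to a Dataset by giving the column types in a case class
// val people_typed = people_df.as[PersonWithFlag]
// ```
//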
// ## Typed transformations
// When we use a typed transformation, the output is a Dataset with proper types. But if the columns change (fewer or more columns, or new columns created), then the column names we see in the display are valid only in the DataFrame sense (“columnName”). The Dataset reference _.columnName won’t work in these cases, but you can refer to the columns with ._1, ._2, etc. If you would like to have proper column names, use a case class again (a short sketch follows).
//
//
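// A short sketch of this naming issue, again on hypothetical data (the `City` and `NamePop` case classes are invented for illustration):
//
// ```
// case class City(name: String, population: Long)
// case class NamePop(name: String, population: Long)
//
// val cities = Seq(City("Oslo", 700000L), City("Bergen", 280000L)).toDS()
// // typed select: the result is a Dataset[(String, Long)] -- the display shows
// // "name" and "population", but typed operations only see the tuple fields
// val pairs = cities.select($"name".as[String], $"population".as[Long])
// pairs.filter(_._2 > 500000L)         // works
// // pairs.map(_.population)           // does not compile
// pairs.as[NamePop].map(_.population)  // a case class restores the proper names
// ```
//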
// +
import org.apache.spark.sql.{Dataset, DataFrame, SparkSession}
import org.apache.spark.sql.functions._
import org.apache.spark.sql._
val sqlContext = spark.sqlContext
// -
// # 1. CREATING DATASET
// ## MANUALLY
// - Define the data as a sequence.
// - Convert the sequence to DataFrame and define the column names
// - Define the type of the columns by a case class (using proper column names is a must)
// - Convert to Dataset
// In this example we create a small Dataset with two columns: the first column contains the names of Star Wars
// characters and the second one lists the names of their friends.
val df = Seq(("Yoda", "Obi-Wan Kenobi"),
("Anakin Skywalker", "<NAME>"),
("Luke Skywalker", "Han Solo, Leia Skywalker"),
("Leia Skywalker", "Obi-Wan Kenobi"),
("<NAME>patine", "Anakin Skywalker"),
("Han Solo", "Leia Skywalker, Luke Skywalker, Obi-Wan Kenobi, Chewbacca"),
("Obi-Wan Kenobi", "Yoda, <NAME>"),
("R2-D2", "C-3PO"),
("C-3PO", "R2-D2"),
("<NAME>", "<NAME>"),
("Chewbacca", "<NAME>"),
("<NAME>", "<NAME>"),
("Jabba", "<NAME>")
).toDF("name", "friends")
df.show()
df.dtypes
case class Friends(name: String, friends: String)
org.apache.spark.sql.catalyst.encoders.OuterScopes.addOuterScope(this) // Ensure the case class is available in other cells
val friends_ds = df.as[Friends]
friends_ds.show()
// ## MISSING VALUES
case class Friends_Missing(Who: String, friends: Option[String])
org.apache.spark.sql.catalyst.encoders.OuterScopes.addOuterScope(this)
val ds_missing = Seq(
("Yoda", Some("Obi-Wan Kenobi")),
("Anakin Skywalker", Some("Sheev Palpatine")),
("Luke Skywalker", None),
("Leia Skywalker", Some("Obi-Wan Kenobi")),
("Sheev Palpatine", Some("Anakin Skywalker")),
("Han Solo", Some("Leia Skywalker, Luke Skywalker, Obi-Wan Kenobi"))
).toDF("Who", "friends").as[Friends_Missing]
ds_missing.show()
// ## READING FROM CSV
//
// The steps for reading the csv:
//
// - Define the names and the types of the columns in a case class. Note that the names of the columns must be identical to the column names in the header of the file!
// - Read the csv into a DataFrame
// - Convert into Dataset
//
// The result of the read is a DataFrame and, as we have seen earlier, the .as[Characters] at the end of the expression converts it to a Dataset.
// %%pyspark
import os
os.system("wget https://www.balabit.com/blog/wp-content/uploads/2016/12/StarWars.csv")
case class Characters(name: String,
height: Integer,
weight: Option[Integer],
eyecolor: Option[String],
haircolor: Option[String],
jedi: String,
species: String)
org.apache.spark.sql.catalyst.encoders.OuterScopes.addOuterScope(this)
val characters_ds: Dataset[Characters] = sqlContext.
read.
option("header", "true").
option("delimiter", ";").
option("inferSchema", "true").
csv("StarWars.csv").
as[Characters]
// Some explanation of the read function:
//
// - option(“header”, “true”) – the column names are defined in the first row of the file
// - option(“delimiter”, “;”) – the delimiter is the ;
// - option(“inferSchema”, “true”) – detect the column types automatically. The schema could also be given manually (see the section on defining the schema manually below).
characters_ds.show()
characters_ds.dtypes
characters_ds.filter(x => x.eyecolor == "brown").show()
// Doesn't work!
// This is because of the `Option` type in the `case class`.
// Although we don’t see which columns are Option types, we have to keep it in mind when working with them later.
characters_ds.filter(x => x.eyecolor == Some("brown")).show()
// What if we had not considered null values for a non-string column? Let's see!
// +
case class Characters_BadType(name: String,
height: Integer,
weight: Integer,
eyecolor: String,
haircolor: String,
jedi: String,
species: String)
org.apache.spark.sql.catalyst.encoders.OuterScopes.addOuterScope(this)
// -
val characters_BadType_ds: Dataset[Characters_BadType] = sqlContext.
read.
option("header", "true").
option("delimiter", ";").
option("inferSchema", "true").
csv("StarWars.csv").
as[Characters_BadType]
characters_BadType_ds.show()
val characters_BadType_ds2 = characters_BadType_ds.filter(x=> x.jedi=="no_jedi")
characters_BadType_ds2.show()
characters_BadType_ds2.filter(x=> x.haircolor=="brown").show()
// **Error expected**
characters_BadType_ds2.filter(x=> x.weight>79).show()
characters_BadType_ds2.filter(x=> x.weight!=null && x.weight>79).show()
// The conclusion here is that if you cannot trust that a column has all of its values defined, then it is safer to use Option in the case class to handle missing values. Use types without Option[] only for columns where it is 100% certain that no missing values can appear (this applies to numeric, string and all other types).
// ### DEFINING THE SCHEMA OF THE DATAFRAME MANUALLY
// This part is related to DataFrames rather than Datasets.
// When reading the csv file into a DataFrame, we can define the schema manually. We might get the idea that we could control (or detect) missing values during the reading process by using nullable=false in the schema. Let’s try this. The first step is to create the schema manually by defining the column names, types and whether nullable is true or false. Before creating the schema, import some types.
import org.apache.spark.sql.types.{StructType, StructField, StringType, IntegerType}
val DF_schema = StructType(Array(
StructField("name", StringType, false),
StructField("height", IntegerType, false),
StructField("weight", IntegerType, false),
StructField("eyecolor", StringType, false),
StructField("haircolor", StringType, false),
StructField("jedi", StringType, false),
StructField("species", StringType, false)))
DF_schema.printTreeString
val characters1_df = sqlContext.
read.
format("com.databricks.spark.csv").
option("header", "true").
option("delimiter", ";").
schema(DF_schema).
csv("StarWars.csv")
characters1_df.show()
characters1_df.printSchema
// Did you notice? **nullable** in the DataFrame has become **true**
characters1_df.filter($"weight"<75).show()
// # 2. JOINING DATASETS
// We work further with the following two Datasets: the first one called friends_ds created manually and a second one called characters_ds which was read in from the csv file. Let’s join them by the name of the characters.
//
// **Inner join**
// If we use inner join, then the result table will contain the keys included in both Datasets.
// Unfortunately the default syntax of join in Spark keeps the key fields from both Datasets. Thus having a “name” column in both Datasets results in a DataFrame with two identical columns with identical names, and it is impossible to work with them later on, as we get the following error:
// Reference ‘name’ is ambiguous
val bad_join_df = characters_ds.join(friends_ds, characters_ds.col("name") === friends_ds.col("name"))
bad_join_df.show()
// **!!! Did you notice?** There are two `name` columns.
bad_join_df.select($"name")
// The solution to the problem above is to use Seq(“name”) in case the keys have the same name in both Datasets.
val sw_df = characters_ds.join(friends_ds, Seq("name"))
sw_df.show()
// Although we created an inner join of two Datasets, so the column types were all defined, the result of the join is a DataFrame.
// In order to get a Dataset again, create a case class for the names and the types of the joined data and convert the DataFrame to Dataset.
case class SW(name: String,
height: Integer,
weight: Option[Integer],
eyecolor: Option[String],
haircolor: Option[String],
jedi: String,
species: String,
friends: String)
org.apache.spark.sql.catalyst.encoders.OuterScopes.addOuterScope(this)
val sw_ds = sw_df.as[SW]
sw_ds.show()
// #### Other joins
// If we have to keep all the keys from one of the Datasets, we can use “left_outer” or “right_outer” as appropriate.
characters_ds.join(ds_missing, characters_ds.col("name") === ds_missing.col("Who"), "left_outer").show()
// # 3. SELECTING COLUMNS
// The first surprise was how overcomplicated it is to select some columns from a Dataset. We already know the names and the types of the columns, but if we need only a subset of the columns, the names or the types have to be defined again. Let’s see the possibilities:
//
// - If we use map, then the result is a Dataset so the column types are inherited but the column names are lost.
// - If we use select and the column names, then the result is a DataFrame, so the type of the columns are lost.
// - If we use select and provide the column names AND the column types, then the result is a Dataset with seemingly proper column names and proper types.
//
// - map : Dataset -> Dataset (the column names are lost)
// - select + column names -> DataFrame (the column types are lost)
// - select + column names & types -> Dataset
sw_ds.map(x => (x.name, x.weight))
sw_ds.map(x => (x.name, x.weight)).show()
sw_ds.select("name", "weight")
sw_ds.select("name", "weight").show()
sw_ds.select($"name".as[String], $"weight".as[Integer])
sw_ds.select($"name".as[String], $"weight".as[Integer]).show()
// This last solution seems to work well but it has two problems:
//
// - 1. The result is a Dataset[(String, Integer)]. Despite seeing the column names in the display, these names are valid only if we use the Dataset as a DataFrame. So we can refer to the columns as “weight” in untyped expressions (for example .select(“weight”)), but we cannot use the column names in typed expressions where _.weight is needed. For example, using groupByKey(_.weight) or .map(x => x.weight) after this selection step will result in the following error:
// error: value weight is not a member of (String, Integer)
// Instead of the column names we can refer to the columns in typed operations as ._1 or ._2. So although the names are inherited in the DataFrame sense, they were lost in the Dataset sense. (Does it make sense?)
// - 2. When defining “weight”.as[Integer] we cannot use “weight”.as[Option[Integer]], and this could lead to a NullPointerException because there is a missing value in that column, for example when using filter(x => x._2 > 79)
//
// Whichever way the select is executed, you will end up creating a proper case class. We can correct all 3 ways easily by using a new case class:
case class NameWeight(name: String, weight: Option[Integer])
org.apache.spark.sql.catalyst.encoders.OuterScopes.addOuterScope(this)
// +
//1. corrected
sw_ds.map(x => NameWeight(x.name, x.weight))
// -
sw_ds.map(x => NameWeight(x.name, x.weight)).show()
//2. corrected
sw_ds.select("name", "weight").as[NameWeight]
sw_ds.select("name", "weight").as[NameWeight].show()
//3. corrected
sw_ds.select($"name".as[String], $"weight".as[Integer]).as[NameWeight]
sw_ds.select($"name".as[String], $"weight".as[Integer]).as[NameWeight].show()
// # 4. RENAMING COLUMNS
// By renaming some of the columns we get a DataFrame. (At least I could not find a column renamer function producing a Dataset.)
//
// - If we use withColumnRenamed then we can rename the columns one-by-one, the result is a DataFrame.
// - We can convert the Dataset to DataFrame and define all new column names in one step and the result is obviously a DataFrame.
//
// It doesn’t matter which way is used to rename the columns, the result is a DataFrame. Finally we have to create a case class for the new column names and types and convert the DataFrame to a Dataset.
sw_ds.withColumnRenamed("name", "Name")
sw_ds.withColumnRenamed("name", "Name").show()
// The output shows that the result is a DataFrame.
// We can rename more columns by chaining this function.
sw_ds.withColumnRenamed("name", "Who").withColumnRenamed("jedi", "Religion").show()
// If we would like to rename all the columns, then a shorter way could be to convert the Dataset into a DataFrame by .toDF and then define the new column names.
//
// The column names in the case class are not case sensitive. If you changed only upper case – lower case pairs in the column names, then your original case class should still work. But if the new column names are different in at least one letter, then a new case class definition is needed with proper column names.
sw_ds.toDF(Seq("Name", "Height", "Weight", "Eyecolor", "Haircolor", "Jedi", "Species", "Friends"): _*).as[SW]
sw_ds.toDF(Seq("Name", "Height", "Weight", "Eyecolor", "Haircolor", "Jedi", "Species", "Friends"): _*).as[SW].show()
// +
// sw_ds.toDF(Seq("WHO", "Height", "Weight", "Eyecolor", "Haircolor", "Jedi", "Species", "Friends"): _*).as[S
// -
case class SW2(WHO: String,
height: Integer,
weight: Option[Integer],
eyecolor: Option[String],
haircolor: Option[String],
jedi: String,
species: String,
friends: String)
org.apache.spark.sql.catalyst.encoders.OuterScopes.addOuterScope(this)
sw_ds.toDF(Seq("WHO", "Height", "Weight", "Eyecolor", "Haircolor", "Jedi", "Species", "Friends"): _*).as[SW2]
sw_ds.toDF(Seq("WHO", "Height", "Weight", "Eyecolor", "Haircolor", "Jedi", "Species", "Friends"): _*).as[SW2].show()
// # 5. ADDING NEW COLUMNS
// There are several ways to add new columns to your Dataset based on what kind of column is created. I show the main types with examples. Regardless of how you add the new column and whether its type was defined or not, the result will be a DataFrame. So if you need a Dataset output, then define a proper case class and convert the DataFrame into a Dataset.
//
// ### CONSTANT COLUMN
// Adding a constant column is easy. Use the withColumn function and provide the name of the new column and lit() with the value inside the brackets. The result is a DataFrame even if you define the type of the new column by sw_ds.withColumn("count", lit(1).as[Integer])
sw_ds.withColumn("count", lit(1))
sw_ds.withColumn("count", lit(1)).show()
// ### EXPRESSION1 – TYPE1
// In these expressions the function needs only one string as input, so we can simply use “column_name”. In the example I calculate the logarithm of the weight of each character.
sw_ds.withColumn("log_weight", log("weight"))
sw_ds.withColumn("log_weight", log("weight")).show()
// ### EXPRESSION2 – TYPE2
// When the transformation needs more than a string input, we have to use dataset_name(“column_name”) when referring to a column of the original Dataset.
//
// For example we can calculate the Body mass index of the characters.
sw_ds.withColumn("BMI", sw_ds("weight")/(sw_ds("height")*sw_ds("height")/10000))
sw_ds.withColumn("BMI", sw_ds("weight")/(sw_ds("height")*sw_ds("height")/10000)).show()
// ### USER DEFINED FUNCTION
// Finally, if we use expressions that are defined not for columns but for primitive types like Integer or String, then we have to create user-defined functions, aka UDFs.
//
// The example I show creates a column containing a tuple made from two columns in the Dataset. We can create tuples from primitive type items, so we define a UDF and then use it on columns.
import scala.reflect.runtime.universe.TypeTag
def createTuple2[Type_x: TypeTag, Type_y: TypeTag] = udf[(Type_x, Type_y), Type_x, Type_y]((x: Type_x, y: Type_y) => (x, y))
sw_ds.withColumn("Jedi_Species", createTuple2[String, String].apply(sw_ds("jedi"), sw_ds("species")))
sw_ds.withColumn("Jedi_Species", createTuple2[String, String].apply(sw_ds("jedi"), sw_ds("species"))).show()
// We can create tuple column from columns with missing values as well.
sw_ds.withColumn("Name_Weight", createTuple2[String, Option[Integer]].apply(sw_ds("name"), sw_ds("weight")))
sw_ds.withColumn("Name_Weight", createTuple2[String, Option[Integer]].apply(sw_ds("name"), sw_ds("weight"))).show()
// # 6. FILTERING ROWS
// By filtering we can get a subset of the rows of the Dataset. The good news is that the names and the types of the columns do not change at all, so the result of a filter is always a Dataset with proper column names. But we have to be very careful when working with columns containing missing values. In the filter function we have to define how to filter the defined values and how to filter the missing values. Let's see some examples.
// Filter a string column with no missing values: select the humans in the Dataset.
sw_ds.filter(x => x.species=="human")
sw_ds.filter(x => x.species=="human").show()
// The same syntax could be used for eyecolor (which contains missing values) without getting an error or a warning. But the result is empty, although there are characters with brown eyes.
sw_ds.filter(x => x.eyecolor== "brown").show()
// The reason is that the == operation works across different types, but a string value will never be equal to an Option value, as those are represented by Some(value). There are two ways to handle the situation:
//
// - Use Some(value) in the filter
// - use the .getOrElse() function and define what should be returned in case of a missing value. In the example I use .getOrElse(“”), which provides the value if it was defined, or an empty string if the value was missing in the record
//
// So let’s see both ways:
sw_ds.filter(x => x.eyecolor == Some("brown")).show()
sw_ds.filter(x => x.eyecolor.getOrElse("") == "brown").show()
// Filtering numeric columns without missing values works as expected: filter characters whose height is less than 100 cm.
sw_ds.filter(x => x.height<100).show()
// If there might be missing values in a numeric column (for example the type is Option[Integer]) then the syntax above gives an error.
// sw_ds.filter(x => x.weight >=79)
// would end in
// …error: value >= is not a member of Option[Integer] …
//
// The solution is to use pattern matching and define explicitly the filter for Some() values and for None (missing) values.
//
//
sw_ds.filter(x => x.weight match {case Some(y) => y>=79
case None => false} ).show()
// # 7. GROUPBY AND AGGREGATING
// Calculate a function (mean, min, max, etc.) of numeric columns by groups defined in a key column. The syntax is as expected, but we have to define the type of the result columns in the aggregation function.
//
// - the key for groupby is given in: groupByKey(_.columnname)
// - the aggregation functions are given in .agg( function_name1(“columnName1”).as[new_type1], function_name2(“columnName2”).as[new_type2] )
//
// We can define several aggregation functions for different columns within one aggregation.
sw_ds.groupByKey(_.species).agg(max($"height").as[Integer], min($"height").as[Integer], mean($"weight").as[Double], count($"species").as[Long] )
sw_ds.groupByKey(_.species).agg(max($"height").as[Integer], min($"height").as[Integer], mean($"weight").as[Double], count($"species").as[Long] ).show()
// The same works for columns with missing values. Jabba was not included in the calculation as his weight is not known.
sw_ds.groupByKey(_.eyecolor).agg(mean($"weight").as[Double])
sw_ds.groupByKey(_.eyecolor).agg(mean($"weight").as[Double]).show()
// The key can contain missing values, and those will form a separate group in the groupByKey. The columns in the aggregation function might also contain missing values, and they will be ignored in the numerical computations.
//
// Please note that the output is a Dataset with proper column types, but the column names can be used only as DataFrame columns (“columnName”); as Dataset columns they can be referred to as ._1, ._2, etc. For example .map(x => x.value) won’t work (a workaround is sketched below).
//
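// If proper, typed column names are needed after such an aggregation, one option is to map the resulting tuples into a new case class. A minimal sketch (the `SpeciesStats` class is invented for illustration; in Toree, remember the `addOuterScope` line used elsewhere in this notebook):
//
// ```
// case class SpeciesStats(species: String, tallest: Integer, n: Long)
//
// val stats = sw_ds.
//   groupByKey(_.species).
//   agg(max($"height").as[Integer], count($"species").as[Long]).
//   map { case (sp, h, n) => SpeciesStats(sp, h, n) }
//
// stats.map(_.n).show()   // typed references such as _.n work again
// ```
//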
// ### GROUPBY MULTIPLE KEYS
// To use multiple keys in groupByKey, create a tuple from the key columns.
sw_ds.groupByKey(x=>(x.species, x.jedi, x.haircolor)).agg(mean($"weight").as[Double], count($"species").as[Long])
sw_ds.groupByKey(x=>(x.species, x.jedi, x.haircolor)).agg(mean($"weight").as[Double], count($"species").as[Long] ).show()
// # 8. SORTING BY ROWS
// Sort is easy, there is no surprise in the syntax.
sw_ds.orderBy($"species".desc, $"weight")
sw_ds.orderBy($"species".desc, $"weight").show()
// # 9. APPENDING DATASETS
// Adding two Datasets with the same case class definition is a cake-walk.
sw_ds.union(sw_ds)
sw_ds.union(sw_ds).show()
// # 10. OTHER USEFUL FUNCTIONS
// ### DESCRIBE THE DATASET
// - get the number of records by using count()
// - get the number of columns by using .columns.size
// - get the schema by using printSchema or by dtypes
sw_ds.count()
sw_ds.columns.size
sw_ds.printSchema
sw_ds.dtypes
// ### SOME MORE AGGREGATION FUNCTIONS
// Calculate correlation between columns (optionally by groups)
sw_ds.agg(corr($"height", $"weight").as[Double])
sw_ds.agg(corr($"height", $"weight").as[Double]).show()
sw_ds.groupByKey(_.jedi).agg(corr($"height", $"weight").as[Double]).show()
// Get the first value by group
sw_ds.groupByKey(_.species).agg(first($"name").as[String]).show()
// ### OTHER USEFUL FUNCTIONS FOR CREATING NEW COLUMNS
// In the following examples we add new columns to the Dataset, thus the result is a DataFrame. In order to get a Dataset again, create a proper case class and convert the result into a Dataset.
//
// The first example is the hash function of a column.
sw_ds.withColumn("hashed_hair", hash(sw_ds("haircolor"))).show()
// The next example is calculating the size of a collection. For example, get the number of friends listed in the friends column. We need two steps:
//
// - split the friends column at the string “, “. This way we get an Array of Strings. Note that map drops the column names, so after this step we have to refer to the split column as _2
// - use size() to get the number of items in that Array
sw_ds.
map(x => (x.name, x.friends.split(", ")) ).
withColumn("NrOfFriendsListed", size($"_2")).show()
// Add a monotonically increasing id into a new column using the function monotonically_increasing_id.
sw_ds.withColumn("id", monotonically_increasing_id).show()
//Create a column containing random numbers.
sw_ds.withColumn("random",rand).show()
// Calculate the length of the strings in a column. For example, count the length of the character names in our Dataset.
sw_ds.withColumn("name_lenth", length(sw_ds("name"))).show()
//We can also get the levenshtein distance between two string columns:
sw_ds.withColumn("name_species_diff", levenshtein(sw_ds("name"), sw_ds("species"))).show()
//Finally we can find the location of a substring within a string by using locate. In the example we look for the first occurrence of letter “S” in the name of the characters.
sw_ds.withColumn("Loc_y", locate("S", sw_ds("name"))).show()
// # 11. FRIENDCOUNT EXAMPLE
//
// THERE IS NO SPARK TUTORIAL WITHOUT THE BELOVED WORDCOUNT EXAMPLE 😉
//
// I prepared a slightly modified version of the wordcount task. Let's calculate how many times each character was referred to as a friend in the friends column.
//
// I solve this problem in two ways.
//
// First solution
// In the first solution I use only the friends column and do the following steps:
//
// - map – select the column friends
// - flatMap and split – split the strings in the friends column at “, ” – as a result every full name will be in a new row
// - groupByKey – the key is the new (split) column
// - count – get the counts
//
// So the result is how many times a character was mentioned as a friend.
//
// If you wanted to run a plain wordcount instead, you would split the text at spaces by using split(" ") – a minimal sketch on hypothetical data follows, and then the friendcount code.
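// The `lines` value below is invented purely for illustration and is not part of this notebook's data:
//
// ```
// val lines = Seq("to be or not to be", "to be is to do").toDS()
// lines.
//   flatMap(_.split(" ")).
//   groupByKey(identity).
//   count().
//   show()
// ```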
sw_ds.
map(x => x.friends).
flatMap(_.split(", ")).
groupByKey(_.toString).
count().
show()
// If the friends column has missing values, i.e. the type is Option[String], then we have to use .getOrElse(“”) to handle them.
ds_missing.
map(x => x.friends).
flatMap(_.getOrElse("").split(", ")).
groupByKey(_.toString).
count().
show()
// Second solution
// In the second solution I keep the name column from the original Dataset as well. Thus we will see the name – friend pairs, with every referred friend in a new row. This could be useful for a more complex question (for example, how many friends of a character have the letter “S” in their names). We could also count the number of friends listed by each character and the number of times a character was referred to as a friend from the same Dataset.
//
// To get the name – friend pair Dataset do the following steps:
//
// - use map to select the columns name and friends, the latter split at the string “, “
// - use withColumn to create a new column containing the exploded split friends. The explode creates a new row for every item in the split friend column. The first argument in the withColumn function is the name of the newly created column. If we write _2 here, then we overwrite the split friend column.
// Let’s see the code in action:
import org.apache.spark.sql.functions.explode
sw_ds.
map(x => (x.name, x.friends.split(", ")) ).
withColumn("friend", explode($"_2")).
show()
// In the example above there is a _2 column containing the result of the split and a third friend column with a row for every item in the _2 column. In the next example I use _2 as the name of the new column, thus overwriting the split friend column, and then rename the columns. Then a new case class is defined and the result is converted to a Dataset. We will do more transformations on this Dataset.
case class NameFriend(name: String, friend: String)
org.apache.spark.sql.catalyst.encoders.OuterScopes.addOuterScope(this)
val NameFriend_df =sw_ds.
map(x => (x.name, x.friends.split(", ")) ).
withColumn("_2", explode($"_2")).
toDF(Seq("name", "friend"): _*)
val NameFriend_ds = NameFriend_df.as[NameFriend]
NameFriend_ds.show()
// Finally we will answer three different questions using the NameFriend_ds Dataset:
//
// A. How many times was each character referred to as a friend?
//
// Solution:
//
// - groupByKey where the key is the split and exploded friend column
// - count – calculate the number of occurrences of each referred friend
// - orderBy – sort the values by decreasing popularity
//A.
NameFriend_ds.
groupByKey(_.friend).
count().
orderBy($"count(1)".desc).
show()
// Han Solo and Obi-Wan Kenobi were the most popular; they were mentioned by 3 other characters as their friends.
//
// B. How many friends were listed by the characters?
//
// Solution:
//
// - groupByKey where the key is the name of the characters
// - count – calculate the number of occurrences of each name
// - orderBy – sort the values by decreasing number of listed friends
//B.
NameFriend_ds.
groupByKey(_.name).
count().
orderBy($"count(1)".desc).
show()
// Han Solo listed 4 friends, Luke listed 2, etc.
//
// C. How many friends were listed with letter “S” in their names by the characters?
//
// Solution:
//
// - create a case class containing a new Integer column
// - withColumn – add the new column with the position of letter “S”
// - convert the result into a Dataset
// - filter rows where the position of “S” is greater than 0 (the remaining rows contain a friend with the letter “S”)
// - groupByKey – where the key is the name of the characters
// - count the number of rows by characters in the filtered Dataset
// - orderBy – sort the values by decreasing number of friends with letter “S” in their names
case class NameFriendS_ds(name: String, friend: String, S_in_friend:Integer)
org.apache.spark.sql.catalyst.encoders.OuterScopes.addOuterScope(this)
//C.
NameFriend_ds.
withColumn("S_in_friend", locate("S", (NameFriend_ds("friend"))) ).
as[NameFriendS_ds].
filter(x=>x.S_in_friend>0).
groupByKey(_.name).
count().
orderBy($"count(1)".desc).
show()
// We can see, for example, that Han Solo and <NAME> have two friends whose names contain the letter “S”. Characters not listed in the output have no friends with the letter “S” in their names.
| notebooks/spark_dataset_cookbook.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="dz5vVeCEg3dQ" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 122} executionInfo={"status": "ok", "timestamp": 1598602747118, "user_tz": -540, "elapsed": 16227, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "04377306958369458217"}} outputId="879fb2e9-ce48-463e-89cd-0a2f5a3101ad"
from google.colab import drive
drive.mount('/content/drive')
# + id="kex5LHlk7Agv" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} executionInfo={"status": "ok", "timestamp": 1598602747119, "user_tz": -540, "elapsed": 16218, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "04377306958369458217"}} outputId="fe9144bf-c386-495a-c9c5-611df09aab2f"
# cd /content/drive/My\ Drive/Transformer-master/
# + [markdown] id="6O-5cESj7DxK" colab_type="text"
# # Load Libraries
# + id="Llf_WS5g7Alo" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 1000} executionInfo={"status": "ok", "timestamp": 1598602803276, "user_tz": -540, "elapsed": 72370, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "04377306958369458217"}} outputId="54a16924-1d9f-4439-fde1-ce17f07cdec8"
# !apt install aptitude
# !aptitude install mecab libmecab-dev mecab-ipadic-utf8 git make curl xz-utils file -y
# !pip install mecab-python3==0.6
# !pip install japanize_matplotlib
# + id="gcu8U3a07AoQ" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 85} executionInfo={"status": "ok", "timestamp": 1598602805829, "user_tz": -540, "elapsed": 74919, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "04377306958369458217"}} outputId="47d812e1-5aec-4b3a-ef71-32d9180d7d7f"
import numpy as np
import os
import time
import MeCab
import preprocess_utils
import model
import weight_utils
import tensorflow.keras as keras
import tensorflow as tf
print(tf.__version__)
import matplotlib.pyplot as plt
import japanize_matplotlib
# %matplotlib inline
# + [markdown] id="IU7TTHws7Hfg" colab_type="text"
# # Download Japanese-English Translation Data
# + id="L7Wdgxda7ArC" colab_type="code" colab={} executionInfo={"status": "ok", "timestamp": 1598602805830, "user_tz": -540, "elapsed": 74918, "user": {"displayName": "demo hands-on", "photoUrl": "", "userId": "04377306958369458217"}}
# # !wget http://www.manythings.org/anki/jpn-eng.zip
# # !unzip ./jpn-eng.zip
# + [markdown] id="0nqrPqRH7KRx" colab_type="text"
# # Load Data
# + id="9PTyLLU57AzG" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 340} executionInfo={"status": "ok", "timestamp": 1598602806457, "user_tz": -540, "elapsed": 75540, "user": {"displayName": "demo hands-on", "photoUrl": "", "userId": "04377306958369458217"}} outputId="6ab6653e-bb49-4702-e1d2-43ed767fc2f6"
dataset = preprocess_utils.CreateData(
corpus_path = './jpn.txt',
do_shuffle=True,
seed_value=123,
    split_percent=0.95 # fraction of data used for training
)
train_source, train_target, test_source, test_target, train_licence, test_licence = dataset.split_data()
print('**** Amount of data ****')
print('train_source: ', len(train_source))
print('train_target: ', len(train_target))
print('test_source: ', len(test_source))
print('test_target: ', len(test_target))
print('\n')
print('**** Train data example ****')
print('Source Example: ', train_source[0])
print('Target Example: ', train_target[0])
print('Licence: ', train_licence[0])
print('\n')
print('**** Test data example ****')
print('Source Example: ', test_source[0])
print('Target Example: ', test_target[0])
print('Licence: ', test_licence[0])
# + [markdown] id="CNG8La-07Mzd" colab_type="text"
# # Preprocessing
# + id="hf2N9mWz7A2G" colab_type="code" colab={} executionInfo={"status": "ok", "timestamp": 1598602806458, "user_tz": -540, "elapsed": 75539, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "04377306958369458217"}}
BATCH_SIZE = 64 # batch size
MAX_LENGTH = 60 # sequence length
USE_TPU = True # whether to use a TPU
BUFFER_SIZE = 50000
# + id="XuNFsYnB7A5N" colab_type="code" colab={} executionInfo={"status": "ok", "timestamp": 1598602810586, "user_tz": -540, "elapsed": 79665, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "04377306958369458217"}}
train_dataset = preprocess_utils.PreprocessData(
mecab = MeCab.Tagger("-Ochasen"),
source_data = train_source,
target_data = train_target,
max_length = MAX_LENGTH,
batch_size = BATCH_SIZE,
test_flag = False,
train_dataset = None,
)
train_dataset.preprocess_data()
# + id="kQZaiADz8uOk" colab_type="code" colab={} executionInfo={"status": "ok", "timestamp": 1598602811153, "user_tz": -540, "elapsed": 80231, "user": {"displayName": "demo hands-on", "photoUrl": "", "userId": "04377306958369458217"}}
test_dataset = preprocess_utils.PreprocessData(
mecab = MeCab.Tagger("-Ochasen"),
source_data = test_source,
target_data = test_target,
max_length = MAX_LENGTH,
batch_size = BATCH_SIZE,
test_flag = True,
train_dataset = train_dataset
)
test_dataset.preprocess_data()
# + [markdown] id="ZTZf4AzI7P7o" colab_type="text"
# # Create Batches
# + id="t2ls-SZK7A7q" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 697} executionInfo={"status": "ok", "timestamp": 1598602839964, "user_tz": -540, "elapsed": 109037, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "04377306958369458217"}} outputId="797a0709-d479-4258-e753-9c7facf4d99a"
if USE_TPU:
tpu_grpc_url = "grpc://" + os.environ["COLAB_TPU_ADDR"]
tpu_cluster_resolver = tf.distribute.cluster_resolver.TPUClusterResolver(tpu_grpc_url)
tf.config.experimental_connect_to_cluster(tpu_cluster_resolver)
tf.tpu.experimental.initialize_tpu_system(tpu_cluster_resolver)
strategy = tf.distribute.TPUStrategy(tpu_cluster_resolver)
trainset = tf.data.Dataset.from_tensor_slices((train_dataset.source_vector, train_dataset.target_vector))
trainset = trainset.map(lambda source, target: (tf.cast(source, tf.int64), tf.cast(target, tf.int64))).shuffle(buffer_size=BUFFER_SIZE).batch(BATCH_SIZE).prefetch(buffer_size=tf.data.experimental.AUTOTUNE)
if USE_TPU:
trainset = strategy.experimental_distribute_dataset(trainset)
# + id="wFpWc-sg8zFJ" colab_type="code" colab={} executionInfo={"status": "ok", "timestamp": 1598602841148, "user_tz": -540, "elapsed": 110219, "user": {"displayName": "demo hands-on", "photoUrl": "", "userId": "04377306958369458217"}}
if USE_TPU:
PREDICT_BATCH_SIZE = 8
testset = tf.data.Dataset.from_tensor_slices((test_dataset.source_vector, test_dataset.target_vector))
testset = testset.map(lambda source, target: (tf.cast(source, tf.int64), tf.cast(target, tf.int64))).shuffle(buffer_size=50000).batch(PREDICT_BATCH_SIZE).prefetch(buffer_size=tf.data.experimental.AUTOTUNE)
testset = testset.take(1)
testset = strategy.experimental_distribute_dataset(testset)
# + [markdown] id="171z8b3v7Tmg" colab_type="text"
# # Define the Model
# + id="pC54O3da7A-O" colab_type="code" colab={} executionInfo={"status": "ok", "timestamp": 1598602841148, "user_tz": -540, "elapsed": 110217, "user": {"displayName": "demo hands-on", "photoUrl": "", "userId": "04377306958369458217"}}
num_layers=4 # number of layers
d_model=64 # hidden layer dimension
num_heads=4 # number of Multi-Head Attention heads
dff=2048 # Feed Forward Network dimension
dropout_rate = 0.1 # dropout rate
source_vocab_size = max(train_dataset.source_token.values()) + 1 # source vocabulary size
target_vocab_size = max(train_dataset.target_token.values()) + 1 # target vocabulary size
# + id="XvjwLsmP7BBE" colab_type="code" colab={} executionInfo={"status": "ok", "timestamp": 1598602841149, "user_tz": -540, "elapsed": 110216, "user": {"displayName": "demo hands-on", "photoUrl": "", "userId": "04377306958369458217"}}
# Initialize weights
def initialize_weight(checkpoint_path, optimizer, transformer, max_length, batch_size, use_tpu=False):
if os.path.exists(checkpoint_path+'.pkl'):
if use_tpu:
number_of_tpu_cores = tpu_cluster_resolver.num_accelerators()['TPU']
initialize_source, initialize_target = [[1]*max_length]*number_of_tpu_cores, [[1]*max_length]*number_of_tpu_cores
initialize_set = tf.data.Dataset.from_tensor_slices((initialize_source, initialize_target))
initialize_set = initialize_set.map(lambda source, target: (tf.cast(source, tf.int64), tf.cast(target, tf.int64))
).shuffle(buffer_size=BUFFER_SIZE).batch(batch_size).prefetch(
buffer_size=tf.data.experimental.AUTOTUNE
)
initialize_set = strategy.experimental_distribute_dataset(initialize_set)
for inp, tar in initialize_set:
distributed_train_step(inp, tar)
else:
initialize_set = tf.ones([batch_size, max_length], tf.int64)
train_step(initialize_set, initialize_set)
try:
weight_utils.load_weights_from_pickle(checkpoint_path, optimizer, transformer)
except:
print('Failed to load checkpoints.')
else:
print('No available checkpoints.')
# + [markdown] id="zw8zmGVk7XQW" colab_type="text"
# # Prediction
# + id="e_bnPfZx7BD6" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 1000} executionInfo={"status": "ok", "timestamp": 1598603120923, "user_tz": -540, "elapsed": 389985, "user": {"displayName": "demo hands-on", "photoUrl": "", "userId": "04377306958369458217"}} outputId="780c22de-a7aa-445e-e527-8b5e8da979c6"
# %%time
with strategy.scope():
# Transformer
transformer = model.Transformer(num_layers, d_model, num_heads, dff,
source_vocab_size, target_vocab_size,
pe_input=source_vocab_size,
pe_target=target_vocab_size,
rate=dropout_rate)
# Learning Rate
learning_rate = model.CustomSchedule(d_model)
# Optimizer
optimizer = tf.keras.optimizers.Adam(learning_rate, beta_1=0.9, beta_2=0.98,
epsilon=1e-9)
# Loss
loss_object = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True, reduction='none')
# Loss Function
def loss_function(real, pred):
mask = tf.math.logical_not(tf.math.equal(real, 0))
loss_ = loss_object(real, pred)
mask = tf.cast(mask, dtype=loss_.dtype)
loss_ *= mask
return tf.reduce_mean(loss_)
# Metrics
train_loss = tf.keras.metrics.Mean(name='train_loss')
train_accuracy = tf.keras.metrics.SparseCategoricalAccuracy(name='train_accuracy')
# Checkpoint
checkpoint_path = "/content/drive/My Drive/Transformer-master/checkpoints/tpu/model"
def train_step(inp, tar):
tar_inp = tar[:, :-1]
tar_real = tar[:, 1:]
enc_padding_mask, combined_mask, dec_padding_mask = model.create_masks(inp, tar_inp)
with tf.GradientTape() as tape:
predictions, _ = transformer(inp, tar_inp,
True,
enc_padding_mask,
combined_mask,
dec_padding_mask)
loss = loss_function(tar_real, predictions)
gradients = tape.gradient(loss, transformer.trainable_variables)
optimizer.apply_gradients(zip(gradients, transformer.trainable_variables))
train_loss(loss)
train_accuracy(tar_real, predictions)
@tf.function
def distributed_train_step(X, y):
per_replica_losses = strategy.run(train_step, args=(X, y))
return strategy.reduce(tf.distribute.ReduceOp.MEAN, per_replica_losses, axis=None)
def test_step(inp, tar):
enc_padding_mask, combined_mask, dec_padding_mask = model.create_masks(inp, tar)
predictions, attention_weights = transformer(inp, tar,
False,
enc_padding_mask,
combined_mask,
dec_padding_mask)
predicted_id = tf.cast(tf.argmax(predictions[: ,-1:, :], axis=-1), tf.int64)
return inp, tf.concat([tar, predicted_id], axis=-1), attention_weights
@tf.function
def distributed_test_step(X, y):
return strategy.experimental_local_results(strategy.run(test_step, args=(X, y)))
# Initialize Weight
initialize_weight(checkpoint_path, optimizer, transformer, MAX_LENGTH, PREDICT_BATCH_SIZE, use_tpu=USE_TPU)
for inp, tar in testset:
for i in range(MAX_LENGTH):
inp, tar, attn = distributed_test_step(inp, tar)[0]
# + id="JnzYYW2T7BGW" colab_type="code" colab={} executionInfo={"status": "ok", "timestamp": 1598603120926, "user_tz": -540, "elapsed": 389986, "user": {"displayName": "demo hands-on", "photoUrl": "", "userId": "04377306958369458217"}}
def plot_attention_weight(sentence, attention, result):
fig = plt.figure(figsize=(16, 8))
for head in range(attention.shape[0]):
ax = fig.add_subplot(2, 4, head+1)
ax.matshow(attention[head][:-1, :], cmap='viridis')
fontdict = {'fontsize': 10}
ax.set_xticks(range(len(sentence)))
ax.set_yticks(range(result.shape[0]))
ax.set_ylim(result.shape[0]-1.5, -0.5)
tmp_list = []
for i in sentence:
try:
tmp_list.append(train_dataset.source_index[i.numpy()])
except:
pass
ax.set_xticklabels(tmp_list, fontdict=fontdict, rotation=90)
ax.set_yticklabels([train_dataset.target_index[i.numpy()] for i in result
if i < max(train_dataset.target_token.values()) - 1],
fontdict=fontdict)
ax.set_xlabel('Head {}'.format(head+1))
plt.tight_layout()
plt.show()
# + id="WMR8Z_lpLEpg" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 1000} executionInfo={"status": "ok", "timestamp": 1598603127618, "user_tz": -540, "elapsed": 396674, "user": {"displayName": "demo hands-on", "photoUrl": "", "userId": "04377306958369458217"}} outputId="ecd69417-ac1c-4828-965b-655c09e7ab9a"
for i, (s, t, a) in enumerate(zip(inp.values, tar.values, attn['decoder_layer4_block2'].values)):
sentence = tf.squeeze(s)[:tf.argmax(tf.squeeze(s)).numpy()+1]
attention = a[0,:, :, :sentence.shape[0]]
result = tf.squeeze(t)[:tf.argmax(tf.squeeze(t)).numpy()+1][1:]
print("Input:", ' '.join([train_dataset.source_index[i.numpy()] for i in sentence][1:-1]))
print("Output:", ''.join([train_dataset.target_index[i.numpy()] for i in result][:-1]))
plot_attention_weight(sentence, attention, result)
| Predict_on_TPU.ipynb |