| prompt (string, 19 - 1.03M chars) | completion (string, 4 - 2.12k chars) | api (string, 8 - 90 chars) |
| --- | --- | --- |
from flask import Flask, render_template, request, redirect, url_for, session
# from werkzeug.utils import secure_filename
# from sqlalchemy import create_engine
import os
import io
import math
from collections import defaultdict
from math import sin, cos, sqrt, atan2, radians
import numpy as np
import pandas as pd
import pymysql
import pymysql.cursors
from pandas.io import sql
import scipy.optimize as optimize
import scipy.stats as st
import statsmodels.api as sm
import statsmodels.formula.api as smf
from statsmodels.tsa.arima_model import ARIMA
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
from sklearn import linear_model
from sklearn.linear_model import LogisticRegression
import pulp  # the facility-location route below calls pulp.LpVariable / pulp.lpSum by module name
from pulp import *
app = Flask(__name__)
app.secret_key = os.urandom(24)
localaddress="D:\\home\\site\\wwwroot"
localpath=localaddress
os.chdir(localaddress)
@app.route('/')
def index():
return redirect(url_for('home'))
@app.route('/home')
def home():
return render_template('home.html')
@app.route('/demandplanning')
def demandplanning():
return render_template("Demand_Planning.html")
@app.route("/elasticopt",methods = ['GET','POST'])
def elasticopt():
if request.method== 'POST':
start_date =request.form['from']
end_date=request.form['to']
prdct_name=request.form['typedf']
# connection = pymysql.connect(host='localhost',
# user='user',
# password='',
# db='test',
# charset='utf8mb4',
# cursorclass=pymysql.cursors.DictCursor)
#
# x=connection.cursor()
# x.execute("select * from `transcdata`")
# connection.commit()
# datass=pd.DataFrame(x.fetchall())
datass = pd.read_csv("C:\\Users\\1026819\\Downloads\\optimizdata.csv")
# datas = datass[(datass['Week']>=start_date) & (datass['Week']<=end_date )]
datas=datass
df = datas[datas['Product'] == prdct_name]
df=datass
changeData=pd.concat([df['Product_Price'],df['Product_Qty']],axis=1)
changep=[]
changed=[]
for i in range(0,len(changeData)-1):
changep.append(changeData['Product_Price'].iloc[i]-changeData['Product_Price'].iloc[i+1])
# use iloc[i] (not a fixed iloc[1]) so the quantity delta lines up with the price delta
changed.append(changeData['Product_Qty'].iloc[i]-changeData['Product_Qty'].iloc[i+1])
cpd=pd.concat([pd.DataFrame(changep),pd.DataFrame(changed)],axis=1)
cpd.columns=['Product_Price','Product_Qty']
sortedpricedata=df.sort_values(['Product_Price'], ascending=[True])
spq=pd.concat([sortedpricedata['Product_Price'],sortedpricedata['Product_Qty']],axis=1).reset_index(drop=True)
x = spq['Product_Price']
y = spq['Product_Qty']
num_bins = 5
# Bin edges for price and quantity. The original plt.hist calls were commented out,
# which left pint and dint empty and collapsed the grid below to a 0x0 array.
_, pint = np.histogram(x, num_bins)
_, dint = np.histogram(y, num_bins)
arr= np.zeros(shape=(len(pint),len(dint)))
count=0
for i in range(0, len(pint)):
lbp=pint[i]
if i==len(pint)-1:
ubp=pint[i]+1
else:
ubp=pint[i+1]
for j in range(0, len(dint)):
lbd=dint[j]
if j==len(dint)-1:
ubd=dint[j]+1
else:
ubd=dint[j+1]
print(lbd,ubd)
for k in range(0, len(spq)):
if (spq['Product_Price'].iloc[k]>=lbp\
and spq['Product_Price'].iloc[k]<ubp):
if(spq['Product_Qty'].iloc[k]>=lbd\
and spq['Product_Qty'].iloc[k]<ubd):
count+=1
arr[i][j]+=1
price_range=np.zeros(shape=(len(pint),2))
for j in range(0,len(pint)):
lbp=pint[j]
price_range[j][0]=lbp
if j==len(pint)-1:
ubp=pint[j]+1
price_range[j][1]=ubp
else:
ubp=pint[j+1]
price_range[j][1]=ubp
demand_range=np.zeros(shape=(len(dint),2))
for j in range(0,len(dint)):
lbd=dint[j]
demand_range[j][0]=lbd
if j==len(dint)-1:
ubd=dint[j]+1
demand_range[j][1]=ubd
else:
ubd=dint[j+1]
demand_range[j][1]=ubd
pr=pd.DataFrame(price_range)
pr.columns=['Price','Demand']
dr=pd.DataFrame(demand_range)
dr.columns=['Price','Demand']
priceranges=pr.Price.astype(str).str.cat(pr.Demand.astype(str), sep='-')
demandranges=dr.Price.astype(str).str.cat(dr.Demand.astype(str), sep='-')
price=pd.DataFrame(arr)
price.columns=demandranges
price.index=priceranges
pp=price.reset_index()
global data
data=pd.concat([df['Week'],df['Product_Qty'],df['Product_Price'],df['Comp_Prod_Price'],df['Promo1'],df['Promo2'],df['overallsale']],axis=1)
return render_template('dataview.html',cpd=cpd.values,pp=pp.to_html(index=False),data=data.to_html(index=False),graphdata=data.values,ss=1)
return render_template('dataview.html')
@app.route('/priceelasticity',methods = ['GET','POST'])
def priceelasticity():
return render_template('Optimisation_heatmap_revenue.html')
@app.route("/elasticity",methods = ['GET','POST'])
def elasticity():
if request.method== 'POST':
Price=0
Average_Price=0
Promotions=0
Promotionss=0
if request.form.get('Price'):
Price=1
if request.form.get('Average_Price'):
Average_Price=1
if request.form.get('Promotion_1'):
Promotions=1
if request.form.get('Promotion_2'):
Promotionss=1
Modeldata=pd.DataFrame()
Modeldata['Product_Qty']=data.Product_Qty
lst=[]
for row in data.index:
lst.append(row+1)
Modeldata['Week']=np.log(lst)
if Price == 1:
Modeldata['Product_Price']=data['Product_Price']
if Price == 0:
Modeldata['Product_Price']=0
if Average_Price==1:
Modeldata['Comp_Prod_Price']=data['Comp_Prod_Price']
if Average_Price==0:
Modeldata['Comp_Prod_Price']=0
if Promotions==1:
Modeldata['Promo1']=data['Promo1']
if Promotions==0:
Modeldata['Promo1']=0
if Promotionss==1:
Modeldata['Promo2']=data['Promo2']
if Promotionss==0:
Modeldata['Promo2']=0
diffpriceprodvscomp= (Modeldata['Product_Price']-Modeldata['Comp_Prod_Price'])
promo1=Modeldata.Promo1
promo2=Modeldata.Promo2
week=Modeldata.Week
quantityproduct=Modeldata.Product_Qty
df=pd.concat([quantityproduct,diffpriceprodvscomp,promo1,promo2,week],axis=1)
df.columns=['quantityproduct','diffpriceprodvscomp','promo1','promo2','week']
Model = smf.ols(formula='df.quantityproduct ~ df.diffpriceprodvscomp + df.promo1 + df.promo2 + df.week', data=df)
res = Model.fit()
global intercept,diffpriceprodvscomp_param,promo1_param,promo2_param,week_param
intercept=res.params[0]
diffpriceprodvscomp_param=res.params[1]
promo1_param=res.params[2]
promo2_param=res.params[3]
week_param=res.params[4]
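# For readability, the demand model implied by the formula above is roughly:
#   Product_Qty ~ intercept
#                 + diffpriceprodvscomp_param * (Product_Price - Comp_Prod_Price)
#                 + promo1_param * Promo1 + promo2_param * Promo2
#                 + week_param * log(week index)
# (a descriptive sketch of the specification, not additional model output)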
Product_Price_min=0
maxvalue_of_price=int(Modeldata['Product_Price'].max())
Product_Price_max=int(Modeldata['Product_Price'].max())
if maxvalue_of_price==0:
Product_Price_max=1
maxfunction=[]
pricev=[]
weeks=[]
dd=[]
ddl=[]
for vatr in range(0,len(Modeldata)):
weeks.append(lst[vatr])
for Product_Price in range(Product_Price_min,Product_Price_max+1):
function=0
# apply the fitted week coefficient to the log-week regressor (the original multiplied by the raw week index)
function=(intercept+(Modeldata['Promo1'].iloc[vatr]*promo1_param)+(Modeldata['Promo2'].iloc[vatr]*promo2_param) +
(diffpriceprodvscomp_param*(Product_Price-Modeldata['Comp_Prod_Price'].iloc[vatr]))+(week_param*Modeldata['Week'].iloc[vatr]))
maxfunction.append(function)
dd.append(Product_Price)
ddl.append(vatr)
for Product_Price in range(Product_Price_min,Product_Price_max+1):
pricev.append(Product_Price)
df1=pd.DataFrame(maxfunction)
df2=pd.DataFrame(dd)
df3=pd.DataFrame(ddl)
dfo=pd.concat([df3,df2,df1],axis=1)
dfo.columns=['weeks','prices','Demandfunctions']
demand=[]
for rows in dfo.values:
w=int(rows[0])
p=int(rows[1])
d=int(rows[2])
demand.append([w,p,d])
Co_eff=pd.DataFrame(res.params.values)#intercept
standard_error=pd.DataFrame(res.bse.values)#standard error
p_values=pd.DataFrame(res.pvalues.values)
conf_lower =pd.DataFrame(res.conf_int()[0].values)
conf_higher =pd.DataFrame(res.conf_int()[1].values)
R_square=res.rsquared
atr=['Intercept','DeltaPrice','Promo1','Promo2','Week']
atribute=pd.DataFrame(atr)
SummaryTable=pd.concat([atribute,Co_eff,standard_error,p_values,conf_lower,conf_higher],axis=1)
SummaryTable.columns=['Atributes','Co_eff','Standard_error','P_values','conf_lower','conf_higher']
reshapedf=df1.values.reshape(len(Modeldata),(-Product_Price_min+(Product_Price_max+1)))
dataofmas=pd.DataFrame(reshapedf)
maxv=dataofmas.apply( max, axis=1 )
minv=dataofmas.apply(min,axis=1)
avgv=dataofmas.sum(axis=1)/(-Product_Price_min+(Product_Price_max+1))
wks=pd.DataFrame(weeks)
ddofs=pd.concat([wks,minv,avgv,maxv],axis=1)
dataofmas=pd.DataFrame(reshapedf)
kk=pd.DataFrame()
sums=0
for i in range(0,len(dataofmas.columns)):
sums=sums+i
vv=i*dataofmas[[i]]
kk=pd.concat([kk,vv],axis=1)
dfr=pd.DataFrame(kk)
mrevenue=dfr.apply( max, axis=1 )
prices=dfr.idxmax(axis=1)
wks=pd.DataFrame(weeks)
revenuedf=pd.concat([wks,mrevenue,prices],axis=1)
return render_template('Optimisation_heatmap_revenue.html',revenuedf=revenuedf.values,ddofs=ddofs.values,SummaryTable=SummaryTable.to_html(index=False),ss=1,weeks=weeks,demand=demand,pricev=pricev,R_square=R_square)
@app.route('/inputtomaxm',methods=["GET","POST"])
def inputtomaxm():
return render_template("Optimize.html")
@app.route("/maxm",methods=["GET","POST"])
def maxm():
if request.method=="POST":
week=request.form['TimePeriod']
price_low=request.form['Price_Lower']
price_max=request.form['Price_Upper']
promofirst=request.form['Promotion_1']
promosecond=request.form['Promotion_2']
# week=24
# price_low=6
# price_max=20
# promofirst=1
# promosecond=0
#
# time_period=24
#
# global a
# a=243.226225
# global b
# b=-9.699634
# global d
# d=1.671505
# global pr1
# pr1=21.866260
# global pr2
# pr2=-0.511606
# global cm
# cm=-14.559594
# global s_0
# s_0= 2000
# promo1=1
# promo2=0
time_period=int(week)
global a
a=intercept
global b
b=diffpriceprodvscomp_param
global d
d=week_param
global pr1
pr1=promo1_param
global pr2
pr2=promo2_param
global s_0
s_0= 2000
promo1=int(promofirst)
promo2=int(promosecond)
global comp
comp=np.random.randint(7,15,time_period)
def demand(p, a=a, b=b, d=d, promo1=promo1, promo2=promo2, comp=comp, t=np.linspace(1,time_period,time_period)):
""" Return demand given an array of prices p for times t
(see equation 5 above)"""
return a+(b*(p-comp))+(d*t)+(promo1*pr1)+(promo2*pr2)
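# What "equation 5" appears to be, as implemented directly above (a sketch inferred
# from the code rather than quoted from any external source):
#   demand_t(p_t) = a + b * (p_t - comp_t) + d * t + pr1 * promo1 + pr2 * promo2
# i.e. base demand a, price sensitivity b against the competitor price, a linear
# time trend d, and two promotion effects pr1 and pr2.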
def objective(p_t, a, b, d,promo1,promo2, comp, t=np.linspace(1,time_period,time_period)):
return -1.0 * np.sum( p_t * demand(p_t, a, b, d,promo1,promo2, comp, t) )
def constraint_1(p_t, s_0, a, b, d, promo1,promo2, comp, t=np.linspace(1,time_period,time_period)):
""" Inventory constraint. s_0 - np.sum(x_t) >= 0.
This is an inequality constraint. See more below.
"""
return s_0 - np.sum(demand(p_t, a, b, d,promo1,promo2, comp, t))
def constraint_2(p_t):
#""" Positive demand. Another inequality constraint x_t >= 0 """
return p_t
t = np.linspace(1,time_period,time_period)
# Starting values :
b_min=int(price_low)
p_start = b_min * np.ones(len(t))
# bounds on the values :
bmax=int(price_max)
bounds = tuple((0,bmax) for x in p_start)
import scipy.optimize as optimize
# Constraints :
constraints = ({'type': 'ineq', 'fun': lambda x, s_0=s_0: constraint_1(x,s_0, a, b, d,promo1,promo2, comp, t=t)},
{'type': 'ineq', 'fun': lambda x: constraint_2(x)}
)
opt_results = optimize.minimize(objective, p_start, args=(a, b, d,promo1,promo2, comp, t),
method='SLSQP', bounds=bounds, constraints=constraints)
np.sum(opt_results['x'])
opt_price=opt_results['x']
opt_demand=demand(opt_results['x'], a, b, d, promo1, promo2, comp, t=t)
weeks=[]
for row in range(1,len(opt_price)+1):
weeks.append(row)
d=pd.DataFrame(weeks).astype(int)
dd=pd.DataFrame(opt_price)
optimumumprice_perweek=pd.concat([d,dd,pd.DataFrame(opt_demand).astype(int)],axis=1)
optimumumprice_perweek.columns=['Week','Price','Demand']
dataval=optimumumprice_perweek
diff=[]
diffs=[]
for i in range(0,len(opt_demand)-1):
valss=opt_demand[i]-opt_demand[i+1]
diff.append(valss)
diffs.append(i+1)
differenceofdemand_df=pd.concat([pd.DataFrame(diffs),pd.DataFrame(diff)],axis=1)
MP=round(optimumumprice_perweek.loc[optimumumprice_perweek['Price'].idxmin()],1)
minimumprice=pd.DataFrame(MP).T
MaxP=round(optimumumprice_perweek.loc[optimumumprice_perweek['Price'].idxmax()],1)
maximumprice=pd.DataFrame(MaxP).T
averageprice=round((optimumumprice_perweek['Price'].sum()/len(optimumumprice_perweek)),2)
MD=round(optimumumprice_perweek.loc[optimumumprice_perweek['Demand'].idxmin()],0)
minimumDemand=pd.DataFrame(MD).T
MaxD=round(optimumumprice_perweek.loc[optimumumprice_perweek['Demand'].idxmax()],0)
maximumDemand=pd.DataFrame(MaxD).T
averageDemand=round((optimumumprice_perweek['Demand'].sum()/len(optimumumprice_perweek)),0)
totaldemand=round(optimumumprice_perweek['Demand'].sum(),0)
return render_template("Optimize.html",totaldemand=totaldemand,averageDemand=averageDemand,maximumDemand=maximumDemand.values,minimumDemand=minimumDemand.values,averageprice=averageprice,maximumprice=maximumprice.values,minimumprice=minimumprice.values,dataval=dataval.values,differenceofdemand_df=differenceofdemand_df.values,optimumumprice_perweek=optimumumprice_perweek.to_html(index=False),ll=1)
@app.route("/Inventorymanagment",methods=["GET","POST"])
def Inventorymanagment():
return render_template("Inventory_Management.html")
@app.route("/DISTRIBUTION_NETWORK_OPT",methods=["GET","POST"])
def DISTRIBUTION_NETWORK_OPT():
return render_template("DISTRIBUTION_NETWORK_OPTIMIZATION.html")
@app.route("/Procurement_Plan",methods=["GET","POST"])
def Procurement_Plan():
return render_template("Procurement_Planning.html")
#<NAME>
@app.route("/fleetallocation")
def fleetallocation():
return render_template('fleetallocation.html')
@app.route("/reset")
def reset():
conn = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = conn.cursor()
cur.execute("DELETE FROM `input`")
cur.execute("DELETE FROM `output`")
cur.execute("DELETE FROM `Scenario`")
conn.commit()
conn.close()
open(localaddress+'\\static\\demodata.txt', 'w').close()
return render_template('fleetallocation.html')
@app.route("/dalink",methods = ['GET','POST'])
def dalink():
sql = "INSERT INTO `input` (`Route`,`SLoc`,`Ship-to Abb`,`Primary Equipment`,`Batch`,`Prod Dt`,`SW`,`Met Held`,`Heat No`,`Delivery Qty`,`Width`,`Length`,`Test Cut`,`Customer Priority`) VALUES( %s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)"
conn = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = conn.cursor()
if request.method == 'POST':
typ = request.form.get('type')
frm = request.form.get('from')
to = request.form.get('to')
if typ and frm and to:
conn = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = conn.cursor()
curr = conn.cursor()
cur.execute("SELECT * FROM `inventory_data` WHERE `Primary Equipment` = '" + typ + "' AND `Prod Dt` BETWEEN '" + frm + "' AND '" + to + "'")
res = cur.fetchall()
if len(res)==0:
conn.close()
return render_template('fleetallocation.html',alert='No data available')
sfile = pd.DataFrame(res)
df1 = pd.DataFrame(sfile)
df1['Prod Dt'] =df1['Prod Dt'].astype(object)
for index, i in df1.iterrows():
data = (i['Route'],i['SLoc'],i['Ship-to Abb'],i['Primary Equipment'],i['Batch'],i['Prod Dt'],i['SW'],i['Met Held'],i['Heat No'],i['Delivery Qty'],i['Width'],i['Length'],i['Test Cut'],i['Customer Priority'])
curr.execute(sql,data)
conn.commit()
conn.close()
return render_template('fleetallocation.html',typ=" Equipment type: "+typ,frm="From: "+frm,to=" To:"+to,data = sfile.to_html(index=False))
else:
return render_template('fleetallocation.html',alert ='All input fields are required')
return render_template('fleetallocation.html')
@app.route('/optimise', methods=['GET', 'POST'])
def optimise():
open(localaddress+'\\static\\demodata.txt', 'w').close()
conn = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = conn.cursor()
curr = conn.cursor()
cur.execute("DELETE FROM `output`")
conn.commit()
os.system('python optimising.py')
sa=1
cur.execute("SELECT * FROM `output`")
result = cur.fetchall()
if len(result)==0:
say=0
else:
say=1
curr.execute("SELECT * FROM `input`")
sfile = curr.fetchall()
if len(sfile)==0:
conn.close()
return render_template('fleetallocation.html',say=say,sa=sa,alert='No data available')
sfile = pd.DataFrame(sfile)
conn.close()
with open(localaddress+"\\static\\demodata.txt", "r") as f:
content = f.read()
return render_template('fleetallocation.html',say=say,sa=sa,data = sfile.to_html(index=False),content=content)
@app.route("/scenario")
def scenario():
return render_template('scenario.html')
@app.route("/scenario_insert", methods=['GET','POST'])
def scenario_insert():
if request.method == 'POST':
scenario = request.form.getlist("scenario[]")
customer_priority = request.form.getlist("customer_priority[]")
oldest_sw = request.form.getlist("oldest_sw[]")
production_date = request.form.getlist("production_date[]")
met_held_group = request.form.getlist("met_held_group[]")
test_cut_group = request.form.getlist("test_cut_group[]")
sub_grouping_rules = request.form.getlist("sub_grouping_rules[]")
load_lower_bounds = request.form.getlist("load_lower_bounds[]")
load_upper_bounds = request.form.getlist("load_upper_bounds[]")
width_bounds = request.form.getlist("width_bounds[]")
length_bounds = request.form.getlist("length_bounds[]")
description = request.form.getlist("description[]")
conn = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = conn.cursor()
curr = conn.cursor()
lngth = len(scenario)
curr.execute("DELETE FROM `scenario`")
if scenario and customer_priority and oldest_sw and production_date and met_held_group and test_cut_group and sub_grouping_rules and load_lower_bounds and load_upper_bounds and width_bounds and length_bounds and description:
say=0
for i in range(lngth):
scenario_clean = scenario[i]
customer_priority_clean = customer_priority[i]
oldest_sw_clean = oldest_sw[i]
production_date_clean = production_date[i]
met_held_group_clean = met_held_group[i]
test_cut_group_clean = test_cut_group[i]
sub_grouping_rules_clean = sub_grouping_rules[i]
load_lower_bounds_clean = load_lower_bounds[i]
load_upper_bounds_clean = load_upper_bounds[i]
width_bounds_clean = width_bounds[i]
length_bounds_clean = length_bounds[i]
description_clean = description[i]
if scenario_clean and customer_priority_clean and oldest_sw_clean and production_date_clean and met_held_group_clean and test_cut_group_clean and sub_grouping_rules_clean and load_lower_bounds_clean and load_upper_bounds_clean and width_bounds_clean and length_bounds_clean:
cur.execute("INSERT INTO `scenario`(scenario, customer_priority, oldest_sw, production_date, met_held_group, test_cut_group, sub_grouping_rules, load_lower_bounds, load_upper_bounds, width_bounds, length_bounds, description) VALUES('"+scenario_clean+"' ,'"+customer_priority_clean+"','"+oldest_sw_clean+"','"+production_date_clean+"','"+met_held_group_clean+"','"+test_cut_group_clean+"', '"+sub_grouping_rules_clean+"','"+load_lower_bounds_clean+"', '"+load_upper_bounds_clean+"','"+width_bounds_clean+"','"+length_bounds_clean+"','"+description_clean+"')")
else:
say = 1
conn.commit()
if(say==0):
alert='All Scenarios inserted'
else:
alert='Some scenarios were not inserted'
conn.close()
return (alert)
conn.close()
return ('All fields are required!')
return ('Failed!!!')
@app.route("/fetch", methods=['GET','POST'])
def fetch():
if request.method == 'POST':
conn = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = conn.cursor()
cur.execute("SELECT * FROM scenario")
result = cur.fetchall()
if len(result)==0:
conn.close()
return render_template('scenario.html',alert1='No scenarios Available')
result1 = pd.DataFrame(result)
result1 = result1.drop('Sub-grouping rules', axis=1)
conn.close()
return render_template('scenario.html',sdata = result1.to_html(index=False))
return ("Error")
@app.route("/delete", methods=['GET','POST'])
def delete():
if request.method == 'POST':
conn = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = conn.cursor()
cur.execute("DELETE FROM scenario")
conn.commit()
conn.close()
return render_template('scenario.html',alert1="All the scenerios were dropped!")
return ("Error")
@app.route('/papadashboard', methods=['GET', 'POST'])
def papadashboard():
sql1 = "SELECT `Scenario`, MAX(`Wagon-No`) AS 'Wagon Used', COUNT(`Batch`) AS 'Products Allocated', SUM(`Delivery Qty`) AS 'Total Product Allocated', SUM(`Delivery Qty`)/(MAX(`Wagon-No`)) AS 'Average Load Carried', SUM(`Width`)/(MAX(`Wagon-No`)) AS 'Average Width Used' FROM `output` WHERE `Wagon-No`>0 GROUP BY `Scenario`"
conn = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
curs = conn.cursor()
curs.execute("SELECT `scenario` FROM `scenario`")
sdata = curs.fetchall()
if len(sdata)==0:
conn.close()
return render_template('warning.html',alert='No data available')
cur1 = conn.cursor()
cur1.execute(sql1)
data1 = cur1.fetchall()
if len(data1)==0:
conn.close()
return render_template('warning.html',alert='Infeasible due to Insufficient Load')
cu = conn.cursor()
cu.execute("SELECT `length_bounds`,`width_bounds`,`load_lower_bounds`,`load_upper_bounds` FROM `scenario`")
sdaa = cu.fetchall()
sdaa = pd.DataFrame(sdaa)
asa=list()
for index, i in sdaa.iterrows():
hover = "Length Bound:"+str(i['length_bounds'])+", Width Bound:"+str(i['width_bounds'])+", Load Upper Bound:"+str(i['load_upper_bounds'])+", Load Lower Bound:"+str(i['load_lower_bounds'])
asa.append(hover)
asa=pd.DataFrame(asa)
asa.columns=['Details']
data1 = pd.DataFrame(data1)
data1['Average Width Used'] = data1['Average Width Used'].astype(int)
data1['Total Product Allocated'] = data1['Total Product Allocated'].astype(int)
data1['Average Load Carried'] = data1['Average Load Carried'].astype(float)
data1['Average Load Carried'] = round(data1['Average Load Carried'],2)
data1['Average Load Carried'] = data1['Average Load Carried'].astype(str)
fdata = pd.DataFrame(columns=['Scenario','Wagon Used','Products Allocated','Total Product Allocated','Average Load Carried','Average Width Used','Details'])
fdata[['Scenario','Wagon Used','Products Allocated','Total Product Allocated','Average Load Carried','Average Width Used']] = data1[['Scenario','Wagon Used','Products Allocated','Total Product Allocated','Average Load Carried','Average Width Used']]
fdata['Details'] = asa['Details']
fdata = fdata.values
sql11 = "SELECT `Scenario`, SUM(`Delivery Qty`)/(MAX(`Wagon-No`)) AS 'Average Load Carried', COUNT(`Batch`) AS 'Allocated', SUM(`Delivery Qty`) AS 'Load Allocated' FROM `output`WHERE `Wagon-No`>0 GROUP BY `Scenario`"
sql21 = "SELECT COUNT(`Batch`) AS 'Total Allocated' FROM `output` GROUP BY `Scenario`"
sql31 = "SELECT `load_upper_bounds` FROM `scenario`"
conn1 = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur11 = conn1.cursor()
cur21 = conn1.cursor()
cur31 = conn1.cursor()
cur11.execute(sql11)
data11 = cur11.fetchall()
data11 = pd.DataFrame(data11)
cur21.execute(sql21)
data21 = cur21.fetchall()
data21 = pd.DataFrame(data21)
cur31.execute(sql31)
data31 = cur31.fetchall()
data31 = pd.DataFrame(data31)
data11['Average Load Carried']=data11['Average Load Carried'].astype(float)
fdata1 = pd.DataFrame(columns=['Scenario','Utilisation Percent','Allocation Percent','Total Load Allocated'])
fdata1['Utilisation Percent'] = round(100*(data11['Average Load Carried']/data31['load_upper_bounds']),2)
data11['Load Allocated']=data11['Load Allocated'].astype(int)
fdata1[['Scenario','Total Load Allocated']]=data11[['Scenario','Load Allocated']]
data11['Allocated']=data11['Allocated'].astype(float)
data21['Total Allocated']=data21['Total Allocated'].astype(float)
fdata1['Allocation Percent'] = round(100*(data11['Allocated']/data21['Total Allocated']),2)
fdata1['Allocation Percent'] = fdata1['Allocation Percent'].astype(str)
fdat1 = fdata1.values
conn1.close()
if request.method == 'POST':
conn2 = pymysql.connect(host='localhost',user='root',password='',db='inventory_management',charset='utf8mb4',cursorclass=pymysql.cursors.DictCursor)
cur = conn2.cursor()
ata = request.form['name']
cur.execute("SELECT * FROM `output` WHERE `Scenario` = '"+ata+"' ")
ssdata = cur.fetchall()
datasss = pd.DataFrame(ssdata)
data=datasss.replace("Not Allocated", 0)
df=data[['Delivery Qty','Wagon-No','Width','Group-Number']]
df['Wagon-No']=df['Wagon-No'].astype(int)
a=df['Wagon-No'].max()
##bar1
result_array = np.array([])
for i in range (a):
data_i = df[df['Wagon-No'] == i+1]
del_sum_i = data_i['Delivery Qty'].sum()
per_i=[((del_sum_i)/(205000)*100)]
result_array = np.append(result_array, per_i)
result_array1 = np.array([])
for j in range (a):
data_j = df[df['Wagon-No'] == j+1]
del_sum_j = data_j['Width'].sum()
per_util_j=[((del_sum_j)/(370)*100)]
result_array1 = np.append(result_array1, per_util_j)
##pie1
df112 = df[df['Wagon-No'] == 0]
pie1 = df112 ['Width'].sum()
df221 = df[df['Wagon-No'] > 0]
pie11 = df221['Width'].sum()
df1=data[['SW','Group-Number']]
dff1 = df1[data['Wagon-No'] == 0]
da1 =dff1.groupby(['SW']).count()
re11 = np.array([])
res12 = np.append(re11,da1)
da1['SW'] = da1.index
r1 = np.array([])
r12 = np.append(r1, da1['SW'])
df0=data[['Group-Number','Route','SLoc','Ship-to Abb','Wagon-No','Primary Equipment']]
df1=df0.replace("Not Allocated", 0)
f2 = pd.DataFrame(df1)
f2['Wagon-No']=f2['Wagon-No'].astype(int)
####Not-Allocated
f2['Group']=data['Group-Number']
df=f2[['Group','Wagon-No']]
dee = df[df['Wagon-No'] == 0]
deer =dee.groupby(['Group']).count()##Not Allocated
deer['Group'] = deer.index
##Total-Data
f2['Group1']=data['Group-Number']
dfc=f2[['Group1','Wagon-No']]
dfa=pd.DataFrame(dfc)
der = dfa[dfa['Wagon-No'] >= 0]
dear =der.groupby(['Group1']).count()##Wagons >1
dear['Group1'] = dear.index
dear.rename(columns={'Wagon-No': 'Allocated'}, inplace=True)
# join_axes was removed from pd.concat in pandas 1.0; reindexing to dear's index is equivalent
result = pd.concat([deer, dear], axis=1).reindex(dear.index)
resu=result[['Group1','Wagon-No','Allocated']]
result1=resu.fillna(00)
r5 = np.array([])
r6 = np.append(r5, result1['Wagon-No'])
r66=r6[0:73]###Not Allocated
r7 = np.append(r5, result1['Allocated'])
r77=r7[0:73]####total
r8 = np.append(r5, result1['Group1'])
r88=r8[0:73]###group
conn2.close()
return render_template('papadashboard.html',say=1,data=fdata,data1=fdat1,ata=ata,bar1=result_array,bar11=result_array1,pie11=pie1,pie111=pie11,x=r12,y=res12,xname=r88, bar7=r77,bar8=r66)
conn.close()
return render_template('papadashboard.html',data=fdata,data1=fdat1)
@app.route('/facilityallocation')
def facilityallocation():
return render_template('facilityhome.html')
@app.route('/dataimport')
def dataimport():
return render_template('facilityimport.html')
@app.route('/dataimport1')
def dataimport1():
return redirect(url_for('dataimport'))
@app.route('/facility_location')
def facility_location():
return render_template('facility_location.html')
@app.route('/facility')
def facility():
return redirect(url_for('facilityallocation'))
@app.route("/imprt", methods=['GET','POST'])
def imprt():
global customerdata
global factorydata
global Facyy
global Custo
customerfile = request.files['CustomerData'].read()
factoryfile = request.files['FactoryData'].read()
if len(customerfile)==0 or len(factoryfile)==0:
return render_template('facilityhome.html',warning='Data Invalid')
cdat=pd.read_csv(io.StringIO(customerfile.decode('utf-8')))
customerdata=pd.DataFrame(cdat)
fdat=pd.read_csv(io.StringIO(factoryfile.decode('utf-8')))
factorydata=pd.DataFrame(fdat)
Custo=customerdata.drop(['Lat','Long'],axis=1)
Facyy=factorydata.drop(['Lat','Long'],axis=1)
return render_template('facilityimport1.html',loc1=factorydata.values,loc2=customerdata.values,factory=Facyy.to_html(index=False),customer=Custo.to_html(index=False))
@app.route("/gmap")
def gmap():
custdata=customerdata
Factorydata=factorydata
price=1
#to get distance beetween customer and factory
#first get the Dimension
#get no of factories
Numberoffact=len(Factorydata)
#get Number of Customer
Numberofcust=len(custdata)
#Get The dist/unit cost
cost=price
#def function for distance calculation
# approximate radius of earth in km
def dist(lati1,long1,lati2,long2,cost):
R = 6373.0
lat1 = radians(lati1)
lon1 = radians(long1)
lat2 = radians(lati2)
lon2 = radians(long2)
dlon = lon2 - lon1
dlat = lat2 - lat1
a = sin(dlat / 2)**2 + cos(lat1) * cos(lat2) * sin(dlon / 2)**2
c = 2 * atan2(sqrt(a), sqrt(1 - a))
distance =round(R * c,2)
return distance*cost
#Create a list for customer and factory
def costtable(custdata,Factorydata):
distance=list()
for lat1,long1 in zip(custdata.Lat, custdata.Long):
for lat2,long2 in zip(Factorydata.Lat, Factorydata.Long):
distance.append(dist(lat1,long1,lat2,long2,cost))
distable=np.reshape(distance, (Numberofcust,Numberoffact)).T
tab=pd.DataFrame(distable,index=[Factorydata.Factory],columns=[custdata.Customer])
return tab
DelCost=costtable(custdata,Factorydata)#return cost table of the customer and factoery
#creating Demand Table
demand=np.array(custdata.Demand)
col1=np.array(custdata.Customer)
Demand=pd.DataFrame(demand,col1).T
cols=sorted(col1)
#Creating capacity table
fact=np.array(Factorydata.Capacity)
col2=np.array(Factorydata.Factory)
Capacity=pd.DataFrame(fact,index=col2).T
colo=sorted(col2)
#creating Fixed cost table
fixed_c=np.array(Factorydata.FixedCost)
col3=np.array(Factorydata.Factory)
FixedCost= pd.DataFrame(fixed_c,index=col3)
# Create the 'prob' variable to contain the problem data
model = LpProblem("Min Cost Facility Location problem",LpMinimize)
production = pulp.LpVariable.dicts("Production",
((factory, cust) for factory in Capacity for cust in Demand),
lowBound=0,
cat='Integer')
factory_status =pulp.LpVariable.dicts("factory_status", (factory for factory in Capacity),
cat='Binary')
cap_slack =pulp.LpVariable.dicts("capslack",
(cust for cust in Demand),
lowBound=0,
cat='Integer')
# objective: delivery cost + fixed facility cost + heavy penalty on unmet demand (cap_slack)
model += pulp.lpSum(
[DelCost.loc[factory, cust] * production[factory, cust] for factory in Capacity for cust in Demand]
+ [FixedCost.loc[factory] * factory_status[factory] for factory in Capacity]
+ [5000000 * cap_slack[cust] for cust in Demand])
for cust in Demand:
model += pulp.lpSum(production[factory, cust] for factory in Capacity)+cap_slack[cust] == Demand[cust]
for factory in Capacity:
model += pulp.lpSum(production[factory, cust] for cust in Demand) <= Capacity[factory]*factory_status[factory]
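# Model recap (a descriptive note, not extra constraints): production[f, c] is the integer
# shipment from factory f to customer c, factory_status[f] is a binary open/close flag,
# and cap_slack[c] absorbs unmet demand at a 5,000,000-per-unit penalty. The first
# constraint family forces shipments plus slack to meet each customer's demand; the
# second limits each factory's shipments to its capacity only when the factory is open.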
model.solve()
print("Status:", LpStatus[model.status])
for v in model.variables():
print(v.name, "=", v.varValue)
print("Total Cost of Ingredients per can = ", value(model.objective))
# Getting the table for the Factorywise Allocation
def factoryalloc(model,Numberoffact,Numberofcust,listoffac,listofcus):
listj=list()
listk=list()
listcaps=list()
for v in model.variables():
listj.append(v.varValue)
customer=listj[(len(listj)-Numberofcust-Numberoffact):(len(listj)-Numberoffact)]
del listj[(len(listj)-Numberoffact-Numberofcust):len(listj)]
for row in listj:
if row==0:
listk.append(0)
else:
listk.append(1)
x=np.reshape(listj,(Numberoffact,Numberofcust))
y=np.reshape(listk,(Numberoffact,Numberofcust))
FactoryAlloc_table=pd.DataFrame(x,index=listoffac,columns=listofcus)
Factorystatus=pd.DataFrame(y,index=listoffac,columns=listofcus)
return FactoryAlloc_table,Factorystatus,customer
Alltable,FactorystatusTable,ded=factoryalloc(model,Numberoffact,Numberofcust,colo,cols)
Allstatus=list()
dede=pd.DataFrame(ded,columns=['UnSatisfied'])
finaldede=dede[dede.UnSatisfied != 0]
colss=pd.DataFrame(cols,columns=['CustomerLocation'])
fina=pd.concat([colss,finaldede],axis=1, join='inner')
print(fina)
for i in range(len(Alltable)):
for j in range(len(Alltable.columns)):
if (Alltable.loc[Alltable.index[i], Alltable.columns[j]]>0):
all=[Alltable.index[i], Alltable.columns[j], Alltable.loc[Alltable.index[i], Alltable.columns[j]]]
Allstatus.append(all)
Status=
completion: pd.DataFrame(Allstatus,columns=['Factory','Customer','Allocation'])
api: pandas.DataFrame
---
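The completion above supplies the last step of the facility-location route: the positive production[factory, cust] values collected in Allstatus become a tidy DataFrame. A minimal, self-contained sketch of that pattern follows; the sample triples are made up purely for illustration, and only the Allstatus/Status names come from the code above.

import pandas as pd

# Hypothetical allocations in the same [factory, customer, quantity] layout the route builds.
Allstatus = [
    ["F1", "C1", 120.0],
    ["F1", "C2", 80.0],
    ["F2", "C2", 40.0],
]
Status = pd.DataFrame(Allstatus, columns=['Factory', 'Customer', 'Allocation'])

# A factory-by-customer matrix is often the more readable view of the same result.
alloc_matrix = Status.pivot(index='Factory', columns='Customer', values='Allocation').fillna(0)
print(alloc_matrix)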
from __future__ import division
import json
import re
import time
from pandas import DataFrame, isnull, notnull, to_datetime
from pandas_datareader._utils import RemoteDataError
from pandas_datareader.base import _DailyBaseReader
class YahooDailyReader(_DailyBaseReader):
"""
Returns DataFrame with historical data over a date range,
start to end.
To avoid being penalized by Yahoo! Finance servers, pauses between
downloading 'chunks' of symbols can be specified.
Parameters
----------
symbols : string, array-like object (list, tuple, Series), or DataFrame
Single stock symbol (ticker), array-like object of symbols or
DataFrame with index containing stock symbols.
start : string, int, date, datetime, Timestamp
Starting date. Parses many different kinds of date
representations (e.g., 'JAN-01-2010', '1/1/10', 'Jan, 1, 1980'). Defaults to
5 years before the current date.
end : string, int, date, datetime, Timestamp
Ending date
retry_count : int, default 3
Number of times to retry query request.
pause : int, default 0.1
Time, in seconds, to pause between consecutive queries of chunks. If
single value given for symbol, represents the pause between retries.
session : Session, default None
requests.sessions.Session instance to be used
adjust_price : bool, default False
If True, adjusts all prices in hist_data ('Open', 'High', 'Low',
'Close') based on 'Adj Close' price. Adds 'Adj_Ratio' column and drops
'Adj Close'.
ret_index : bool, default False
If True, includes a simple return index 'Ret_Index' in hist_data.
chunksize : int, default 25
Number of symbols to download consecutively before initiating a pause.
interval : string, default 'd'
Time interval code, valid values are 'd' for daily, 'w' for weekly,
'm' for monthly.
get_actions : bool, default False
If True, adds Dividend and Split columns to dataframe.
adjust_dividends : bool, default True
If True, adjusts dividends for splits.
"""
def __init__(
self,
symbols=None,
start=None,
end=None,
retry_count=3,
pause=0.1,
session=None,
adjust_price=False,
ret_index=False,
chunksize=1,
interval="d",
get_actions=False,
adjust_dividends=True,
):
super(YahooDailyReader, self).__init__(
symbols=symbols,
start=start,
end=end,
retry_count=retry_count,
pause=pause,
session=session,
chunksize=chunksize,
)
# Ladder up the wait time between subsequent requests to improve
# probability of a successful retry
self.pause_multiplier = 2.5
self.headers = {
"Connection": "keep-alive",
"Expires": str(-1),
"Upgrade-Insecure-Requests": str(1),
# Google Chrome:
"User-Agent": (
"Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 "
"(KHTML, like Gecko) Chrome/54.0.2840.99 Safari/537.36"
),
}
self.adjust_price = adjust_price
self.ret_index = ret_index
self.interval = interval
self._get_actions = get_actions
if self.interval not in ["d", "wk", "mo", "m", "w"]:
raise ValueError(
"Invalid interval: valid values are 'd', 'wk' and 'mo'. 'm' and 'w' "
"have been implemented for backward compatibility. 'v' has been moved "
"to the yahoo-actions or yahoo-dividends APIs."
)
elif self.interval in ["m", "mo"]:
self.pdinterval = "m"
self.interval = "mo"
elif self.interval in ["w", "wk"]:
self.pdinterval = "w"
self.interval = "wk"
self.interval = "1" + self.interval
self.adjust_dividends = adjust_dividends
@property
def get_actions(self):
return self._get_actions
@property
def url(self):
return "https://finance.yahoo.com/quote/{}/history"
# Test test_get_data_interval() crashed because of this issue; probably the
# whole Yahoo part of the package wasn't working properly.
def _get_params(self, symbol):
# This is needed because Yahoo returns data shifted back by 4 hours.
four_hours_in_seconds = 14400
unix_start = int(time.mktime(self.start.timetuple()))
unix_start += four_hours_in_seconds
day_end = self.end.replace(hour=23, minute=59, second=59)
unix_end = int(time.mktime(day_end.timetuple()))
unix_end += four_hours_in_seconds
params = {
"period1": unix_start,
"period2": unix_end,
"interval": self.interval,
"frequency": self.interval,
"filter": "history",
"symbol": symbol,
}
return params
def _read_one_data(self, url, params):
""" read one data from specified symbol """
symbol = params["symbol"]
del params["symbol"]
url = url.format(symbol)
resp = self._get_response(url, params=params)
ptrn = r"root\.App\.main = (.*?);\n}\(this\)\);"
try:
j = json.loads(re.search(ptrn, resp.text, re.DOTALL).group(1))
data = j["context"]["dispatcher"]["stores"]["HistoricalPriceStore"]
except KeyError:
msg = "No data fetched for symbol {} using {}"
raise RemoteDataError(msg.format(symbol, self.__class__.__name__))
# price data
prices = DataFrame(data["prices"])
prices.columns = [col.capitalize() for col in prices.columns]
prices["Date"] = to_datetime(
completion: to_datetime(prices["Date"], unit="s")
api: pandas.to_datetime
---
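A minimal usage sketch for the reader defined above, assuming pandas-datareader is installed and that the class lives at its usual pandas_datareader.yahoo.daily path; read() and close() come from the shared base-reader machinery, and the ticker and dates are arbitrary examples.

from datetime import datetime
from pandas_datareader.yahoo.daily import YahooDailyReader

# Daily prices for one symbol over a fixed window; interval "d" is mapped to "1d" internally.
reader = YahooDailyReader(symbols="AAPL",
                          start=datetime(2020, 1, 1),
                          end=datetime(2020, 12, 31),
                          interval="d")
prices = reader.read()   # DataFrame of Open/High/Low/Close/Volume etc., indexed by date
reader.close()
print(prices.head())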
from tests.deap.conftest import building_area, building_volume
import numpy as np
import pandas as pd
from pandas.testing import assert_series_equal
from ber_public.deap import vent
def test_calculate_infiltration_rate_due_to_openings():
"""Output is equivalent to DEAP 4.2.0 example A"""
building_volume = pd.Series([321, 0, 100, 200])
no_chimneys = pd.Series([0, 0, 0, 1])
no_open_flues = pd.Series([0, 0, 0, 1])
no_fans = pd.Series([1, 0, 0, 1])
no_room_heaters = pd.Series([0, 0, 0, 1])
is_draught_lobby = pd.Series(["NO", "NO", "YES", "NO"])
expected_output = pd.Series([0.08, 0, 0, 0.6])
output = vent._calculate_infiltration_rate_due_to_openings(
building_volume=building_volume,
no_chimneys=no_chimneys,
no_open_flues=no_open_flues,
no_fans=no_fans,
no_room_heaters=no_room_heaters,
is_draught_lobby=is_draught_lobby,
draught_lobby_boolean=vent.YES_NO,
)
assert_series_equal(output.round(2), expected_output)
def test_calculate_infiltration_rate_due_to_structure():
"""Output is equivalent to DEAP 4.2.0 example A"""
is_permeability_tested = pd.Series(["YES", "NO", "NO"])
permeability_test_result = pd.Series([0.15, np.nan, np.nan])
no_storeys = pd.Series([np.nan, 2, 1])
percentage_draught_stripped = pd.Series([np.nan, 100, 75])
is_floor_suspended = pd.Series(
[np.nan, "No ", "Yes (Unsealed) "]
)
structure_type = pd.Series(
[np.nan, "Masonry ", "Timber or Steel Frame "]
)
expected_output = pd.Series([0.15, 0.5, 0.55])
output = vent._calculate_infiltration_rate_due_to_structure(
is_permeability_tested=is_permeability_tested,
permeability_test_result=permeability_test_result,
no_storeys=no_storeys,
percentage_draught_stripped=percentage_draught_stripped,
is_floor_suspended=is_floor_suspended,
structure_type=structure_type,
suspended_floor_types=vent.SUSPENDED_FLOOR_TYPES,
structure_types=vent.STRUCTURE_TYPES,
permeability_test_boolean=vent.YES_NO,
)
assert_series_equal(output.round(2), expected_output)
def test_calculate_infiltration_rate(monkeypatch):
"""Output is equivalent to DEAP 4.2.0 example A"""
no_sides_sheltered = pd.Series([2, 2])
def _mock_calculate_infiltration_rate_due_to_openings(*args, **kwargs):
return pd.Series([0.08, 0.08])
def _mock_calculate_infiltration_rate_due_to_structure(*args, **kwargs):
return pd.Series([0.15, 0.5])
monkeypatch.setattr(
vent,
"_calculate_infiltration_rate_due_to_openings",
_mock_calculate_infiltration_rate_due_to_openings,
)
monkeypatch.setattr(
vent,
"_calculate_infiltration_rate_due_to_structure",
_mock_calculate_infiltration_rate_due_to_structure,
)
expected_output = pd.Series([0.2, 0.49])
output = vent.calculate_infiltration_rate(
no_sides_sheltered=no_sides_sheltered,
building_volume=None,
no_chimneys=None,
no_open_flues=None,
no_fans=None,
no_room_heaters=None,
is_draught_lobby=None,
is_permeability_tested=None,
permeability_test_result=None,
no_storeys=None,
percentage_draught_stripped=None,
is_floor_suspended=None,
structure_type=None,
draught_lobby_boolean=None,
suspended_floor_types=None,
structure_types=None,
permeability_test_boolean=None,
)
assert_series_equal(output.round(2), expected_output)
def test_calculate_effective_air_rate_change():
"""Output is equivalent to DEAP 4.2.0 example A"""
n_methods = 6
ventilation_method = pd.Series(
[
"Natural vent.",
"Pos input vent.- loft",
"Pos input vent.- outside",
"Whole house extract vent.",
"Bal.whole mech.vent no heat re",
"Bal.whole mech.vent heat recvr",
]
)
building_volume =
completion: pd.Series([321] * n_methods)
api: pandas.Series
---
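Two pandas testing idioms carry these DEAP tests: constant-length Series inputs such as the completion pd.Series([321] * n_methods), and comparison of rounded outputs via assert_series_equal. A package-independent sketch of the same pattern, with a hypothetical double() standing in for a vent._calculate_* function:

import pandas as pd
from pandas.testing import assert_series_equal

def double(x: pd.Series) -> pd.Series:
    # Hypothetical stand-in for one of the vent calculations under test.
    return 2 * x

def test_double():
    n_methods = 6
    building_volume = pd.Series([321] * n_methods)   # one constant value per ventilation method
    expected_output = pd.Series([642] * n_methods)
    assert_series_equal(double(building_volume).round(2), expected_output)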
# --------------
#Importing header files
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
#Path of the file
data=pd.read_csv(path)
data=data.rename(columns={'Total':'Total_Medals'})
print(data.head(10))
#Code starts here
# --------------
#Code starts here
data['Better_Event']=np.where(data['Total_Summer']>data['Total_Winter'],'Summer','Winter')
data['Better_Event']=np.where(data['Total_Summer']==data['Total_Winter'],'Both', data['Better_Event'])
#print(data['Total_Winter'].describe())
# idxmax() returns the most frequent label itself (np.argmax on a Series now returns a position)
better_event=data['Better_Event'].value_counts().idxmax()
better_event
# --------------
#Code starts here
top_countries=
completion: pd.DataFrame(data,columns=['Country_Name','Total_Summer', 'Total_Winter','Total_Medals'])
api: pandas.DataFrame
---
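The two stacked np.where calls above implement a three-way label (Summer / Winter / Both) before the most common value is picked. A standalone sketch of the same pattern with made-up medal counts:

import numpy as np
import pandas as pd

# Made-up medal counts, just to exercise the two-step np.where labelling.
data = pd.DataFrame({
    "Country_Name": ["A", "B", "C"],
    "Total_Summer": [10, 3, 5],
    "Total_Winter": [2, 3, 9],
})
data["Better_Event"] = np.where(data["Total_Summer"] > data["Total_Winter"], "Summer", "Winter")
data["Better_Event"] = np.where(data["Total_Summer"] == data["Total_Winter"], "Both", data["Better_Event"])
better_event = data["Better_Event"].value_counts().idxmax()   # most frequent label
print(better_event)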
"""Ark Model"""
__docformat__ = "numpy"
import json
import logging
import pandas as pd
import requests
import yfinance as yf
from bs4 import BeautifulSoup
from openbb_terminal.decorators import log_start_end
from openbb_terminal.helper_funcs import get_user_agent
logger = logging.getLogger(__name__)
@log_start_end(log=logger)
def get_ark_trades_by_ticker(ticker: str) -> pd.DataFrame:
"""Gets a dataframe of ARK trades for ticker
Parameters
----------
ticker : str
Ticker to get trades for
Returns
-------
pd.DataFrame
DataFrame of trades
"""
url = f"https://cathiesark.com/ark-combined-holdings-of-{ticker}"
r = requests.get(url, headers={"User-Agent": get_user_agent()})
# Error in request
if r.status_code != 200:
return
completion: pd.DataFrame()
api: pandas.DataFrame
---
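The completion returns an empty DataFrame whenever the request does not come back with status 200, so callers see a normal DataFrame whose .empty flag is True rather than an exception. A small self-contained sketch of what that sentinel looks like from the caller's side; handle() is a hypothetical helper, not part of the module above.

import pandas as pd

def handle(trades: pd.DataFrame) -> None:
    # Caller-side pattern for a function that returns pd.DataFrame() on failure.
    if trades.empty:
        print("No ARK trade data returned")
    else:
        print(trades.head())

handle(pd.DataFrame())   # prints the "no data" branch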
"""Parties have official names, short names, and nicknames.
This submodule attempts to link these different types of name.
The short name is the colloquial name as used by
https://www.parliament.uk/about/mps-and-lords/members/parties/ .
Example:
- official name: Conservative and Unionist Party
- short name: Conservative Party
- nickname: Tory
The nicknames come from our own survey data.
The "Green Party" is not one party but a collection of parties,
with distinct chapters in Scotland, Northern Ireland,
and England & Wales. The election data, when presented to the user,
distinguishes between these parties with their official names
(which contain their country identifier), but all three official
names are shortened to Green Party.
The Co-operative Party is in alliance with the Labour party,
and all their seats are recorded as Labour seats in the data.
The Speaker is regarded as a party of one member.
"""
import logging
import functools
from typing import Optional
import pandas as pd
import fuzzywuzzy.process
from . import data_tables
from . import exceptions
_logger = logging.getLogger("uk-politics")
@functools.lru_cache
def official(nickname: Optional[str],
allow_fuzzy_match=True,
warn_on_fuzzy_match=True,
exception_on_null_value=False) -> Optional[str]:
"""Return the official name of a party from a given nickname.
The function uses fuzzy matching (Levenshtein distance, from fuzzywuzzy)
to find the closest match by default;
set `allow_fuzzy_match=False` to turn this off.
These renames will appear as warnings.
This function is cached to avoid running extra fuzzy matches;
this also means that warnings will only appear the first time
a given renaming takes place. By default, nicknames that `pandas`
considers to be a null value are passed through as None.
Args:
nickname (str): [description]
allow_fuzzy_match (bool, optional): [description]. Defaults to True.
warn_on_fuzzy_match (bool, optional): [description]. Defaults to True.
Raises:
exceptions.PartyNicknameEmpty: [description]
exceptions.PartyNameNotFound: [description]
Returns:
official_name (str): The official name for the party
that best matches our nicknames on record.
For example:
official_name("tories") -> "Conservative and Unionist Party"
"""
if
completion: pd.isna(nickname)
api: pandas.isna
---
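A package-independent sketch of the cached fuzzy-nickname lookup described above; the _NICKNAMES table is a toy stand-in for the uk-politics data tables, and the warning and exception handling of the real function is omitted.

import functools
from typing import Optional

import fuzzywuzzy.process
import pandas as pd

# Toy nickname-to-official-name table, standing in for data_tables.
_NICKNAMES = {
    "tory": "Conservative and Unionist Party",
    "labour": "Labour Party",
    "lib dem": "Liberal Democrats",
}

@functools.lru_cache
def official(nickname: Optional[str]) -> Optional[str]:
    # Null nicknames pass through as None, mirroring the behaviour documented above.
    if pd.isna(nickname):
        return None
    best_match, _score = fuzzywuzzy.process.extractOne(nickname.lower(), list(_NICKNAMES))
    return _NICKNAMES[best_match]

print(official("Tories"))   # -> Conservative and Unionist Party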
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""
Interfaces to generate reportlets
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
"""
import os
import os.path as op
import time
import json
import re
from collections import defaultdict
from mpl_toolkits.mplot3d import Axes3D # noqa: F401
import seaborn as sns
import matplotlib
import matplotlib.pyplot as plt
from matplotlib import animation
from scipy.io.matlab import loadmat
import pandas as pd
import numpy as np
from nipype.interfaces.base import (
traits, TraitedSpec, BaseInterfaceInputSpec,
File, Directory, InputMultiPath, InputMultiObject, Str, isdefined,
SimpleInterface)
from nipype.interfaces import freesurfer as fs
from nipype.interfaces.mixins import reporting
import nibabel as nb
from dipy.core.sphere import HemiSphere
from .gradients import concatenate_bvals, concatenate_bvecs
from .qc import createB0_ColorFA_Mask_Sprites, createSprite4D
from .bids import get_bids_params
from ..niworkflows.viz.utils import peak_slice_series, odf_roi_plot
from .converters import fib2amps, mif2amps
SUBJECT_TEMPLATE = """\t<ul class="elem-desc">
\t\t<li>Subject ID: {subject_id}</li>
\t\t<li>Structural images: {n_t1s:d} T1-weighted {t2w}</li>
\t\t<li>Diffusion-weighted series: inputs {n_dwis:d}, outputs {n_outputs:d}</li>
{groupings}
\t\t<li>Resampling targets: {output_spaces}
\t\t<li>FreeSurfer reconstruction: {freesurfer_status}</li>
\t</ul>
"""
DIFFUSION_TEMPLATE = """\t\t<h3 class="elem-title">Summary</h3>
\t\t<ul class="elem-desc">
\t\t\t<li>Phase-encoding (PE) direction: {pedir}</li>
\t\t\t<li>Susceptibility distortion correction: {sdc}</li>
\t\t\t<li>Coregistration Transform: {coregistration}</li>
\t\t\t<li>Denoising Window: {denoise_window}</li>
\t\t\t<li>HMC Transform: {hmc_transform}</li>
\t\t\t<li>HMC Model: {hmc_model}</li>
\t\t\t<li>DWI series resampled to spaces: {output_spaces}</li>
\t\t\t<li>Confounds collected: {confounds}</li>
\t\t\t<li>Impute slice threshold: {impute_slice_threshold}</li>
\t\t</ul>
{validation_reports}
"""
ABOUT_TEMPLATE = """\t<ul>
\t\t<li>qsiprep version: {version}</li>
\t\t<li>qsiprep command: <code>{command}</code></li>
\t\t<li>Date preprocessed: {date}</li>
\t</ul>
</div>
"""
TOPUP_TEMPLATE = """\
\t\t<p class="elem-desc">
\t\t{summary}</p>
"""
GROUPING_TEMPLATE = """\t<ul>
\t\t<li>Output Name: {output_name}</li>
{input_files}
</ul>
"""
INTERACTIVE_TEMPLATE = """
<script src="https://unpkg.com/vue"></script>
<script src="https://nipreps.github.io/dmriprep-viewer/dmriprepReport.umd.min.js"></script>
<link rel="stylesheet" href="https://nipreps.github.io/dmriprep-viewer/dmriprepReport.css">
<div id="app">
<demo :report="report"></demo>
</div>
<script>
var report = REPORT
new Vue({
components: {
demo: dmriprepReport
},
data () {
return {
report
}
}
}).$mount('#app')
</script>
"""
class SummaryOutputSpec(TraitedSpec):
out_report = File(exists=True, desc='HTML segment containing summary')
class SummaryInterface(SimpleInterface):
output_spec = SummaryOutputSpec
def _generate_segment(self):
raise NotImplementedError()
def _run_interface(self, runtime):
segment = self._generate_segment()
fname = os.path.join(runtime.cwd, 'report.html')
with open(fname, 'w') as fobj:
fobj.write(segment)
self._results['out_report'] = fname
return runtime
class SubjectSummaryInputSpec(BaseInterfaceInputSpec):
t1w = InputMultiPath(File(exists=True), desc='T1w structural images')
t2w = InputMultiPath(File(exists=True), desc='T2w structural images')
subjects_dir = Directory(desc='FreeSurfer subjects directory')
subject_id = Str(desc='Subject ID')
dwi_groupings = traits.Dict(desc='groupings of DWI files and their output names')
output_spaces = traits.List(desc='Target spaces')
template = traits.Enum('MNI152NLin2009cAsym', desc='Template space')
class SubjectSummaryOutputSpec(SummaryOutputSpec):
# This exists to ensure that the summary is run prior to the first ReconAll
# call, allowing a determination whether there is a pre-existing directory
subject_id = Str(desc='FreeSurfer subject ID')
class SubjectSummary(SummaryInterface):
input_spec = SubjectSummaryInputSpec
output_spec = SubjectSummaryOutputSpec
def _run_interface(self, runtime):
if isdefined(self.inputs.subject_id):
self._results['subject_id'] = self.inputs.subject_id
return super(SubjectSummary, self)._run_interface(runtime)
def _generate_segment(self):
if not isdefined(self.inputs.subjects_dir):
freesurfer_status = 'Not run'
else:
recon = fs.ReconAll(subjects_dir=self.inputs.subjects_dir,
subject_id=self.inputs.subject_id,
T1_files=self.inputs.t1w,
flags='-noskullstrip')
if recon.cmdline.startswith('echo'):
freesurfer_status = 'Pre-existing directory'
else:
freesurfer_status = 'Run by qsiprep'
output_spaces = [self.inputs.template if space == 'template' else space
for space in self.inputs.output_spaces]
t2w_seg = ''
if self.inputs.t2w:
t2w_seg = '(+ {:d} T2-weighted)'.format(len(self.inputs.t2w))
# Add text for how the dwis are grouped
n_dwis = 0
n_outputs = 0
groupings = ''
if isdefined(self.inputs.dwi_groupings):
for output_fname, group_info in self.inputs.dwi_groupings.items():
n_outputs += 1
files_desc = []
files_desc.append(
'\t\t\t<li>Scan group: %s (PE Dir %s)</li><ul>' % (
output_fname, group_info['dwi_series_pedir']))
files_desc.append('\t\t\t\t<li>DWI Files: </li>')
for dwi_file in group_info['dwi_series']:
files_desc.append("\t\t\t\t\t<li> %s </li>" % dwi_file)
n_dwis += 1
fieldmap_type = group_info['fieldmap_info']['suffix']
if fieldmap_type is not None:
files_desc.append('\t\t\t\t<li>Fieldmap type: %s </li>' % fieldmap_type)
for key, value in group_info['fieldmap_info'].items():
files_desc.append("\t\t\t\t\t<li> %s: %s </li>" % (key, str(value)))
n_dwis += 1
files_desc.append("</ul>")
groupings += GROUPING_TEMPLATE.format(output_name=output_fname,
input_files='\n'.join(files_desc))
return SUBJECT_TEMPLATE.format(subject_id=self.inputs.subject_id,
n_t1s=len(self.inputs.t1w),
t2w=t2w_seg,
n_dwis=n_dwis,
n_outputs=n_outputs,
groupings=groupings,
output_spaces=', '.join(output_spaces),
freesurfer_status=freesurfer_status)
class DiffusionSummaryInputSpec(BaseInterfaceInputSpec):
distortion_correction = traits.Str(desc='Susceptibility distortion correction method',
mandatory=True)
pe_direction = traits.Enum(None, 'i', 'i-', 'j', 'j-', mandatory=True,
desc='Phase-encoding direction detected')
distortion_correction = traits.Str(mandatory=True, desc='Method used for SDC')
impute_slice_threshold = traits.CFloat(desc='threshold for imputing a slice')
hmc_transform = traits.Str(mandatory=True, desc='transform used during HMC')
hmc_model = traits.Str(desc='model used for hmc')
b0_to_t1w_transform = traits.Enum("Rigid", "Affine", desc='Transform type for coregistration')
dwi_denoise_window = traits.Int(desc='window size for dwidenoise')
output_spaces = traits.List(desc='Target spaces')
confounds_file = File(exists=True, desc='Confounds file')
validation_reports = InputMultiObject(File(exists=True))
class DiffusionSummary(SummaryInterface):
input_spec = DiffusionSummaryInputSpec
def _generate_segment(self):
if self.inputs.pe_direction is None:
pedir = 'MISSING - Assuming Anterior-Posterior'
else:
pedir = {'i': 'Left-Right', 'j': 'Anterior-Posterior'}[self.inputs.pe_direction[0]]
if isdefined(self.inputs.confounds_file):
with open(self.inputs.confounds_file) as cfh:
conflist = cfh.readline().strip('\n').strip()
else:
conflist = ''
validation_summaries = []
for summary in self.inputs.validation_reports:
with open(summary, 'r') as summary_f:
validation_summaries.extend(summary_f.readlines())
validation_summary = '\n'.join(validation_summaries)
return DIFFUSION_TEMPLATE.format(
pedir=pedir,
sdc=self.inputs.distortion_correction,
coregistration=self.inputs.b0_to_t1w_transform,
hmc_transform=self.inputs.hmc_transform,
hmc_model=self.inputs.hmc_model,
denoise_window=self.inputs.dwi_denoise_window,
output_spaces=', '.join(self.inputs.output_spaces),
confounds=re.sub(r'[\t ]+', ', ', conflist),
impute_slice_threshold=self.inputs.impute_slice_threshold,
validation_reports=validation_summary
)
class AboutSummaryInputSpec(BaseInterfaceInputSpec):
version = Str(desc='qsiprep version')
command = Str(desc='qsiprep command')
# Date not included - update timestamp only if version or command changes
class AboutSummary(SummaryInterface):
input_spec = AboutSummaryInputSpec
def _generate_segment(self):
return ABOUT_TEMPLATE.format(version=self.inputs.version,
command=self.inputs.command,
date=time.strftime("%Y-%m-%d %H:%M:%S %z"))
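# A minimal usage sketch (hedged: the values are made up, and nipype must be installed):
#   about = AboutSummary(version="x.y.z", command="qsiprep /data /out participant")
#   result = about.run()               # SimpleInterface.run() invokes _generate_segment via _run_interface
#   result.outputs.out_report          # -> path to the generated report.html fragment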
class TopupSummaryInputSpec(BaseInterfaceInputSpec):
summary = Str(desc='Summary of TOPUP inputs')
class TopupSummary(SummaryInterface):
input_spec = TopupSummaryInputSpec
def _generate_segment(self):
return TOPUP_TEMPLATE.format(summary=self.inputs.summary)
class GradientPlotInputSpec(BaseInterfaceInputSpec):
orig_bvec_files = InputMultiObject(File(exists=True), mandatory=True,
desc='bvecs from DWISplit')
orig_bval_files = InputMultiObject(File(exists=True), mandatory=True,
desc='bvals from DWISplit')
source_files = traits.List(desc='source file for each gradient')
final_bvec_file = File(exists=True, desc='bval file')
class GradientPlotOutputSpec(SummaryOutputSpec):
plot_file = File(exists=True)
class GradientPlot(SummaryInterface):
input_spec = GradientPlotInputSpec
output_spec = GradientPlotOutputSpec
def _run_interface(self, runtime):
outfile = os.path.join(runtime.cwd, "bvec_plot.gif")
sns.set_style("whitegrid")
sns.set_context("paper", font_scale=0.8)
orig_bvecs = concatenate_bvecs(self.inputs.orig_bvec_files)
bvals = concatenate_bvals(self.inputs.orig_bval_files, None)
if isdefined(self.inputs.source_files):
file_array = np.array(self.inputs.source_files)
_, filenums = np.unique(file_array, return_inverse=True)
else:
filenums = np.ones_like(bvals)
# Account for the possibility that this is a PE Pair average
if len(filenums) == len(bvals) * 2:
filenums = filenums[:len(bvals)]
# Plot the final bvecs if provided
final_bvecs = None
if isdefined(self.inputs.final_bvec_file):
final_bvecs = np.loadtxt(self.inputs.final_bvec_file).T
plot_gradients(bvals, orig_bvecs, filenums, outfile, final_bvecs)
self._results['plot_file'] = outfile
return runtime
def plot_gradients(bvals, orig_bvecs, source_filenums, output_fname, final_bvecs=None,
frames=60):
qrads = np.sqrt(bvals)
qvecs = (qrads[:, np.newaxis] * orig_bvecs)
qx, qy, qz = qvecs.T
maxvals = qvecs.max(0)
minvals = qvecs.min(0)
def add_lines(ax):
labels = ['L', 'P', 'S']
for axnum in range(3):
minvec = np.zeros(3)
maxvec = np.zeros(3)
minvec[axnum] = minvals[axnum]
maxvec[axnum] = maxvals[axnum]
x, y, z = np.column_stack([minvec, maxvec])
ax.plot(x, y, z, color="k")
txt_pos = maxvec + 5
ax.text(txt_pos[0], txt_pos[1], txt_pos[2], labels[axnum], size=8,
zorder=1, color='k')
if final_bvecs is not None:
if final_bvecs.shape[0] == 3:
final_bvecs = final_bvecs.T
fqx, fqy, fqz = (qrads[:, np.newaxis] * final_bvecs).T
fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(10, 5),
subplot_kw={"aspect": "equal", "projection": "3d"})
orig_ax = axes[0]
final_ax = axes[1]
axes_list = [orig_ax, final_ax]
final_ax.scatter(fqx, fqy, fqz, c=source_filenums, marker="+")
orig_ax.scatter(qx, qy, qz, c=source_filenums, marker="+")
final_ax.axis('off')
add_lines(final_ax)
final_ax.set_title('After Preprocessing')
else:
fig, orig_ax = plt.subplots(nrows=1, ncols=1, figsize=(10, 5),
subplot_kw={"aspect": "equal", "projection": "3d"})
axes_list = [orig_ax]
orig_ax.scatter(qx, qy, qz, c=source_filenums, marker="+")
orig_ax.axis('off')
orig_ax.set_title("Original Scheme")
add_lines(orig_ax)
# Animate rotating the axes
rotate_amount = np.ones(frames) * 180 / frames
stay_put = np.zeros_like(rotate_amount)
rotate_azim = np.concatenate([rotate_amount, stay_put, -rotate_amount, stay_put])
rotate_elev = np.concatenate([stay_put, rotate_amount, stay_put, -rotate_amount])
plt.tight_layout()
def rotate(i):
for ax in axes_list:
ax.azim += rotate_azim[i]
ax.elev += rotate_elev[i]
return tuple(axes_list)
anim = animation.FuncAnimation(fig, rotate, frames=frames*4,
interval=20, blit=False)
anim.save(output_fname, writer='imagemagick', fps=32)
plt.close(fig)
fig = None
def topup_selection_to_report(selected_indices, original_files, spec_lookup,
image_source='combined DWI series'):
"""Write a description of how the images were selected for TOPUP.
>>> selected_indices = [0, 15, 30, 45]
>>> original_files = ["sub-1_dir-AP_dwi.nii.gz"] * 30 + ["sub-1_dir-PA_dwi.nii.gz"] * 30
>>> spec_lookup = {"sub-1_dir-AP_dwi.nii.gz": "0 1 0 0.087",
... "sub-1_dir-PA_dwi.nii.gz": "0 -1 0 0.087"}
>>> print(topup_selection_to_report(selected_indices, original_files, spec_lookup))
A total of 2 distortion groups was included in the combined dwi data. Distortion \
group '0 1 0 0.087' was represented by images 0, 15 from sub-1_dir-AP_dwi.nii.gz. \
Distortion group '0 -1 0 0.087' was represented by images 0, 15 from sub-1_dir-PA_dwi.nii.gz.
Or
>>> selected_indices = [0, 15, 30, 45]
>>> original_files = ["sub-1_dir-AP_run-1_dwi.nii.gz"] * 15 + [
... "sub-1_dir-AP_run-2_dwi.nii.gz"] * 15 + [
... "sub-1_dir-PA_dwi.nii.gz"] * 30
>>> spec_lookup = {"sub-1_dir-AP_run-1_dwi.nii.gz": "0 1 0 0.087",
... "sub-1_dir-AP_run-2_dwi.nii.gz": "0 1 0 0.087",
... "sub-1_dir-PA_dwi.nii.gz": "0 -1 0 0.087"}
>>> print(topup_selection_to_report(selected_indices, original_files, spec_lookup))
A total of 2 distortion groups was included in the combined dwi data. Distortion \
group '0 1 0 0.087' was represented by image 0 from sub-1_dir-AP_run-1_dwi.nii.gz and \
image 0 from sub-1_dir-AP_run-2_dwi.nii.gz. Distortion group '0 -1 0 0.087' was represented \
by images 0, 15 from sub-1_dir-PA_dwi.nii.gz.
>>> selected_indices = [0, 15, 30, 45, 60]
>>> original_files = ["sub-1_dir-AP_run-1_dwi.nii.gz"] * 15 + [
... "sub-1_dir-AP_run-2_dwi.nii.gz"] * 15 + [
... "sub-1_dir-AP_run-3_dwi.nii.gz"] * 15 + [
... "sub-1_dir-PA_dwi.nii.gz"] * 30
>>> spec_lookup = {"sub-1_dir-AP_run-1_dwi.nii.gz": "0 1 0 0.087",
... "sub-1_dir-AP_run-2_dwi.nii.gz": "0 1 0 0.087",
... "sub-1_dir-AP_run-3_dwi.nii.gz": "0 1 0 0.087",
... "sub-1_dir-PA_dwi.nii.gz": "0 -1 0 0.087"}
>>> print(topup_selection_to_report(selected_indices, original_files, spec_lookup))
A total of 2 distortion groups was included in the combined dwi data. Distortion \
group '0 1 0 0.087' was represented by image 0 from sub-1_dir-AP_run-1_dwi.nii.gz, \
image 0 from sub-1_dir-AP_run-2_dwi.nii.gz and image 0 from sub-1_dir-AP_run-3_dwi.nii.gz. \
Distortion group '0 -1 0 0.087' was represented by images 0, 15 from sub-1_dir-PA_dwi.nii.gz.
>>> selected_indices = [0, 15, 30, 45]
>>> original_files = ["sub-1_dir-PA_dwi.nii.gz"] * 60
>>> spec_lookup = {"sub-1_dir-PA_dwi.nii.gz": "0 -1 0 0.087"}
>>> print(topup_selection_to_report(selected_indices, original_files, spec_lookup))
A total of 1 distortion group was included in the combined dwi data. \
Distortion group '0 -1 0 0.087' was represented by images 0, 15, 30, 45 \
from sub-1_dir-PA_dwi.nii.gz.
"""
image_indices = defaultdict(list)
for imgnum, image in enumerate(original_files):
image_indices[image].append(imgnum)
# Collect the original volume number within each source image
selected_per_image = defaultdict(list)
for b0_index in selected_indices:
b0_image = original_files[b0_index]
first_index = min(image_indices[b0_image])
within_image_index = b0_index - first_index
selected_per_image[b0_image].append(within_image_index)
# Collect the images and indices within each warp group
selected_per_warp_group = defaultdict(list)
for original_image, selection in selected_per_image.items():
warp_group = spec_lookup[original_image]
selected_per_warp_group[warp_group].append((original_image, selection))
# Make the description
num_groups = len(selected_per_warp_group)
plural = 's' if num_groups > 1 else ''
plural2 = 'were' if plural == 's' else 'was'
desc = ["A total of {num_groups} distortion group{plural} {plural2} included in the "
"{image_source} data. ".format(num_groups=num_groups, plural=plural,
plural2=plural2, image_source=image_source)]
for distortion_group, image_list in selected_per_warp_group.items():
group_desc = [
"Distortion group '{spec}' was represented by ".format(spec=distortion_group)]
for image_name, image_indices in image_list:
formatted_indices = ", ".join(map(str, image_indices))
plural = 's' if len(image_indices) > 1 else ''
group_desc += [
"image{plural} {imgnums} from {img_name}".format(plural=plural,
imgnums=formatted_indices,
img_name=image_name),
", "]
group_desc[-1] = ". "
if len(image_list) > 1:
group_desc[-3] = " and "
desc += group_desc
return ''.join(desc)
class _SeriesQCInputSpec(BaseInterfaceInputSpec):
pre_qc = File(exists=True, desc='qc file from the raw data')
t1_qc = File(exists=True, desc='qc file from preprocessed image in t1 space')
mni_qc = File(exists=True, desc='qc file from preprocessed image in template space')
confounds_file = File(exists=True, desc='confounds file')
t1_dice_score = traits.Float()
mni_dice_score = traits.Float()
output_file_name = traits.File()
class _SeriesQCOutputSpec(TraitedSpec):
series_qc_file = File(exists=True)
class SeriesQC(SimpleInterface):
input_spec = _SeriesQCInputSpec
output_spec = _SeriesQCOutputSpec
def _run_interface(self, runtime):
image_qc = _load_qc_file(self.inputs.pre_qc, prefix="raw_")
if isdefined(self.inputs.t1_qc):
image_qc.update(_load_qc_file(self.inputs.t1_qc, prefix="t1_"))
if isdefined(self.inputs.mni_qc):
image_qc.update(_load_qc_file(self.inputs.mni_qc, prefix="mni_"))
motion_summary = calculate_motion_summary(self.inputs.confounds_file)
image_qc.update(motion_summary)
# Add in Dice scores if available
if isdefined(self.inputs.t1_dice_score):
image_qc['t1_dice_distance'] = [self.inputs.t1_dice_score]
if isdefined(self.inputs.mni_dice_score):
image_qc['mni_dice_distance'] = [self.inputs.mni_dice_score]
# Get the metadata
output_file = self.inputs.output_file_name
image_qc['file_name'] = output_file
bids_info = get_bids_params(output_file)
image_qc.update(bids_info)
output = op.join(runtime.cwd, "dwi_qc.csv")
pd.DataFrame(image_qc).to_csv(output, index=False)
self._results['series_qc_file'] = output
return runtime
def _load_qc_file(fname, prefix=""):
qc_data = pd.read_csv(fname).to_dict(orient='records')[0]
renamed = dict([
(prefix + key, value) for key, value in qc_data.items()])
return renamed
def motion_derivatives(translations, rotations, framewise_disp,
original_files):
def padded_diff(data):
out = np.zeros_like(data)
out[1:] = np.diff(data, axis=0)
return out
drotations = padded_diff(rotations)
dtranslations = padded_diff(translations)
# We don't want the relative values across the boundaries of runs.
# Determine which values should be ignored
file_labels, _ = pd.factorize(original_files)
new_files = padded_diff(file_labels)
def file_masked(data):
masked_data = data.copy()
masked_data[new_files > 0] = 0
return masked_data
framewise_disp = file_masked(framewise_disp)
return {
"mean_fd": [framewise_disp.mean()],
"max_fd": [framewise_disp.max()],
"max_rotation": [file_masked(np.abs(rotations)).max()],
"max_translation": [file_masked(np.abs(translations)).max()],
"max_rel_rotation": [file_masked(np.abs(drotations)).max()],
"max_rel_translation": [file_masked(np.abs(dtranslations)).max()]
}
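# Hedged sketch (toy numbers, not from any dataset): illustrates how
# motion_derivatives zeroes the frame-to-frame differences at the boundary
# between two source runs before taking maxima.
def _demo_motion_derivatives():
    translations = np.array([[0.0, 0, 0], [0.2, 0, 0], [5.0, 0, 0], [5.1, 0, 0]])
    rotations = np.zeros_like(translations)
    framewise_disp = np.array([0.0, 0.4, 6.0, 0.3])
    original_files = ["run-1.nii.gz", "run-1.nii.gz", "run-2.nii.gz", "run-2.nii.gz"]
    # The large jump at index 2 crosses a run boundary, so it is masked and
    # max_fd reflects within-run motion only (0.4 here).
    return motion_derivatives(translations, rotations, framewise_disp, original_files)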
def calculate_motion_summary(confounds_tsv):
if not isdefined(confounds_tsv) or confounds_tsv is None:
return {
"mean_fd": [np.nan],
"max_fd": [np.nan],
"max_rotation": [np.nan],
"max_translation": [np.nan],
"max_rel_rotation": [np.nan],
"max_rel_translation": [np.nan]
}
df = pd.read_csv(confounds_tsv, delimiter="\t")
# the default case where each output image comes from one input image
if 'trans_x' in df.columns:
translations = df[['trans_x', 'trans_y', 'trans_z']].values
rotations = df[['rot_x', 'rot_y', 'rot_z']].values
return motion_derivatives(translations, rotations, df['framewise_displacement'],
df['original_file'])
# If there was a PE Pair averaging, get motion from both
motion1 = motion_derivatives(df[['trans_x_1', 'trans_y_1', 'trans_z_1']].values,
df[['rot_x_1', 'rot_y_1', 'rot_z_1']].values,
df['framewise_displacement_1'],
df['original_file_1'])
motion2 = motion_derivatives(df[['trans_x_2', 'trans_y_2', 'trans_z_2']].values,
df[['rot_x_2', 'rot_y_2', 'rot_z_2']].values,
df['framewise_displacement_2'],
df['original_file_2'])
# Combine the FDs from both PE directions
# both_fd = np.column_stack([m1, m2])
# framewise_disp = both_fd[np.nanargmax(np.abs(both_fd), axis=1)]
def compare_series(key_name, comparator):
m1 = motion1[key_name][0]
m2 = motion2[key_name][0]
return [comparator(m1, m2)]
return {
"mean_fd": compare_series("mean_fd", lambda a, b: (a + b) / 2),
"max_fd": compare_series("max_fd", max),
"max_rotation": compare_series("max_rotation", max),
"max_translation": compare_series("max_translation", max),
"max_rel_rotation": compare_series("max_rel_rotation", max),
"max_rel_translation": compare_series("max_rel_translation", max)
}
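# Hedged sketch: writes a minimal single-series confounds table to disk and
# summarises it. The column names follow the 'trans_x' branch above; the values
# are invented and the output path is just a placeholder.
def _demo_calculate_motion_summary(tsv_path="demo_confounds.tsv"):
    demo = pd.DataFrame({
        "trans_x": [0.0, 0.1, 0.2], "trans_y": [0.0, 0.0, 0.1], "trans_z": [0.0, 0.0, 0.0],
        "rot_x": [0.0, 0.01, 0.0], "rot_y": [0.0, 0.0, 0.0], "rot_z": [0.0, 0.0, 0.0],
        "framewise_displacement": [0.0, 0.3, 0.2],
        "original_file": ["sub-1_dwi.nii.gz"] * 3,
    })
    demo.to_csv(tsv_path, sep="\t", index=False)
    return calculate_motion_summary(tsv_path)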
class _InteractiveReportInputSpec(TraitedSpec):
raw_dwi_file = File(exists=True, mandatory=True)
processed_dwi_file = File(exists=True, mandatory=True)
confounds_file = File(exists=True, mandatory=True)
mask_file = File(exists=True, mandatory=True)
color_fa = File(exists=True, mandatory=True)
carpetplot_data = File(exists=True, mandatory=True)
series_qc_file = File(exists=True, mandatory=True)
class InteractiveReport(SimpleInterface):
input_spec = _InteractiveReportInputSpec
output_spec = SummaryOutputSpec
def _run_interface(self, runtime):
report = {}
report['dwi_corrected'] = createSprite4D(self.inputs.processed_dwi_file)
b0, colorFA, mask = createB0_ColorFA_Mask_Sprites(self.inputs.processed_dwi_file,
self.inputs.color_fa,
self.inputs.mask_file)
report['carpetplot'] = []
if isdefined(self.inputs.carpetplot_data):
with open(self.inputs.carpetplot_data, 'r') as carpet_f:
carpet_data = json.load(carpet_f)
report.update(carpet_data)
# Load the QC file
        # Serializing via to_json and returning runtime are minimal completions of the truncated snippet
        report['qc_scores'] = json.loads(
            pd.read_csv(self.inputs.series_qc_file).to_json(orient='records'))
        return runtime
# report result parser
# extracts layer execution times from reports generated by the openvino
# benchmark_app.py
import os
import pickle
import pandas
import argparse
import json
__author__ = "<NAME>"
__copyright__ = "Christian Doppler Laboratory for Embedded Machine Learning"
__license__ = "Apache 2.0"
def add_measured_to_input(time_df, input_df, measured_df):
"""adds info of measurement dataframe to time and layer execution dataframe
Args:
time_df: DataFrame with input layer names as columns, stores runtime
        input_df: DataFrame with input layer names as columns, stores execution status
measured_df: DataFrame with measured layer names as rows, contains one measurement
Returns:
time_df and input_df
"""
# filter only available Layers
loc = len(input_df)
input_df = input_df.append(pandas.Series(), ignore_index=True)
time_df = time_df.append(pandas.Series(), ignore_index=True)
missing = {}
for c in input_df.columns:
if c in measured_df['LayerName'].values:
row = measured_df.loc[measured_df['LayerName'] == c]
input_df.at[loc, c] = row['ExecStatus'].values[0]
time_df.at[loc, c] = row['RunTime(ms)'].values[0]
measured_df = measured_df.loc[measured_df['LayerName'] != c]
else:
missing[c] = 0
# Input Layer treated separately
if c == 'x':
row = measured_df.loc[measured_df['LayerName'] == '<Extra>']
input_df.at[loc, c] = row['ExecStatus'].values[0]
time_df.at[loc, c] = row['RunTime(ms)'].values[0]
measured_df = measured_df.loc[measured_df['LayerName'] != '<Extra>']
# Look for missing Layers
print("\nMissing Layers:\n")
print(measured_df)
for c in missing.keys():
missing[c] = measured_df['LayerName'].str.contains(c+'_').sum()
cycle = len(missing)
for c in range(cycle):
v = min(missing, key=missing.get)
print('layer: ', v, ' found ', missing[v], ' time(s)')
#time_df.at[loc.v] = measured_df[measured_df['LayerName'].str.contains(c+'_')]['RunTime(ms)'].sum()
if(missing[v]) == 0:
input_df.at[loc, v] = 'REMOVED'
del missing[v]
else:
# print(measured_df[measured_df['LayerName'].str.contains(v+'_')])
time_sum = measured_df[measured_df['LayerName'].str.contains(
v+'_')]['RunTime(ms)'].sum()
input_df.at[loc, v] = 'EXECUTED'
time_df.at[loc, v] = time_sum
print('Time_sum', time_sum)
measured_df = measured_df[~measured_df['LayerName'].str.contains(
v+'_')]
del missing[v]
return time_df, input_df
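# Hedged usage sketch: minimal DataFrames with the column layout assumed above
# (LayerName / ExecStatus / RunTime(ms)); the layer names are invented. Note
# that DataFrame.append is used above, so this relies on pandas < 2.0.
def _demo_add_measured_to_input():
    input_df = pandas.DataFrame(columns=["x", "conv1", "fc1"])
    time_df = pandas.DataFrame(columns=["x", "conv1", "fc1"])
    measured_df = pandas.DataFrame({
        "LayerName": ["<Extra>", "conv1", "fc1_0"],
        "ExecStatus": ["EXECUTED", "EXECUTED", "EXECUTED"],
        "RunTime(ms)": [0.1, 1.5, 0.7],
    })
    return add_measured_to_input(time_df, input_df, measured_df)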
def extract_data_from_ncs2_report(infold, outfold, report, format="pickle"):
"""Reads file in a pandas dataframe and writes layer data into a pickle file
Args:
infold: folder where the reports are contained
outfold: folder where the pickled data will be stored
report: filename of the report where the data will be extracted
format: data format to save the data with - either pickle or json
Returns: none
"""
try:
filename = os.path.join(infold, report)
print(filename)
        data = pandas.read_csv(filename, sep=";")
    except OSError as err:
        # Minimal completion of the truncated snippet: skip reports that cannot be read
        print("Could not read report {}: {}".format(report, err))
        return
# Copyright 2018-2021 <NAME>, alvarobartt @ GitHub
# See LICENSE for details.
from datetime import datetime, date, timedelta
import pytz
import json
from random import randint, sample
import string
import pandas as pd
import pkg_resources
import requests
from unidecode import unidecode
from lxml.html import fromstring
from .utils import constant as cst
from .utils.extra import random_user_agent
from .utils.data import Data
from .data.currency_crosses_data import currency_crosses_as_df, currency_crosses_as_list, currency_crosses_as_dict
from .data.currency_crosses_data import available_currencies_as_list
def get_currency_crosses(base=None, second=None):
"""
This function retrieves all the available currency crosses from Investing.com and returns them as a
:obj:`pandas.DataFrame`, which contains not just the currency crosses names, but all the fields contained on
the currency_crosses file. Note that the filtering params are both base and second, which mean the base and the
second currency of the currency cross, for example, in the currency cross `EUR/USD` the base currency is EUR and
the second currency is USD. These are optional parameters, so specifying one of them means that all the currency
crosses where the introduced currency is either base or second will be returned; if both are specified,
just the introduced currency cross will be returned if it exists. All the available currency crosses can be found
at: https://www.investing.com/currencies/
Args:
base (:obj:`str`, optional):
symbol of the base currency of the currency cross, this will return a :obj:`pandas.DataFrame` containing
all the currency crosses where the base currency matches the introduced one.
second (:obj:`str`):
symbol of the second currency of the currency cross, this will return a :obj:`pandas.DataFrame` containing
all the currency crosses where the second currency matches the introduced one.
Returns:
:obj:`pandas.DataFrame` - currency_crosses_df:
The resulting :obj:`pandas.DataFrame` contains all the currency crosses basic information retrieved from
Investing.com.
In case the information was successfully retrieved, the resulting :obj:`pandas.DataFrame` will look like::
name | full_name | base | second | base_name | second_name
-----|-----------|------|--------|-----------|-------------
xxxx | xxxxxxxxx | xxxx | xxxxxx | xxxxxxxxx | xxxxxxxxxxx
Raises:
ValueError: raised if any of the introduced arguments is not valid or errored.
FileNotFoundError: raised if `currency_crosses.csv` file was not found.
IOError: raised if currency crosses retrieval failed, both for missing file or empty file.
"""
return currency_crosses_as_df(base=base, second=second)
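# Hedged usage sketch: filtering the listing by base currency. The column
# selection matches the DataFrame layout documented above.
def _demo_currency_cross_listing():
    df = get_currency_crosses(base='EUR')
    return df[['name', 'base', 'second']].head()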
def get_currency_crosses_list(base=None, second=None):
"""
    This function retrieves all the available currency crosses from Investing.com and returns them as a
    :obj:`list` containing the names of the currency crosses. Note
that the filtering params are both base and second, which mean the base and the second currency of the currency
cross, for example, in the currency cross `EUR/USD` the base currency is EUR and the second currency is USD. These
are optional parameters, so specifying one of them means that all the currency crosses where the introduced
currency is either base or second will be returned; if both are specified, just the introduced currency cross will
be returned if it exists. All the available currency crosses can be found at: https://www.investing.com/currencies/
Args:
base (:obj:`str`, optional):
symbol of the base currency of the currency cross, this will return a :obj:`pandas.DataFrame` containing
all the currency crosses where the base currency matches the introduced one.
second (:obj:`str`):
symbol of the second currency of the currency cross, this will return a :obj:`pandas.DataFrame` containing
all the currency crosses where the second currency matches the introduced one.
Returns:
:obj:`list` - currency_crosses_list:
The resulting :obj:`list` contains the retrieved data from the `currency_crosses.csv` file, which is
a listing of the names of the currency crosses listed in Investing.com, which is the input for data
retrieval functions as the name of the currency cross to retrieve data from needs to be specified.
In case the listing was successfully retrieved, the :obj:`list` will look like::
currency_crosses_list = [
'USD/BRLT', 'CAD/CHF', 'CHF/CAD', 'CAD/PLN', 'PLN/CAD', ...
]
Raises:
ValueError: raised if any of the introduced arguments is not valid or errored.
FileNotFoundError: raised if `currency_crosses.csv` file was not found.
IOError: raised if currency crosses retrieval failed, both for missing file or empty file.
"""
return currency_crosses_as_list(base=base, second=second)
def get_currency_crosses_dict(base=None, second=None, columns=None, as_json=False):
"""
This function retrieves all the available currency crosses from Investing.com and returns them as a
    :obj:`dict`, which contains not just the currency crosses names, but all the fields contained in
    the currency_crosses file if columns is None; otherwise, just the specified column values will be returned. Note
that the filtering params are both base and second, which mean the base and the second currency of the currency
cross, for example, in the currency cross `EUR/USD` the base currency is EUR and the second currency is USD. These
are optional parameters, so specifying one of them means that all the currency crosses where the introduced
currency is either base or second will be returned; if both are specified, just the introduced currency cross will
be returned if it exists. All the available currency crosses can be found at: https://www.investing.com/currencies/
Args:
base (:obj:`str`, optional):
symbol of the base currency of the currency cross, this will return a :obj:`pandas.DataFrame` containing
all the currency crosses where the base currency matches the introduced one.
second (:obj:`str`):
symbol of the second currency of the currency cross, this will return a :obj:`pandas.DataFrame` containing
all the currency crosses where the second currency matches the introduced one.
columns (:obj:`list`, optional):
names of the columns of the currency crosses data to retrieve <name, full_name, base, base_name,
second, second_name>
as_json (:obj:`bool`, optional):
value to determine the format of the output data which can either be a :obj:`dict` or a :obj:`json`.
Returns:
:obj:`dict` or :obj:`json` - currency_crosses_dict:
The resulting :obj:`dict` contains the retrieved data if found, if not, the corresponding
fields are filled with `None` values.
In case the information was successfully retrieved, the :obj:`dict` will look like::
{
'name': name,
'full_name': full_name,
'base': base,
'base_name': base_name,
'second': second,
'second_name': second_name
}
Raises:
ValueError: raised if any of the introduced arguments is not valid or errored.
FileNotFoundError: raised if `currency_crosses.csv` file was not found.
IOError: raised if currency crosses retrieval failed, both for missing file or empty file.
"""
return currency_crosses_as_dict(base=base, second=second, columns=columns, as_json=as_json)
def get_available_currencies():
"""
This function retrieves a listing with all the available currencies with indexed currency crosses in order to
get to know which are the available currencies. The currencies listed in this function, so on, can be used to
search currency crosses and used the retrieved data to get historical data of those currency crosses, so to
determine which is the value of one base currency in the second currency.
Returns:
:obj:`list` - available_currencies:
The resulting :obj:`list` contains all the available currencies with currency crosses being either the base
or the second value of the cross, as listed in Investing.com.
In case the listing was successfully retrieved, the :obj:`list` will look like::
available_currencies = [
'AED', 'AFN', 'ALL', 'AMD', 'ANG', ...
]
Raises:
FileNotFoundError: raised if `currency_crosses.csv` file was not found.
IOError: raised if currency crosses retrieval failed, both for missing file or empty file.
"""
return available_currencies_as_list()
def get_currency_cross_recent_data(currency_cross, as_json=False, order='ascending', interval='Daily'):
"""
This function retrieves recent historical data from the introduced `currency_cross` as indexed in Investing.com
    via Web Scraping. The resulting data can either be stored in a :obj:`pandas.DataFrame` or in a
:obj:`json` file, with `ascending` or `descending` order.
Args:
currency_cross (:obj:`str`): name of the currency_cross to retrieve recent historical data from.
as_json (:obj:`bool`, optional):
optional argument to determine the format of the output data (:obj:`pandas.DataFrame` or :obj:`json`).
order (:obj:`str`, optional):
optional argument to define the order of the retrieved data (`ascending`, `asc` or `descending`, `desc`).
interval (:obj:`str`, optional):
value to define the historical data interval to retrieve, by default `Daily`, but it can also be `Weekly` or `Monthly`.
Returns:
:obj:`pandas.DataFrame` or :obj:`json`:
            The function returns either a :obj:`pandas.DataFrame` or a :obj:`json` file containing the retrieved
recent data from the specified currency_cross via argument. The dataset contains the open, high, low, close,
volume and currency values for the selected currency_cross on market days.
            The return data, in case we use default arguments, will look like::
Date || Open | High | Low | Close | Currency
-----||------|------|-----|-------|---------
xxxx || xxxx | xxxx | xxx | xxxxx | xxxxxxxx
but if we define `as_json=True`, then the output will be::
{
name: name,
recent: [
dd/mm/yyyy: {
'open': x,
'high': x,
'low': x,
'close': x,
'currency' : x
},
...
]
}
Raises:
ValueError: raised if any of the introduced arguments was not valid or errored.
IOError: raised if currency_crosses object/file not found or unable to retrieve.
        RuntimeError: raised if the introduced currency_cross does not match any of the indexed ones.
ConnectionError: raised if GET request did not return 200 status code.
IndexError: raised if currency_cross information was unavailable or not found.
Examples:
>>> data = investpy.get_currency_cross_recent_data(currency_cross='EUR/USD')
>>> data.head()
Open High Low Close Currency
Date
2019-08-27 1.1101 1.1116 1.1084 1.1091 USD
2019-08-28 1.1090 1.1099 1.1072 1.1078 USD
2019-08-29 1.1078 1.1093 1.1042 1.1057 USD
2019-08-30 1.1058 1.1062 1.0963 1.0991 USD
2019-09-02 1.0990 1.1000 1.0958 1.0968 USD
"""
if not currency_cross:
raise ValueError("ERR#0052: currency_cross param is mandatory and should be a str.")
if not isinstance(currency_cross, str):
raise ValueError("ERR#0052: currency_cross param is mandatory and should be a str.")
if not isinstance(as_json, bool):
raise ValueError("ERR#0002: as_json argument can just be True or False, bool type.")
if order not in ['ascending', 'asc', 'descending', 'desc']:
raise ValueError("ERR#0003: order argument can just be ascending (asc) or descending (desc), str type.")
if not interval:
raise ValueError("ERR#0073: interval value should be a str type and it can just be either 'Daily', 'Weekly' or 'Monthly'.")
if not isinstance(interval, str):
raise ValueError("ERR#0073: interval value should be a str type and it can just be either 'Daily', 'Weekly' or 'Monthly'.")
interval = interval.lower()
if interval not in ['daily', 'weekly', 'monthly']:
raise ValueError("ERR#0073: interval value should be a str type and it can just be either 'Daily', 'Weekly' or 'Monthly'.")
resource_package = 'investpy'
resource_path = '/'.join(('resources', 'currency_crosses.csv'))
if pkg_resources.resource_exists(resource_package, resource_path):
currency_crosses = pd.read_csv(pkg_resources.resource_filename(resource_package, resource_path))
else:
raise FileNotFoundError("ERR#0060: currency_crosses file not found or errored.")
if currency_crosses is None:
raise IOError("ERR#0050: currency_crosses not found or unable to retrieve.")
currency_cross = unidecode(currency_cross.strip().lower())
if currency_cross not in list(currency_crosses['name'].apply(unidecode).str.lower()):
raise RuntimeError("ERR#0054: the introduced currency_cross " + str(currency_cross) + " does not exist.")
id_ = currency_crosses.loc[(currency_crosses['name'].apply(unidecode).str.lower() == currency_cross).idxmax(), 'id']
name = currency_crosses.loc[(currency_crosses['name'].apply(unidecode).str.lower() == currency_cross).idxmax(), 'name']
currency = currency_crosses.loc[(currency_crosses['name'].apply(unidecode).str.lower() == currency_cross).idxmax(), 'second']
header = name + ' Historical Data'
params = {
"curr_id": id_,
"smlID": str(randint(1000000, 99999999)),
"header": header,
"interval_sec": interval.capitalize(),
"sort_col": "date",
"sort_ord": "DESC",
"action": "historical_data"
}
head = {
"User-Agent": random_user_agent(),
"X-Requested-With": "XMLHttpRequest",
"Accept": "text/html",
"Accept-Encoding": "gzip, deflate, br",
"Connection": "keep-alive",
}
url = "https://www.investing.com/instruments/HistoricalDataAjax"
req = requests.post(url, headers=head, data=params)
if req.status_code != 200:
raise ConnectionError("ERR#0015: error " + str(req.status_code) + ", try again later.")
root_ = fromstring(req.text)
path_ = root_.xpath(".//table[@id='curr_table']/tbody/tr")
result = list()
if path_:
for elements_ in path_:
if elements_.xpath(".//td")[0].text_content() == 'No results found':
raise IndexError("ERR#0055: currency_cross information unavailable or not found.")
info = []
for nested_ in elements_.xpath(".//td"):
info.append(nested_.get('data-real-value'))
currency_cross_date = datetime.strptime(str(datetime.fromtimestamp(int(info[0]), tz=pytz.timezone('GMT')).date()), '%Y-%m-%d')
currency_cross_close = float(info[1].replace(',', ''))
currency_cross_open = float(info[2].replace(',', ''))
currency_cross_high = float(info[3].replace(',', ''))
currency_cross_low = float(info[4].replace(',', ''))
result.insert(len(result),
Data(currency_cross_date, currency_cross_open, currency_cross_high, currency_cross_low,
currency_cross_close, None, currency, None))
if order in ['ascending', 'asc']:
result = result[::-1]
elif order in ['descending', 'desc']:
result = result
if as_json is True:
json_ = {
'name': name,
'recent':
[value.currency_cross_as_json() for value in result]
}
return json.dumps(json_, sort_keys=False)
elif as_json is False:
df = pd.DataFrame.from_records([value.currency_cross_to_dict() for value in result])
df.set_index('Date', inplace=True)
return df
else:
raise RuntimeError("ERR#0004: data retrieval error while scraping.")
def get_currency_cross_historical_data(currency_cross, from_date, to_date, as_json=False, order='ascending', interval='Daily'):
"""
    This function retrieves historical data for the introduced `currency_cross` as indexed in Investing.com
    via Web Scraping. The resulting data can either be stored in a :obj:`pandas.DataFrame` or in a
:obj:`json` file, with `ascending` or `descending` order.
Args:
currency_cross (:obj:`str`): name of the currency cross to retrieve recent historical data from.
from_date (:obj:`str`): date as `str` formatted as `dd/mm/yyyy`, from where data is going to be retrieved.
to_date (:obj:`str`): date as `str` formatted as `dd/mm/yyyy`, until where data is going to be retrieved.
as_json (:obj:`bool`, optional):
optional argument to determine the format of the output data (:obj:`pandas.DataFrame` or :obj:`json`).
order (:obj:`str`, optional):
optional argument to define the order of the retrieved data (`ascending`, `asc` or `descending`, `desc`).
interval (:obj:`str`, optional):
value to define the historical data interval to retrieve, by default `Daily`, but it can also be `Weekly` or `Monthly`.
Returns:
:obj:`pandas.DataFrame` or :obj:`json`:
            The function returns either a :obj:`pandas.DataFrame` or a :obj:`json` file containing the retrieved
            historical data from the specified currency_cross via argument. The dataset contains the open, high, low, close and
volume values for the selected currency_cross on market days.
            The return data, in case we use default arguments, will look like::
Date || Open | High | Low | Close | Currency
-----||------|------|-----|-------|---------
xxxx || xxxx | xxxx | xxx | xxxxx | xxxxxxxx
but if we define `as_json=True`, then the output will be::
{
name: name,
historical: [
dd/mm/yyyy: {
'open': x,
'high': x,
'low': x,
'close': x,
'currency' : x
},
...
]
}
Raises:
        ValueError: raised if any of the introduced arguments was not valid or errored.
        IOError: raised if the currency_crosses object/file was not found or unable to retrieve.
        RuntimeError: raised if the introduced currency_cross does not match any of the indexed ones.
        ConnectionError: raised if the GET request did not return a 200 status code.
        IndexError: raised if currency_cross information was unavailable or not found.
Examples:
>>> data = investpy.get_currency_cross_historical_data(currency_cross='EUR/USD', from_date='01/01/2018', to_date='01/01/2019')
>>> data.head()
Open High Low Close Currency
Date
2018-01-01 1.2003 1.2014 1.1995 1.2010 USD
2018-01-02 1.2013 1.2084 1.2003 1.2059 USD
2018-01-03 1.2058 1.2070 1.2001 1.2014 USD
2018-01-04 1.2015 1.2090 1.2004 1.2068 USD
2018-01-05 1.2068 1.2085 1.2021 1.2030 USD
"""
if not currency_cross:
raise ValueError("ERR#0052: currency_cross param is mandatory and should be a str.")
if not isinstance(currency_cross, str):
raise ValueError("ERR#0052: currency_cross param is mandatory and should be a str.")
try:
datetime.strptime(from_date, '%d/%m/%Y')
except ValueError:
raise ValueError("ERR#0011: incorrect data format, it should be 'dd/mm/yyyy'.")
try:
datetime.strptime(to_date, '%d/%m/%Y')
except ValueError:
raise ValueError("ERR#0011: incorrect data format, it should be 'dd/mm/yyyy'.")
start_date = datetime.strptime(from_date, '%d/%m/%Y')
end_date = datetime.strptime(to_date, '%d/%m/%Y')
if start_date >= end_date:
raise ValueError("ERR#0032: to_date should be greater than from_date, both formatted as 'dd/mm/yyyy'.")
if not isinstance(as_json, bool):
raise ValueError("ERR#0002: as_json argument can just be True or False, bool type.")
if order not in ['ascending', 'asc', 'descending', 'desc']:
raise ValueError("ERR#0003: order argument can just be ascending (asc) or descending (desc), str type.")
if not interval:
raise ValueError("ERR#0073: interval value should be a str type and it can just be either 'Daily', 'Weekly' or 'Monthly'.")
if not isinstance(interval, str):
raise ValueError("ERR#0073: interval value should be a str type and it can just be either 'Daily', 'Weekly' or 'Monthly'.")
interval = interval.lower()
if interval not in ['daily', 'weekly', 'monthly']:
raise ValueError("ERR#0073: interval value should be a str type and it can just be either 'Daily', 'Weekly' or 'Monthly'.")
date_interval = {
'intervals': [],
}
flag = True
while flag is True:
diff = end_date.year - start_date.year
if diff > 19:
obj = {
'start': start_date.strftime('%m/%d/%Y'),
'end': start_date.replace(year=start_date.year + 19).strftime('%m/%d/%Y'),
}
date_interval['intervals'].append(obj)
start_date = start_date.replace(year=start_date.year + 19) + timedelta(days=1)
else:
obj = {
'start': start_date.strftime('%m/%d/%Y'),
'end': end_date.strftime('%m/%d/%Y'),
}
date_interval['intervals'].append(obj)
flag = False
interval_limit = len(date_interval['intervals'])
interval_counter = 0
data_flag = False
resource_package = 'investpy'
resource_path = '/'.join(('resources', 'currency_crosses.csv'))
if pkg_resources.resource_exists(resource_package, resource_path):
currency_crosses = pd.read_csv(pkg_resources.resource_filename(resource_package, resource_path))
else:
raise FileNotFoundError("ERR#0060: currency_crosses file not found or errored.")
if currency_crosses is None:
raise IOError("ERR#0050: currency_crosses not found or unable to retrieve.")
currency_cross = unidecode(currency_cross.strip().lower())
if currency_cross not in list(currency_crosses['name'].apply(unidecode).str.lower()):
raise RuntimeError("ERR#0054: the introduced currency_cross " + str(currency_cross) + " does not exist.")
id_ = currency_crosses.loc[(currency_crosses['name'].apply(unidecode).str.lower() == currency_cross).idxmax(), 'id']
name = currency_crosses.loc[(currency_crosses['name'].apply(unidecode).str.lower() == currency_cross).idxmax(), 'name']
currency = currency_crosses.loc[(currency_crosses['name'].apply(unidecode).str.lower() == currency_cross).idxmax(), 'second']
final = list()
header = name + ' Historical Data'
for index in range(len(date_interval['intervals'])):
interval_counter += 1
params = {
"curr_id": id_,
"smlID": str(randint(1000000, 99999999)),
"header": header,
"st_date": date_interval['intervals'][index]['start'],
"end_date": date_interval['intervals'][index]['end'],
"interval_sec": interval.capitalize(),
"sort_col": "date",
"sort_ord": "DESC",
"action": "historical_data"
}
head = {
"User-Agent": random_user_agent(),
"X-Requested-With": "XMLHttpRequest",
"Accept": "text/html",
"Accept-Encoding": "gzip, deflate, br",
"Connection": "keep-alive",
}
url = "https://www.investing.com/instruments/HistoricalDataAjax"
req = requests.post(url, headers=head, data=params)
if req.status_code != 200:
raise ConnectionError("ERR#0015: error " + str(req.status_code) + ", try again later.")
if not req.text:
continue
root_ = fromstring(req.text)
path_ = root_.xpath(".//table[@id='curr_table']/tbody/tr")
result = list()
if path_:
for elements_ in path_:
info = []
for nested_ in elements_.xpath(".//td"):
info.append(nested_.get('data-real-value'))
if elements_.xpath(".//td")[0].text_content() == 'No results found':
if interval_counter < interval_limit:
data_flag = False
else:
raise IndexError("ERR#0055: currency_cross information unavailable or not found.")
else:
data_flag = True
if data_flag is True:
currency_cross_date = datetime.strptime(str(datetime.fromtimestamp(int(info[0]), tz=pytz.timezone('GMT')).date()), '%Y-%m-%d')
currency_cross_close = float(info[1].replace(',', ''))
currency_cross_open = float(info[2].replace(',', ''))
currency_cross_high = float(info[3].replace(',', ''))
currency_cross_low = float(info[4].replace(',', ''))
result.insert(len(result),
Data(currency_cross_date, currency_cross_open, currency_cross_high, currency_cross_low,
currency_cross_close, None, currency, None))
if data_flag is True:
if order in ['ascending', 'asc']:
result = result[::-1]
elif order in ['descending', 'desc']:
result = result
if as_json is True:
json_list = [value.currency_cross_as_json() for value in result]
final.append(json_list)
elif as_json is False:
df = pd.DataFrame.from_records([value.currency_cross_to_dict() for value in result])
df.set_index('Date', inplace=True)
final.append(df)
else:
raise RuntimeError("ERR#0004: data retrieval error while scraping.")
if order in ['descending', 'desc']:
final.reverse()
if as_json is True:
json_ = {
'name': name,
'historical': [value for json_list in final for value in json_list]
}
return json.dumps(json_, sort_keys=False)
elif as_json is False:
        return pd.concat(final)
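# Hedged standalone sketch (not part of investpy): mirrors the 19-year chunking
# used in get_currency_cross_historical_data, which splits a long date range
# into sub-intervals before requesting each one separately.
def _demo_split_date_range(from_date='01/01/1980', to_date='01/01/2020', max_years=19):
    start = datetime.strptime(from_date, '%d/%m/%Y')
    end = datetime.strptime(to_date, '%d/%m/%Y')
    intervals = []
    while True:
        if end.year - start.year > max_years:
            chunk_end = start.replace(year=start.year + max_years)
            intervals.append((start.strftime('%m/%d/%Y'), chunk_end.strftime('%m/%d/%Y')))
            start = chunk_end + timedelta(days=1)
        else:
            intervals.append((start.strftime('%m/%d/%Y'), end.strftime('%m/%d/%Y')))
            return intervals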
# -*- coding: utf-8 -*-
#
# License: This module is released under the terms of the LICENSE file
# contained within this applications INSTALL directory
"""
Defines the ForecastModel class, which encapsulates model functions used in
forecast model fitting, as well as their number of parameters and
initialisation parameters.
"""
# -- Coding Conventions
# http://www.python.org/dev/peps/pep-0008/ - Use the Python style guide
# http://sphinx.pocoo.org/rest.html - Use Restructured Text for
# docstrings
# -- Public Imports
import itertools
import logging
import numpy as np
import pandas as pd
from pandas.tseries.holiday import Holiday, AbstractHolidayCalendar, \
MO, nearest_workday, next_monday, next_monday_or_tuesday, \
GoodFriday, EasterMonday, USFederalHolidayCalendar
from pandas.tseries.offsets import DateOffset
from datetime import datetime
# -- Private Imports
from anticipy import model_utils
# -- Globals
logger = logging.getLogger(__name__)
# Fourier model configuration
_dict_fourier_config = { # Default configuration for fourier-based models
'period': 365.25, # days in year
'harmonics': 10 # TODO: evaluate different harmonics values
}
_FOURIER_PERIOD = 365.25
_FOURIER_HARMONICS = 10 # TODO: evaluate different harmonics values
_FOURIER_K = (2.0 * np.pi / _FOURIER_PERIOD)
_FOURIER_I = np.arange(1, _FOURIER_HARMONICS + 1)
_FOURIER_DATE_ORIGIN = datetime(1970, 1, 1)
# -- Functions
# ---- Utility functions
def logger_info(msg, data):
# Convenience function for easier log typing
logger.info(msg + '\n%s', data)
def _get_f_init_params_default(n_params):
# Generate a default function for initialising model parameters: use
# random values between 0 and 1
return lambda a_x=None, a_y=None, a_date=None, is_mult=False:\
np.random.uniform(low=0.001, high=1, size=n_params)
def _get_f_bounds_default(n_params):
# Generate a default function for model parameter boundaries. Default
# boundaries are (-inf, inf)
return lambda a_x=None, a_y=None, a_date=None: (
n_params * [-np.inf], n_params * [np.inf])
def _get_f_add_2_f_models(forecast_model1, forecast_model2):
# Add model functions of 2 ForecastModels
def f_add_2_f_models(a_x, a_date, params, is_mult=False, **kwargs):
params1 = params[0:forecast_model1.n_params]
params2 = params[forecast_model1.n_params:]
return (
forecast_model1.f_model(
a_x,
a_date,
params1,
is_mult=False,
**kwargs) +
forecast_model2.f_model(
a_x,
a_date,
params2,
is_mult=False,
**kwargs))
return f_add_2_f_models
def _get_f_mult_2_f_models(forecast_model1, forecast_model2):
# Multiply model functions of 2 ForecastModels
def f_mult_2_f_models(a_x, a_date, params, is_mult=False, **kwargs):
params1 = params[0:forecast_model1.n_params]
params2 = params[forecast_model1.n_params:]
return (
forecast_model1.f_model(
a_x,
a_date,
params1,
is_mult=True,
**kwargs) *
forecast_model2.f_model(
a_x,
a_date,
params2,
is_mult=True,
**kwargs))
return f_mult_2_f_models
def _get_f_add_2_f_init_params(f_init_params1, f_init_params2):
# Compose parameter initialisation functions of 2 ForecastModels, using
# addition
def f_add_2_f_init_params(a_x, a_y, a_date=None, is_mult=False):
return np.concatenate(
[f_init_params1(a_x, a_y, a_date, is_mult=False),
f_init_params2(a_x, a_y, a_date, is_mult=False)])
return f_add_2_f_init_params
def _get_f_mult_2_f_init_params(f_init_params1, f_init_params2):
# Compose parameter initialisation functions of 2 ForecastModels, using
# multiplication
def f_mult_2_f_init_params(a_x, a_y, a_date=None, is_mult=False):
return np.concatenate(
[f_init_params1(a_x, a_y, a_date, is_mult=True),
f_init_params2(a_x, a_y, a_date, is_mult=True)])
return f_mult_2_f_init_params
def _get_f_concat_2_bounds(forecast_model1, forecast_model2):
# Compose parameter boundary functions of 2 ForecastModels
def f_add_2_f_bounds(a_x, a_y, a_date=None):
return np.concatenate(
(forecast_model1.f_bounds(
a_x, a_y, a_date), forecast_model2.f_bounds(
a_x, a_y, a_date)), axis=1)
return f_add_2_f_bounds
def _f_validate_input_default(a_x, a_y, a_date):
# Default input validation function for a ForecastModel. Always returns
# True
return True
def _as_list(l):
return l if isinstance(l, (list,)) else [l]
# Functions used to initialize cache variables in a ForecastModel
def _f_init_cache_a_month(a_x, a_date):
return a_date.month - 1
def _f_init_cache_a_weekday(a_x, a_date):
return a_date.weekday
def _f_init_cache_a_t_fourier(a_x, a_date):
# convert to days since epoch
t = (a_date - _FOURIER_DATE_ORIGIN).days.values
i = np.arange(1, _FOURIER_HARMONICS + 1)
a_tmp = _FOURIER_K * i.reshape(i.size, 1) * t
y = np.concatenate([np.sin(a_tmp), np.cos(a_tmp)])
return y
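# Hedged sketch: shape of the cached Fourier term matrix for a short daily
# range. With _FOURIER_HARMONICS = 10 the result stacks 10 sine rows on top of
# 10 cosine rows, with one column per date.
def _demo_fourier_cache():
    a_date = pd.date_range('2020-01-01', periods=7, freq='D')
    a_x = np.arange(len(a_date))
    return _f_init_cache_a_t_fourier(a_x, a_date).shape  # (20, 7)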
# Dictionary to store functions used to initialize cache variables
# in a ForecastModel
# This is shared across all ForecastModel instances
_dict_f_cache = dict(
a_month=_f_init_cache_a_month,
a_weekday=_f_init_cache_a_weekday,
a_t_fourier=_f_init_cache_a_t_fourier
)
# -- Classes
class ForecastModel:
"""
Class that encapsulates model functions for use in forecasting, as well as
their number of parameters and functions for parameter initialisation.
A ForecastModel instance is initialized with a model name, a number of
model parameters, and a model function. Class instances are
callable - when called as a function, their internal model function is
used. The main purpose of ForecastModel objects is to generate predicted
values for a time series, given a set of parameters. These values can be
compared to the original series to get an array of residuals::
y_predicted = model(a_x, a_date, params)
residuals = (a_y - y_predicted)
This is used in an optimization loop to obtain the optimal parameters for
the model.
The reason for using this class instead of raw model functions is that
ForecastModel supports function composition::
model_sum = fcast_model1 + fcast_model2
# fcast_model 1 and 2 are ForecastModel instances, and so is model_sum
a_y1 = fcast_model1(
a_x, a_date, params1) + fcast_model2(a_x, a_date, params2)
params = np.concatenate([params1, params2])
a_y2 = model_sum(a_x, a_date, params)
a_y1 == a_y2 # True
Forecast models can be added or multiplied, with the + and * operators.
Multiple levels of composition are supported::
model = (model1 + model2) * model3
Model composition is used to aggregate trend and seasonality model
components, among other uses.
Model functions have the following signature:
- f(a_x, a_date, params, is_mult)
- a_x : array of floats
- a_date: array of dates, same length as a_x. Only required for date-aware
models, e.g. for weekly seasonality.
- params: array of floats - model parameters - the optimisation loop
updates this to fit our actual values. Each
model function uses a fixed number of parameters.
- is_mult: boolean. True if the model is being used with multiplicative
composition. Required because
some model functions (e.g. steps) have different behaviour
when added to other models than when multiplying them.
- returns an array of floats - with same length as a_x - output of the
model defined by this object's modelling function f_model and the
current set of parameters
By default, model parameters are initialized as random values between
0 and 1. It is possible to define a parameter initialization function
that picks initial values based on the original time series.
This is passed during ForecastModel creation with the argument
f_init_params. Parameter initialization is compatible with model
composition: the initialization function of each component will be used
for that component's parameters.
Parameter initialisation functions have the following signature:
    - f_init_params(a_x, a_y, a_date, is_mult)
- a_x: array of floats - same length as time series
- a_y: array of floats - time series values
- returns an array of floats - with length equal to this object's n_params
value
By default, model parameters have no boundaries. However, it is possible
to define a boundary function for a model, that sets boundaries for each
model parameter, based on the input time series. This is passed during
ForecastModel creation with the argument f_bounds.
Boundary definition is compatible with model composition:
the boundary function of each component will be used for that component's
parameters.
Boundary functions have the following signature:
- f_bounds(a_x, a_y, a_date)
- a_x: array of floats - same length as time series
- a_y: array of floats - time series values
- a_date: array of dates, same length as a_x. Only required for date-aware
models, e.g. for weekly seasonality.
- returns a tuple of 2 arrays of floats. The first defines minimum
parameter boundaries, and the second the maximum parameter boundaries.
As an option, we can assign a list of input validation functions to a
model. These functions analyse the inputs that will be used for fitting a
model, returning True if valid, and False otherwise. The forecast logic
will skip a model from fitting if any of the validation functions for that
model returns False.
Input validation functions have the following signature:
- f_validate_input(a_x, a_y, a_date)
- See the description of model functions above for more details on these
parameters.
Our input time series should meet the following constraints:
- Minimum required samples depends on number of model parameters
- May include null values
- May include multiple values per sample
- A date array is only required if the model is date-aware
Class Usage::
model_x = ForecastModel(name, n_params, f_model, f_init_params,
l_f_validate_input)
# Get model name
model_name = model_x.name
# Get number of model parameters
n_params = model_x.n_params
# Get parameter initialisation function
f_init_params = model_x.f_init_params
# Get initial parameters
init_params = f_init_params(t_values, y_values)
# Get model fitting function
f_model = model_x.f_model
# Get model output
y = f_model(a_x, a_date, parameters)
The following pre-generated models are available. They are available as attributes from this module: # noqa
.. csv-table:: Forecast models
:header: "name", "params", "formula","notes"
:widths: 20, 10, 20, 40
"model_null",0, "y=0", "Does nothing.
Used to disable components (e.g. seasonality)"
"model_constant",1, "y=A", "Constant model"
"model_linear",2, "y=Ax + B", "Linear model"
"model_linear_nondec",2, "y=Ax + B", "Non decreasing linear model.
With boundaries to ensure model slope >=0"
"model_quasilinear",3, "y=A*(x^B) + C", "Quasilinear model"
"model_exp",2, "y=A * B^x", "Exponential model"
"model_decay",4, "Y = A * e^(B*(x-C)) + D", "Exponential decay model"
"model_step",2, "y=0 if x<A, y=B if x>=A", "Step model"
"model_two_steps",4, "see model_step", "2 step models.
Parameter initialization is aware of # of steps."
"model_sigmoid_step",3, "y = A + (B - A) / (1 + np.exp(- D * (x - C)))
", "Sigmoid step model"
"model_sigmoid",3, "y = A + (B - A) / (1 + np.exp(- D * (x - C)))", "
Sigmoid model"
"model_season_wday",7, "see desc.", "Weekday seasonality model.
Assigns a constant value to each weekday"
"model_season_wday",6, "see desc.", "6-param weekday seasonality model.
As above, with one constant set to 0."
"model_season_wday_2",2, "see desc.", "Weekend seasonality model.
Assigns a constant to each of weekday/weekend"
"model_season_month",12, "see desc.", "Month seasonality model.
Assigns a constant value to each month"
"model_season_fourier_yearly",10, "see desc", "Fourier
yearly seasonality model"
"""
def __init__(
self,
name,
n_params,
f_model,
f_init_params=None,
f_bounds=None,
l_f_validate_input=None,
l_cache_vars=None,
dict_f_cache=None,
):
"""
Create ForecastModel
:param name: Model name
:type name: basestring
:param n_params: Number of parameters for model function
:type n_params: int
:param f_model: Model function
:type f_model: function
:param f_init_params: Parameter initialisation function
:type f_init_params: function
:param f_bounds: Boundary function
:type f_bounds: function
"""
self.name = name
self.n_params = n_params
self.f_model = f_model
if f_init_params is not None:
self.f_init_params = f_init_params
else:
# Default initial parameters: random values between 0 and 1
self.f_init_params = _get_f_init_params_default(n_params)
if f_bounds is not None:
self.f_bounds = f_bounds
else:
self.f_bounds = _get_f_bounds_default(n_params)
if l_f_validate_input is None:
self.l_f_validate_input = [_f_validate_input_default]
else:
self.l_f_validate_input = _as_list(l_f_validate_input)
if l_cache_vars is None:
self.l_cache_vars = []
else:
self.l_cache_vars = _as_list(l_cache_vars)
if dict_f_cache is None:
self.dict_f_cache = dict()
else:
self.dict_f_cache = dict_f_cache
# TODO - REMOVE THIS - ASSUME NORMALIZED INPUT
def _get_f_init_params_validated(f_init_params):
# Adds argument validation to a parameter initialisation function
def f_init_params_validated(
a_x=None, a_y=None, a_date=None, is_mult=False):
if a_x is not None and pd.isnull(a_x).any():
raise ValueError('a_x cannot have null values')
return f_init_params(a_x, a_y, a_date, is_mult)
return f_init_params_validated
# Add logic to f_init_params that validates input
self.f_init_params = _get_f_init_params_validated(self.f_init_params)
def __call__(self, a_x, a_date, params, is_mult=False, **kwargs):
# assert len(params)==self.n_params
return self.f_model(a_x, a_date, params, is_mult, **kwargs)
def __str__(self):
return self.name
def __repr__(self):
return 'ForecastModel:{}'.format(self.name)
def __add__(self, forecast_model):
# Check for nulls
if self.name == 'null':
return forecast_model
if forecast_model.name == 'null':
return self
name = '({}+{})'.format(self.name, forecast_model.name)
n_params = self.n_params + forecast_model.n_params
f_model = _get_f_add_2_f_models(self, forecast_model)
f_init_params = _get_f_add_2_f_init_params(
self.f_init_params, forecast_model.f_init_params)
f_bounds = _get_f_concat_2_bounds(self, forecast_model)
l_f_validate_input = list(
set(self.l_f_validate_input + forecast_model.l_f_validate_input))
# Combine both dicts
dict_f_cache = self.dict_f_cache.copy()
dict_f_cache.update(forecast_model.dict_f_cache)
l_cache_vars = list(
set(self.l_cache_vars + forecast_model.l_cache_vars))
return ForecastModel(
name,
n_params,
f_model,
f_init_params,
f_bounds=f_bounds,
l_f_validate_input=l_f_validate_input,
l_cache_vars=l_cache_vars,
dict_f_cache=dict_f_cache
)
def __radd__(self, other):
return self.__add__(other)
def __mul__(self, forecast_model):
if self.name == 'null':
return forecast_model
if forecast_model.name == 'null':
return self
name = '({}*{})'.format(self.name, forecast_model.name)
n_params = self.n_params + forecast_model.n_params
f_model = _get_f_mult_2_f_models(self, forecast_model)
f_init_params = _get_f_mult_2_f_init_params(
self.f_init_params, forecast_model.f_init_params)
f_bounds = _get_f_concat_2_bounds(self, forecast_model)
l_f_validate_input = list(
set(self.l_f_validate_input + forecast_model.l_f_validate_input))
# Combine both dicts
dict_f_cache = self.dict_f_cache.copy()
dict_f_cache.update(forecast_model.dict_f_cache)
l_cache_vars = list(
set(self.l_cache_vars + forecast_model.l_cache_vars))
return ForecastModel(
name,
n_params,
f_model,
f_init_params,
f_bounds=f_bounds,
l_f_validate_input=l_f_validate_input,
l_cache_vars=l_cache_vars,
dict_f_cache=dict_f_cache
)
def __rmul__(self, other):
return self.__mul__(other)
def __eq__(self, other):
if isinstance(self, other.__class__):
return self.name == other.name
return NotImplemented
def __ne__(self, other):
x = self.__eq__(other)
if x is not NotImplemented:
return not x
return NotImplemented
def __hash__(self):
return hash(self.name)
def __lt__(self, other):
return self.name < other.name
def validate_input(self, a_x, a_y, a_date):
try:
l_result = [f_validate_input(a_x, a_y, a_date)
for f_validate_input in self.l_f_validate_input]
except AssertionError:
return False
return True
def init_cache(self, a_x, a_date):
dict_cache_vars = dict()
for k in self.l_cache_vars:
f = _dict_f_cache.get(k)
if f:
dict_cache_vars[k] = f(a_x, a_date)
else:
logger.warning('Cache function not found: %s', k)
# Search vars defined in internal cache function dictionary
for k in self.dict_f_cache:
f = self.dict_f_cache.get(k)
if f:
dict_cache_vars[k] = f(a_x, a_date)
else:
logger.warning('Cache function not found: %s', k)
return dict_cache_vars
# - Null model: 0
def _f_model_null(a_x, a_date, params, is_mult=False, **kwargs):
# This model does nothing - used to disable model components
# (e.g. seasonality) when adding/multiplying multiple functions
return float(is_mult) # Returns 1 if multiplying, 0 if adding
model_null = ForecastModel('null', 0, _f_model_null)
# - Constant model: :math:`Y = A`
def _f_model_constant(a_x, a_date, params, is_mult=False, **kwargs):
[A] = params
y = np.full(len(a_x), A)
return y
def _f_init_params_constant(a_x=None, a_y=None, a_date=None, is_mult=False):
if a_y is None:
return np.random.uniform(0, 1, 1)
else:
return np.nanmean(a_y) + np.random.uniform(0, 1, 1)
model_constant = ForecastModel(
'constant',
1,
_f_model_constant,
_f_init_params_constant)
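# Hedged usage sketch: evaluating a ForecastModel directly. model_constant has
# a single parameter and ignores the date array, so any aligned date range works.
def _demo_model_constant():
    a_x = np.arange(10, dtype=float)
    a_date = pd.date_range('2020-01-01', periods=10, freq='D')
    return model_constant(a_x, a_date, np.array([3.0]))  # ten values equal to 3.0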
# - Naive model: Y = Y(x-1)
# Note: This model requires passing the actuals data - it is not fitted by
# regression. We still pass it to forecast.fit_model() for consistency
# with the rest of the library
def _f_model_naive(a_x, a_date, params, is_mult=False, df_actuals=None):
if df_actuals is None:
raise ValueError('model_naive requires a df_actuals argument')
df_out_tmp = pd.DataFrame({'date': a_date, 'x': a_x})
df_out = (
# This is not really intended to work with multiple values per sample
df_actuals.drop_duplicates('x')
.merge(df_out_tmp, how='outer')
.sort_values('x')
)
df_out['y'] = (
df_out.y.shift(1)
.fillna(method='ffill')
.fillna(method='bfill')
)
df_out = df_out.loc[df_out.x.isin(a_x)]
# df_out = df_out_tmp.merge(df_out, how='left')
# TODO: CHECK THAT X,DATE order is preserved
# TODO: df_out = df_out.merge(df_out_tmp, how='right')
return df_out.y.values
model_naive = ForecastModel('naive', 0, _f_model_naive)
# - Seasonal naive model
# Note: This model requires passing the actuals data - it is not fitted by
# regression. We still pass it to forecast.fit_model() for consistency
# with the rest of the library
def _fillna_wday(df):
"""
In a time series, shift samples by 1 week
and fill gaps with data from same weekday
"""
def add_col_y_out(df):
df = df.assign(y_out=df.y.shift(1).fillna(method='ffill'))
return df
df_out = (
df
.assign(wday=df.date.dt.weekday)
.groupby('wday', as_index=False).apply(add_col_y_out)
.sort_values(['x'])
.reset_index(drop=True)
)
return df_out
def _f_model_snaive_wday(a_x, a_date, params, is_mult=False, df_actuals=None):
"""Naive model - takes last valid weekly sample"""
if df_actuals is None:
raise ValueError('model_snaive_wday requires a df_actuals argument')
# df_actuals_model - table with actuals samples,
# adding y_out column with naive model values
df_actuals_model = _fillna_wday(df_actuals.drop_duplicates('x'))
# df_last_week - table with naive model values from last actuals week,
# to use in extrapolation
df_last_week = (
df_actuals_model
# Fill null actual values with data from previous weeks
.assign(y=df_actuals_model.y.fillna(df_actuals_model.y_out))
.drop_duplicates('wday', keep='last')
[['wday', 'y']]
.rename(columns=dict(y='y_out'))
)
# Generate table with extrapolated samples
df_out_tmp = pd.DataFrame({'date': a_date, 'x': a_x})
df_out_tmp['wday'] = df_out_tmp.date.dt.weekday
df_out_extrapolated = (
df_out_tmp
.loc[~df_out_tmp.date.isin(df_actuals_model.date)]
.merge(df_last_week, how='left')
.sort_values('x')
)
# Filter actuals table - only samples in a_x, a_date
df_out_actuals_filtered = (
# df_actuals_model.loc[df_actuals_model.x.isin(a_x)]
# Using merge rather than simple filtering to account for
# dates with multiple samples
df_actuals_model.merge(df_out_tmp, how='inner')
.sort_values('x')
)
df_out = (
pd.concat(
[df_out_actuals_filtered, df_out_extrapolated],
sort=False, ignore_index=True)
)
return df_out.y_out.values
model_snaive_wday = ForecastModel('snaive_wday', 0, _f_model_snaive_wday)
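# Hedged sketch of the df_actuals layout that model_naive/model_snaive_wday
# expect: one row per sample with 'date', 'x' and 'y' columns (names taken from
# the model functions above; the values are invented). The last week of actuals
# is reused for the extrapolated dates.
def _demo_snaive_wday():
    actual_dates = pd.date_range('2020-01-06', periods=14, freq='D')  # two full weeks
    df_actuals = pd.DataFrame({
        'date': actual_dates,
        'x': np.arange(len(actual_dates), dtype=float),
        'y': np.tile([10.0, 11.0, 12.0, 13.0, 14.0, 5.0, 4.0], 2),
    })
    a_date = pd.date_range('2020-01-06', periods=21, freq='D')  # one extra week to forecast
    a_x = np.arange(len(a_date), dtype=float)
    return model_snaive_wday(a_x, a_date, params=[], df_actuals=df_actuals)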
# - Spike model: :math:`Y = A`, when x_min <= X < x_max
def _f_model_spike(a_x, a_date, params, is_mult=False, **kwargs):
[A, x_min, x_max] = params
if is_mult:
c = 1
else:
c = 0
y = np.concatenate((
np.full(int(x_min), c),
np.full(int(x_max - x_min), A),
np.full(len(a_x) - int(x_max), c)
))
return y
def _f_init_params_spike(a_x=None, a_y=None, a_date=None, is_mult=False):
""" params are spike height, x start, x end """
# if not a_y.any():
if a_y is None:
        return np.concatenate([[1.0], np.random.uniform(0, 1, 1), [2.0]])  # concatenation, not element-wise addition
else:
diffs = np.diff(a_y)
# if diffs:
if True:
diff = max(diffs)
x_start = np.argmax(diffs)
x_end = x_start + 1
return np.array([diff, x_start, x_end])
model_spike = ForecastModel('spike', 3, _f_model_spike, _f_init_params_spike)
# - Spike model for dates - dates are fixed for each model
def _f_model_spike_date(
a_x,
a_date,
params,
date_start,
date_end,
is_mult=False):
[A] = params
mask_spike = (a_date >= date_start) * (a_date < date_end)
if is_mult:
y = mask_spike * A + ~mask_spike
else:
y = mask_spike * A
return y
def _f_init_params_spike(a_x=None, a_y=None, a_date=None, is_mult=False):
""" params are spike height, x start, x end """
if a_y is None:
return np.concatenate([np.array([1]) + np.random.uniform(0, 1, 1)])
else:
diffs = np.diff(a_y)
# if diffs:
if True:
diff = max(diffs)
return np.array([diff])
# else:
# rand = np.random.randint(1, len(a_y) - 1)
# return [1]
def get_model_spike_date(date_start, date_end):
f_model = (
lambda a_x, a_date, params, is_mult=False, **kwargs:
_f_model_spike_date(a_x, a_date, params, date_start, date_end, is_mult)
)
model_spike_date = ForecastModel(
'spike_date[{},{}]'.format(
pd.to_datetime(date_start).date(),
pd.to_datetime(date_end).date()),
1,
f_model,
_f_init_params_spike)
return model_spike_date
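# Hedged example (dates are illustrative): a one-parameter spike model pinned
# to a fixed date window, e.g. a single-day Christmas effect.
def _example_spike_date_usage():
    return get_model_spike_date('2018-12-25', '2018-12-26')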
# - Linear model: :math:`Y = A*x + B`
def _f_model_linear(a_x, a_date, params, is_mult=False, **kwargs):
(A, B) = params
y = A * a_x + B
return y
def _f_init_params_linear(a_x=None, a_y=None, a_date=None, is_mult=False):
if a_y is None:
return np.random.uniform(low=0, high=1, size=2)
else: # TODO: Improve this
if a_x is not None:
a_x_size = np.unique(a_x).size - 1
else:
a_x_size = a_y.size - 1
A = (a_y[-1] - a_y[0]) / a_x_size
B = a_y[0]
# Uniform low= 0*m, high = 1*m
return np.array([A, B])
model_linear = ForecastModel(
'linear',
2,
_f_model_linear,
_f_init_params_linear)
def f_init_params_linear_nondec(
a_x=None,
a_y=None,
a_date=None,
is_mult=False):
params = _f_init_params_linear(a_x, a_y, a_date)
if params[0] < 0:
params[0] = 0
return params
def f_bounds_linear_nondec(a_x=None, a_y=None, a_date=None):
# first param should be between 0 and inf
return [0, -np.inf], [np.inf, np.inf]
model_linear_nondec = ForecastModel('linear_nondec', 2, _f_model_linear,
f_init_params=f_init_params_linear_nondec,
f_bounds=f_bounds_linear_nondec)
# - QuasiLinear model: :math:`Y = A t^{B} + C`
def _f_model_quasilinear(a_x, a_date, params, is_mult=False, **kwargs):
(A, B, C) = params
y = A * np.power(a_x, B) + C
return y
model_quasilinear = ForecastModel('quasilinear', 3, _f_model_quasilinear)
# - Exponential model: math:: Y = A * B^t
# TODO: Deprecate - not safe to use
def _f_model_exp(a_x, a_date, params, is_mult=False, **kwargs):
(A, B) = params
y = A * np.power(B, a_x)
return y
model_exp = ForecastModel('exponential', 2, _f_model_exp)
# - Exponential decay model: math:: Y = A * e^(B*x) + D
def _f_model_decay(a_x, a_date, params, is_mult=False, **kwargs):
(A, B, D) = params
y = A * np.exp(B * (a_x)) + D
return y
def _f_validate_input_decay(a_x, a_y, a_date):
assert (a_y > 0).all()
def f_init_params_decay(a_x=None, a_y=None, a_date=None, is_mult=False):
if a_y is None:
return np.array([0, 0, 0])
A = a_y[0] - a_y[-1]
B = np.log(np.min(a_y) / np.max(a_y)) / (len(a_y) - 1)
if B > 0 or B == -np.inf:
B = -0.5
D = a_y[-1]
return np.array([A, B, D])
def f_bounds_decay(a_x=None, a_y=None, a_date=None):
return [-np.inf, -np.inf, -np.inf], [np.inf, 0, np.inf]
model_decay = ForecastModel('decay', 3, _f_model_decay,
f_init_params=f_init_params_decay,
f_bounds=f_bounds_decay,
l_f_validate_input=_f_validate_input_decay)
# - Step function: :math:`Y = {0, if x < A | B, if x >= A}`
# A is the time of step, and B is the step
def _f_step(a_x, a_date, params, is_mult=False, **kwargs):
(A, B) = params
if is_mult:
y = 1 + (B - 1) * np.heaviside(a_x - A, 1)
else:
y = B * np.heaviside(a_x - A, 1)
return y
# TODO: Implement initialisation for multiplicative composition
def _f_init_params_step(a_x=None, a_y=None, a_date=None, is_mult=False):
if a_y is None:
return np.random.uniform(0, 1, 2)
else:
if a_y.ndim > 1:
a_y = a_y[:, 0]
df = pd.DataFrame({'b': a_y})
# max difference between consecutive values
df['diff'] = df.diff().abs()
# if is_mult, replace above line with something like
# np.concatenate([[np.NaN],a_y[:-1]/a_y[1:]])
a = df.nlargest(1, 'diff').index[0]
b = df['diff'].iloc[a]
return np.array([a, b * 2])
# TODO: Add boundaries for X axis
model_step = ForecastModel('step', 2, _f_step, _f_init_params_step)
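# Hedged usage sketch (toy values): an additive step of height 2.0 starting at
# x=3 on a 6-sample axis.
def _example_step_usage():
    y = _f_step(np.arange(6), None, (3, 2.0))
    # y -> array([0., 0., 0., 2., 2., 2.])
    return y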
# - Spike model for dates - dates are fixed for each model
def _f_model_step_date(a_x, a_date, params, date_start, is_mult=False):
[A] = params
mask_step = (a_date >= date_start).astype(float)
if is_mult:
# y = mask_step*A + ~mask_step
y = mask_step * (A - 1) + 1
else:
y = mask_step * A
return y
# TODO: Implement initialisation for multiplicative composition
def _f_init_params_step_date(a_x=None, a_y=None, a_date=None, is_mult=False):
if a_y is None:
return np.random.uniform(0, 1, 1)
else:
if a_y.ndim > 1:
a_y = a_y[:, 0]
df = pd.DataFrame({'b': a_y})
# max difference between consecutive values
df['diff'] = df.diff().abs()
# if is_mult, replace above line with something like
# np.concatenate([[np.NaN],a_y[:-1]/a_y[1:]])
a = df.nlargest(1, 'diff').index[0]
b = df['diff'].iloc[a]
return np.array([b * 2])
def get_model_step_date(date_start):
date_start = pd.to_datetime(date_start)
f_model = (
lambda a_x, a_date, params, is_mult=False, **kwargs:
_f_model_step_date(a_x, a_date, params, date_start, is_mult)
)
model_step_date = ForecastModel('step_date[{}]'.format(date_start.date()),
1, f_model, _f_init_params_step_date)
return model_step_date
# Two step functions
def _f_n_steps(n, a_x, a_date, params, is_mult=False):
if is_mult:
y = 1
else:
y = 0
for i in range(0, n + 1, 2):
A, B = params[i: i + 2]
if is_mult:
y = y * _f_step(a_x, a_date, (A, B), is_mult)
else:
y = y + _f_step(a_x, a_date, (A, B), is_mult)
return y
def _f_two_steps(a_x, a_date, params, is_mult=False, **kwargs):
return _f_n_steps(
n=2,
a_x=a_x,
a_date=a_date,
params=params,
is_mult=is_mult)
def _f_init_params_n_steps(
n=2,
a_x=None,
a_y=None,
a_date=None,
is_mult=False):
if a_y is None:
return np.random.uniform(0, 1, n * 2)
else:
# max difference between consecutive values
if a_y.ndim > 1:
a_y = a_y[:, 0]
df = pd.DataFrame({'b': a_y})
df['diff'] = df.diff().abs()
# if is_mult, replace above line with something like
# np.concatenate([[np.NaN],a_y[:-1]/a_y[1:]])
a = df.nlargest(n, 'diff').index[0:n].values
b = df['diff'].iloc[a].values
params = []
for i in range(0, n):
params += [a[i], b[i]]
return np.array(params)
def _f_init_params_two_steps(a_x=None, a_y=None, a_date=None, is_mult=False):
return _f_init_params_n_steps(
n=2,
a_x=a_x,
a_y=a_y,
a_date=a_date,
is_mult=is_mult)
model_two_steps = ForecastModel(
'two_steps',
2 * 2,
_f_two_steps,
_f_init_params_two_steps)
# - Sigmoid step function: `Y = {A + (B - A) / (1 + np.exp(- D * (a_x - C)))}`
# Spans from A to B, C is the position of the step in x axis
# and D is how steep the increase is
def _f_sigmoid(a_x, a_date, params, is_mult=False, **kwargs):
(B, C, D) = params
if is_mult:
A = 1
else:
A = 0
# TODO check if a_x is negative
y = A + (B - A) / (1 + np.exp(- D * (a_x - C)))
return y
def _f_init_params_sigmoid_step(
a_x=None,
a_y=None,
a_date=None,
is_mult=False):
if a_y is None:
return np.random.uniform(0, 1, 3)
else:
if a_y.ndim > 1:
a_y = a_y[:, 0]
df = pd.DataFrame({'y': a_y})
# max difference between consecutive values
df['diff'] = df.diff().abs()
c = df.nlargest(1, 'diff').index[0]
b = df.loc[c, 'y']
d = b * b
return b, c, d
def _f_init_bounds_sigmoid_step(a_x=None, a_y=None, a_date=None):
if a_y is None:
return [-np.inf, -np.inf, 0.], 3 * [np.inf]
if a_y.ndim > 1:
a_y = a_y[:, 0]
if a_x.ndim > 1:
a_x = a_x[:, 0]
diff = max(a_y) - min(a_y)
b_min = -2 * diff
b_max = 2 * diff
c_min = min(a_x)
c_max = max(a_x)
d_min = 0.
d_max = np.inf
return [b_min, c_min, d_min], [b_max, c_max, d_max]
# In this model, parameter initialization is aware of number of steps
model_sigmoid_step = ForecastModel(
'sigmoid_step',
3,
_f_sigmoid,
_f_init_params_sigmoid_step,
f_bounds=_f_init_bounds_sigmoid_step)
model_sigmoid = ForecastModel('sigmoid', 3, _f_sigmoid)
# Ramp functions - used for piecewise linear models
# example : model_linear_pw2 = model_linear + model_ramp
# example 2: model_linear_p23 = model_linear + model_ramp + model_ramp
# - Ramp function: :math:`Y = {0, if x < A | B, if x >= A}`
# A is the time of step, and B is the step
def _f_ramp(a_x, a_date, params, is_mult=False, **kwargs):
(A, B) = params
if is_mult:
y = 1 + (a_x - A) * (B) * np.heaviside(a_x - A, 1)
else:
y = (a_x - A) * B * np.heaviside(a_x - A, 1)
return y
def _f_init_params_ramp(a_x=None, a_y=None, a_date=None, is_mult=False):
# TODO: set boundaries: a_x (0.2, 0.8)
if a_y is None:
if a_x is not None:
nfirst_last = int(np.ceil(0.15 * a_x.size))
a = np.random.uniform(a_x[nfirst_last], a_x[-nfirst_last - 1], 1)
else:
a = np.random.uniform(0, 1, 1)
b = np.random.uniform(0, 1, 1)
return np.concatenate([a,
b])
else:
# TODO: FILTER A_Y BY 20-80 PERCENTILE IN A_X
df = pd.DataFrame({'b': a_y})
if a_x is not None:
#
df['x'] = a_x
# Required because we support input with multiple samples per x
# value
df = df.drop_duplicates('x')
df = df.set_index('x')
# max difference between consecutive values -- this assumes no null
# values in series
df['diff2'] = df.diff().diff().abs()
# We ignore the last 15% of the time series
skip_samples = int(np.ceil(df.index.size * 0.15))
a = (df.head(-skip_samples).tail(
-skip_samples).nlargest(1, 'diff2').index[0]
)
b = df['diff2'].loc[a]
# TODO: replace b with estimation of slope in segment 2
# minus slope in segment 1 - see init_params_linear
return np.array([a, b])
def _f_init_bounds_ramp(a_x=None, a_y=None, a_date=None):
if a_x is None:
a_min = -np.inf
a_max = np.inf
else:
# a_min = np.min(a_x)
nfirst_last = int(np.ceil(0.15 * a_x.size))
a_min = a_x[nfirst_last]
a_max = a_x[-nfirst_last]
# a_min = np.percentile(a_x, 15)
# a_max = np.percentile(a_x,85)
if a_y is None:
b_min = -np.inf
b_max = np.inf
else:
# TODO: FILTER A_Y BY 20-80 PERCENTILE IN A_X
# df = pd.DataFrame({'b': a_y})
# #max_diff2 = np.max(df.diff().diff().abs())
# max_diff2 = np.max(np.abs(np.diff(np.diff(a_y))))
#
# b_min = -2*max_diff2
# b_max = 2*max_diff2
b_min = -np.inf
b_max = np.inf
# logger_info('DEBUG: BOUNDS:',(a_min, b_min,a_max, b_max))
return ([a_min, b_min], [a_max, b_max])
model_ramp = ForecastModel(
'ramp',
2,
_f_ramp,
_f_init_params_ramp,
_f_init_bounds_ramp)
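# Hedged example of the composition mentioned in the comments above; it
# assumes ForecastModel implements `+` for model composition, as used
# elsewhere in this module.
def _example_piecewise_linear():
    # Two-segment piecewise-linear model: a linear trend plus one ramp.
    return model_linear + model_ramp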
# - Weekday seasonality
def _f_model_season_wday(
a_x, a_date, params, is_mult=False,
# cache variables
a_weekday=None,
**kwargs):
# Weekday seasonality model, 6 params
# params_long[0] is default series value,
params_long = np.concatenate([[float(is_mult)], params])
if a_weekday is None:
a_weekday = _f_init_cache_a_weekday(a_x, a_date)
return params_long[a_weekday]
def _f_validate_input_season_wday(a_x, a_y, a_date):
assert a_date is not None
assert a_date.weekday.drop_duplicates().size == 7
model_season_wday = ForecastModel(
'season_wday',
6,
_f_model_season_wday,
l_f_validate_input=_f_validate_input_season_wday,
l_cache_vars=['a_weekday']
)
# - Month seasonality
def _f_init_params_season_month(
a_x=None,
a_y=None,
a_date=None,
is_mult=False):
if a_y is None or a_date is None:
return np.random.uniform(low=-1, high=1, size=11)
else: # TODO: Improve this
l_params_long = [np.mean(a_y[a_date.month == i])
for i in np.arange(1, 13)]
l_baseline = l_params_long[-1]
l_params = np.array(l_params_long[:-1])
if not is_mult:
l_params_add = l_params - l_baseline
return l_params_add
else:
l_params_mult = l_params / l_baseline
return l_params_mult
def _f_model_season_month(
a_x, a_date, params, is_mult=False,
# cache variables
a_month=None,
**kwargs):
# Month of December is taken as default level, has no parameter
# params_long[0] is default series value
params_long = np.concatenate([[float(is_mult)], params])
if a_month is None:
a_month = _f_init_cache_a_month(a_x, a_date)
return params_long[a_month]
model_season_month = ForecastModel(
'season_month',
11,
_f_model_season_month,
_f_init_params_season_month,
l_cache_vars=['a_month']
)
model_season_month_old = ForecastModel(
'season_month_old', 11, _f_model_season_month)
def _f_model_yearly_season_fourier(
a_x,
a_date,
params,
is_mult=False,
# cache params
a_t_fourier=None,
**kwargs):
if a_t_fourier is None:
a_t_fourier = _f_init_cache_a_t_fourier(None, a_date)
y = np.matmul(params, a_t_fourier)
return y
def _f_init_params_fourier_n_params(
n_params,
a_x=None,
a_y=None,
a_date=None,
is_mult=False):
if a_y is None:
params = np.random.uniform(0.001, 1, n_params)
else:
# max difference in time series
diff = a_y.max() - a_y.min()
params = diff * np.random.uniform(0.001, 1, n_params)
return params
def _f_init_params_fourier(a_x=None, a_y=None, a_date=None, is_mult=False):
n_params = 2 * _dict_fourier_config['harmonics']
return _f_init_params_fourier_n_params(
n_params, a_x=a_x, a_y=a_y, a_date=a_date, is_mult=is_mult)
def _f_init_bounds_fourier_nparams(n_params, a_x=None, a_y=None, a_date=None):
return n_params * [-np.inf], n_params * [np.inf]
def _f_init_bounds_fourier_yearly(a_x=None, a_y=None, a_date=None):
n_params = 2 * _dict_fourier_config['harmonics']
return _f_init_bounds_fourier_nparams(n_params, a_x, a_y, a_date)
model_season_fourier_yearly = ForecastModel(
name='season_fourier_yearly',
n_params=2 * _dict_fourier_config.get('harmonics'),
f_model=_f_model_yearly_season_fourier,
f_init_params=_f_init_params_fourier,
f_bounds=_f_init_bounds_fourier_yearly,
l_cache_vars='a_t_fourier'
)
def get_fixed_model(forecast_model, params_fixed, is_mult=False):
# Generate model with some fixed parameters
if forecast_model.n_params == 0: # Nothing to do
return forecast_model
if len(params_fixed) != forecast_model.n_params:
err = 'Wrong number of fixed parameters'
raise ValueError(err)
return ForecastModel(
forecast_model.name + '_fixed', 0,
f_model=lambda a_x, a_date, params, is_mult=is_mult, **kwargs:
forecast_model.f_model(
a_x=a_x, a_date=a_date, params=params_fixed, is_mult=is_mult))
def get_iqr_thresholds(s_diff, low=0.25, high=0.75):
# Get thresholds based on inter quantile range
q1 = s_diff.quantile(low)
q3 = s_diff.quantile(high)
iqr = q3 - q1
thr_low = q1 - 1.5 * iqr
thr_hi = q3 + 1.5 * iqr
return thr_low, thr_hi
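# Hedged sanity check with made-up differences: only the single large jump
# (12.0) falls outside the IQR-based thresholds.
def _example_iqr_thresholds():
    s_diff = pd.Series([0., 1., -1., 0.5, -0.5, 12.])
    thr_low, thr_hi = get_iqr_thresholds(s_diff)
    return (s_diff < thr_low) | (s_diff > thr_hi)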
# TODO: Add option - estimate_outl_size
# TODO: Add option - sigmoid steps
# TODO: ADD option - gaussian spikes
def get_model_outliers(df, window=3):
"""
Identify outlier samples in a time series
:param df: Input time series
:type df: pandas.DataFrame
:param window: The x-axis window to aggregate multiple steps/spikes
:type window: int
:return:
| tuple (mask_step, mask_spike)
| mask_step: True if sample contains a step
| mask_spike: True if sample contains a spike
:rtype: tuple of 2 numpy arrays of booleans
TODO: require minimum number of samples to find an outlier
"""
dfo = df.copy() # dfo - df for outliers
# If df has datetime index, use date logic in steps/spikes
with_dates = 'date' in df.columns
x_col = 'date' if with_dates else 'x'
if df[x_col].duplicated().any():
raise ValueError('Input cannot have multiple values per sample')
# Get the differences
dfo['dif'] = dfo.y.diff()
# We consider as outliers the values that are
# 1.5 * IQR (interquartile range) beyond the quartiles.
# These thresholds are obtained here
thr_low, thr_hi = get_iqr_thresholds(dfo.dif)
# Now identify the changes
dfo['ischange'] = ((dfo.dif < thr_low) | (dfo.dif > thr_hi)).astype(int)
# Whenever there are two or more consecutive changes
# (that is, within `window` samples), we group them together
dfo['ischange_group'] = (
dfo.ischange.rolling(window, win_type=None, center=True).max().fillna(
0).astype(int)
)
# We now have to calculate the difference within the
# same group in order to identify if the consecutive changes
# result in a step, a spike, or both.
# We get the filtered difference
dfo['dif_filt'] = (dfo.dif * dfo.ischange).fillna(0)
# And the absolute value of that
dfo['dif_filt_abs'] = dfo.dif_filt.abs()
dfo['change_group'] = dfo.ischange_group.diff(
).abs().fillna(0).astype(int).cumsum()
# this gets us the average difference of the outliers within each change
# group
df_mean_gdiff = (
dfo.loc[dfo.ischange.astype(bool)].groupby('change_group')[
'dif_filt'].mean().rename('mean_group_diff').reset_index())
# this gets us the average absolute difference of the outliers within each
# change group
df_mean_gdiff_abs = (
dfo.loc[dfo.ischange.astype(bool)].groupby('change_group')[
'dif_filt_abs'].mean().rename(
'mean_group_diff_abs').reset_index()
)
# Merge the differences with the original dfo
dfo = dfo.merge(
df_mean_gdiff,
how='left').merge(
df_mean_gdiff_abs,
how='left')
# Fill missing values with zero -> no change
dfo.mean_group_diff = dfo.mean_group_diff.fillna(0)
dfo.mean_group_diff_abs = dfo.mean_group_diff_abs.fillna(0)
# the change group is a step if the mean_group_diff exceeds the thresholds
dfo['is_step'] = dfo['ischange_group'] & (
((dfo.mean_group_diff < thr_low) | (dfo.mean_group_diff > thr_hi)))
# the change group is a spike if the difference between the
# mean_group_diff_abs and the average mean_group_diff exceeds
# the average threshold value
dfo['is_spike'] = (dfo.mean_group_diff_abs -
dfo.mean_group_diff.abs()) > (thr_hi - thr_low) / 2
# Get the outlier start and end points for each group
df_outl = (
dfo.loc[dfo.ischange.astype(bool)].groupby('change_group').apply(
lambda x: pd.Series(
{'outl_start': x[x_col].iloc[0],
'outl_end': x[x_col].iloc[-1]})).reset_index()
)
if df_outl.empty: # No outliers - nothing to do
return np.full(dfo.index.size, False), np.full(dfo.index.size, False)
dfo = dfo.merge(df_outl, how='left')
# Get the start and end points in dfo
if with_dates:
# Convert to datetime, if we are using dates
dfo['outl_start'] = pd.to_datetime(dfo.outl_start)
dfo['outl_end'] = pd.to_datetime(dfo.outl_end)
# Create the mask for spikes and steps
dfo['mask_spike'] = (dfo['is_spike'] &
(dfo.date >= pd.to_datetime(dfo.outl_start)) &
(dfo.date < pd.to_datetime(dfo.outl_end)))
dfo['mask_step'] = (dfo['is_step'] &
(dfo.date >= pd.to_datetime(dfo.outl_start)) &
(dfo.date <= pd.to_datetime(dfo.outl_end)))
else:
# For non-date x values, we fill na's and convert to int
dfo['outl_start'] = dfo.outl_start.fillna(0).astype(int)
dfo['outl_end'] = dfo.outl_end.fillna(0).astype(int)
# Create the mask for spikes and steps
dfo['mask_spike'] = (dfo['is_spike'] &
(dfo.x >= dfo.outl_start) &
(dfo.x < dfo.outl_end))
dfo['mask_step'] = (dfo['is_step'] &
(dfo.x >= dfo.outl_start) &
(dfo.x <= dfo.outl_end))
return dfo.mask_step.values, dfo.mask_spike.values
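# Hedged usage sketch on a toy series (values invented): a single step up at
# x=5 is reported in mask_step and not in mask_spike.
def _example_get_model_outliers():
    df = pd.DataFrame({'x': np.arange(10),
                       'y': [1., 1., 1., 1., 1., 9., 9., 9., 9., 9.]})
    mask_step, mask_spike = get_model_outliers(df)
    return mask_step, mask_spike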
def create_fixed_step(diff, x):
# Generate a fixed step model
fixed_params = [x, diff]
return get_fixed_model(model_step, fixed_params)
def create_fixed_spike(diff, x, duration):
# Generate a fixed spike model
fixed_params = [diff, x, x + duration]
return get_fixed_model(model_spike, fixed_params)
def create_fixed_spike_ignored(x, duration):
# Generate a fixed spike ignored model
fixed_params = [0, x, x + duration]
return get_fixed_model(model_spike, fixed_params, is_mult=True)
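# Hedged example (values invented): zero-parameter models for known outliers,
# e.g. a +10 step at x=20 and a 2-sample spike of height 3 starting at x=7.
def _example_fixed_outlier_models():
    return create_fixed_step(10.0, 20), create_fixed_spike(3.0, 7, 2)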
# Dummy variable models
def _validate_f_dummy(f_dummy):
# Ensures that behaviour of f_dummy matches specs
# Must return array of floats, same length as a_x, with values either 0.
# or 1.
def validate_for_dummy(a_dummy):
assert isinstance(a_dummy, np.ndarray)
assert np.setdiff1d(a_dummy, np.array([0., 1.])).size == 0
# validate_for_dummy(f_dummy(np.arange(0, 10), None)) # Crashes with
# f_dummy 's that require dates
validate_for_dummy(
f_dummy(
np.arange(
0, 10), pd.date_range(
'2018-01-01', '2018-01-10')))
def _get_f_dummy(dummy):
"""
Get a function that generates a mask array from a dummy variable
:param dummy: dummy variable, that can be used to generate a mask array
:type dummy: function, pandas Holiday/Calendar,
or list-like of numerics or dates
:return: model function based on dummy variable, to use on a ForecastModel
:rtype: function
"""
if callable(dummy): # If dummy is a function, use it
f_dummy = dummy
elif isinstance(dummy, Holiday):
f_dummy = _get_f_dummy_from_holiday(dummy)
elif isinstance(dummy, AbstractHolidayCalendar):
f_dummy = _get_f_dummy_from_calendar(dummy)
else:
# If dummy is a list, convert to function
f_dummy = _get_f_dummy_from_list(dummy)
return f_dummy
def _get_f_dummy_from_list(list_check):
"""
Generate a f_dummy function that defines a dummy variable, can be used
for dummy models
:param list_check: Input list
:type list_check: list-like of numerics or datetime-likes
:return: f_dummy
:rtype: function
"""
s_check = pd.Series(list_check)
assert s_check.size, 'Input list cannot be empty'
if pd.api.types.is_numeric_dtype(s_check):
list_check_numeric = s_check
def f_dummy_list_numeric(a_x, a_date):
# return a_x in check_numeric
return np.isin(a_x, list_check_numeric).astype(float)
return f_dummy_list_numeric
else:
try:
list_check_date = pd.to_datetime(s_check)
def f_dummy_list_date(a_x, a_date):
# return a_x in check_numeric
return np.isin(a_date, list_check_date).astype(float)
return f_dummy_list_date
except BaseException:
raise ValueError(
'list_dummy must be a list-like with numeric or '
'date-like values: {}'.format(list_check))
def _get_f_dummy_from_calendar(calendar):
# Generate dummy model function from a pandas HolidayCalendar
def f_dummy_calendar(a_x, a_date, **kwargs):
# TODO: If we can pass dict_cal as an argument,
# use pre-loaded list of dates for performance
# TODO: If we can guarantee sorted dates,
# change this to a_date[0], a_date[-1] for performance
list_check_date = calendar.holidays(a_date.min(), a_date.max())
return np.isin(a_date, list_check_date).astype(float)
return f_dummy_calendar
def _get_f_dummy_from_holiday(holiday):
def f_dummy_holiday(a_x, a_date, **kwargs):
# TODO: If we can pass dict_cal as an argument,
# use pre-loaded list of dates for performance
# if dict_cal in kwargs.keys():
# list_check_date = dict_cal.get(holiday.name)
# else:
# TODO: If we can guarantee sorted dates,
# change this to a_date[0], a_date[-1] for performance
list_check_date = holiday.dates(a_date.min(), a_date.max())
return np.isin(a_date, list_check_date).astype(float)
return f_dummy_holiday
def _get_f_model_dummy(f_dummy, mask_name):
"""
Generate a model function for a dummy variable defined by f_dummy
:param dummy: dummy variable, that can be used to generate a mask array
:type dummy: function, pandas Holiday/Calendar,
or list-like of numerics or dates
:return: model function based on dummy variable, to use on a ForecastModel
:rtype: function
"""
def f_model_check(a_x, a_date, params, is_mult=False, **kwargs):
# Uses internal f_check to assign 0 or 1 to each sample
# If f_dummy(x)==1, return A
# If f_dummy(x)==0, return 0 (or 1 if is_mult)
a_mask = kwargs.get(mask_name)
if a_mask is None:
a_mask = f_dummy(a_x, a_date)
[A] = params
if not is_mult:
a_result = A * a_mask
else:
a_result = (A) * a_mask + 1
return a_result
return f_model_check
def get_model_dummy(name, dummy, **kwargs):
"""
Generate a model based on a dummy variable.
:param name: Name of the model
:type name: basestring
:param dummy:
| Can be a function or a list-like.
| If a function, it must be of the form f_dummy(a_x, a_date),
| and return a numpy array of floats
| with the same length as a_x and values that are either 0 or 1.
| If a list-like of numerics, it will be converted to a f_dummy function
| as described above, which will have values of 1 when a_x has one of
| the values in the list, and 0 otherwise. If a list-like of date-likes,
| it will be converted to a f_dummy function as described above, which
| will have values of 1 when a_date has one of the values in the list,
| and 0 otherwise.
:type dummy: function, or list-like of numerics or datetime-likes
:param kwargs:
:type kwargs:
:return:
| A model that returns A when dummy is 1, and 0 (or 1 if is_mult==True)
| otherwise.
:rtype: ForecastModel
"""
mask_name = 'mask_' + name
f_dummy = _get_f_dummy(dummy)
_validate_f_dummy(f_dummy)
f_model_dummy = _get_f_model_dummy(f_dummy, mask_name)
dict_f_cache = {mask_name: f_dummy}
return ForecastModel(
name, 1, f_model_dummy, dict_f_cache=dict_f_cache, **kwargs)
model_season_wday_2 = get_model_dummy(
'season_wday_2', lambda a_x, a_date, **kwargs:
(a_date.weekday < 5).astype(float))
# Example dummy model - checks if it is Christmas
model_dummy_christmas = get_model_dummy(
'dummy_christmas', lambda a_x, a_date, **kwargs:
((a_date.month == 12) & (a_date.day == 25)).astype(float))
# Example dummy model - checks if it is first day of month
model_dummy_month_start = get_model_dummy(
'dummy_month_start', lambda a_x, a_date, **kwargs:
(a_date.day == 1).astype(float))
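# Hedged example (dates invented): a dummy model built from an explicit list
# of dates, as described in the get_model_dummy docstring.
model_dummy_promo_example = get_model_dummy(
    'dummy_promo_example', ['2018-07-01', '2018-07-02'])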
class CalendarBankHolUK(AbstractHolidayCalendar):
rules = [
GoodFriday,
EasterMonday,
# Early May Bank Holiday - first Monday in May
Holiday('Early May Bank Holiday', month=5, day=1,
offset=DateOffset(weekday=MO(1))
),
# Spring Bank Holiday - Last Monday in May
Holiday('Spring Bank Holiday', month=5, day=31,
offset=DateOffset(weekday=MO(-1))
),
# August Bank holiday - Last Monday in August
Holiday('August Bank Holiday', month=8, day=30,
offset=DateOffset(weekday=MO(-1))
)
]
class CalendarChristmasUK(AbstractHolidayCalendar):
rules = [
Holiday('New Year\'s Day', month=1, day=1, observance=next_monday),
Holiday('Christmas', month=12, day=25, observance=next_monday),
Holiday('Boxing Day', month=12, day=26,
observance=next_monday_or_tuesday),
]
# Bank Holidays for Italy
class CalendarBankHolIta(AbstractHolidayCalendar):
rules = [
EasterMonday,
Holiday('Festa della Liberazione', month=4, day=25),
Holiday('Festa del lavoro', month=5, day=1),
Holiday('Festa della Repubblica', month=6, day=2),
Holiday('Ferragosto', month=8, day=15),
Holiday('Tutti i Santi', month=11, day=1),
Holiday('Immacolata Concezione', month=12, day=8),
]
class CalendarChristmasIta(AbstractHolidayCalendar):
rules = [
Holiday('New Year\'s Day', month=1, day=1, observance=next_monday),
Holiday('Christmas', month=12, day=25, observance=next_monday),
Holiday('Santo Stefano', month=12, day=26,
observance=next_monday_or_tuesday),
Holiday('Epiphany', month=1, day=6, observance=next_monday),
]
def get_model_from_calendars(l_calendar, name=None):
"""
Create a ForecastModel based on a list of pandas Calendars.
:param l_calendar: calendar, or list of calendars, to convert into a model
:type l_calendar: pandas.tseries.holiday.AbstractHolidayCalendar, or list of them
:return: model based on the input calendar
:rtype: ForecastModel
In pandas, Holidays and calendars provide a simple way to define
holiday rules, to be used in any analysis that requires a predefined
set of holidays. This function converts a Calendar object into a
ForecastModel that assigns a parameter to each calendar rule.
As an example, a Calendar with 1 rule defining Christmas dates
generates a model with a single parameter, which
determines the amount added/multiplied to samples falling on Christmas.
A calendar with 2 rules for Christmas and New Year will have two parameters
- the first one applying to samples in Christmas, and the second
one applying to samples in New Year.
Usage::
from pandas.tseries.holiday import USFederalHolidayCalendar
model_calendar = get_model_from_calendars(USFederalHolidayCalendar())
"""
if isinstance(l_calendar, AbstractHolidayCalendar):
l_calendar = [l_calendar]
# Filter out calendars without rules
l_calendar = [calendar for calendar in l_calendar if calendar.rules]
assert len(l_calendar), 'Need 1+ valid calendars'
if name is None:
name = l_calendar[0].name
l_model_dummy = [get_model_dummy(calendar.name, calendar)
for calendar in l_calendar]
f_model_prod = np.prod(l_model_dummy)
f_model_sum = np.sum(l_model_dummy)
def _f_init_params_calendar(
a_x=None, a_y=None, a_date=None, is_mult=False):
if is_mult:
return np.ones(len(l_model_dummy))
else:
return np.zeros(len(l_model_dummy))
def _f_model_calendar(a_x, a_date, params, is_mult=False, **kwargs):
f_all_dummies = f_model_prod if is_mult else f_model_sum
return f_all_dummies(a_x, a_date, params, is_mult, **kwargs)
model_calendar = ForecastModel(
name,
len(l_model_dummy),
_f_model_calendar,
_f_init_params_calendar,
l_cache_vars=f_model_sum.l_cache_vars,
dict_f_cache=f_model_sum.dict_f_cache
)
return model_calendar
model_calendar_uk = get_model_from_calendars(
[CalendarChristmasUK(), CalendarBankHolUK()], 'calendar_uk')
model_calendar_us = get_model_from_calendars(
USFederalHolidayCalendar(), 'calendar_us')
# %%
import lightgbm as lgb
from sklearn.model_selection import train_test_split
from sklearn.metrics import roc_auc_score
from sklearn.model_selection import GridSearchCV
import numpy as np
import os
import pandas as pd
# %%
# base_dir = os.getcwd()
base_dir = '/Users/jason/bestpaycup2020'
x_df = pd.read_csv(base_dir + '/dataset/dataset4/trainset/train_base.csv')
y_df = pd.read_csv(base_dir + '/dataset/raw_dataset/trainset/train_label.csv')
data_x = np.array(x_df)
# train_x = np.delete(train_x, 0, axis=1)
data_y = np.array(y_df)
# %%
# Align x with y by the ID column, then preprocess
data_x = data_x[data_x[:, 0].argsort()]
data_y = data_y[data_y[:, 0].argsort()]
data_x = data_x[:, 1:].astype(float)
data_y = data_y[:, 1:].astype(float).reshape(1, -1)[0]
# %%
# Standardization (left commented out)
# n, l = data_x.shape
# for j in range(l):
# meanVal = np.mean(data_x[:, j])
# stdVal = np.std(data_x[:, j])
# if stdVal != 0 and not np.all(meanVal == 0.0):
# data_x[:, j] = (data_x[:, j] - meanVal) / stdVal
# %%
# Shuffle the data, keeping x and y aligned
state = np.random.get_state()
np.random.shuffle(data_x)
np.random.set_state(state)
np.random.shuffle(data_y)
# %%
X_train, X_val, y_train, y_val = train_test_split(data_x, data_y, test_size=0.1)
# %%
train_data = lgb.Dataset(X_train, label=y_train)
val_data = lgb.Dataset(X_val, label=y_val)
params = {
'learning_rate': 0.1,
'max_depth': 10,
'num_leaves': 1000,
'objective': 'binary',
'subsample': 0.8,
'colsample_bytree': 0.8,
'metric': 'auc',
'n_estimators': 63
# 'is_training_metric': True,
}
# %%
# train
# clf = lgb.train(params, train_data, valid_sets=[val_data])
# %%
# Tuning: find the best n_estimators via cross-validation
clf = lgb.cv(params, train_data, num_boost_round=1000, nfold=5, stratified=False, shuffle=True, metrics='auc',
early_stopping_rounds=50, seed=0)
print('best n_estimators:', len(clf['auc-mean']))
print('best cv score:', pd.Series(clf['auc-mean']).max())
# best:54
# %%
# Tuning: determine max_depth and num_leaves
params_test1 = {
'max_depth': range(3, 8, 1),
'num_leaves': range(5, 100, 5)
}
gsearch1 = GridSearchCV(estimator=lgb.LGBMClassifier(
boosting_type='gbdt',
objective='binary',
metrics='auc',
learning_rate=0.1,
n_estimators=54,
max_depth=10,
bagging_fraction=0.8,
feature_fraction=0.8
),
param_grid=params_test1,
scoring='roc_auc',
cv=5,
n_jobs=-1)
gsearch1.fit(X_train, y_train)
print(gsearch1.best_params_)
print(gsearch1.best_score_)
# output:
# {'max_depth': 7, 'num_leaves': 35}
# 0.6860003095778059
# %%
# Determine min_data_in_leaf and max_bin
params_test2 = {
'max_bin': range(5, 256, 10),
'min_data_in_leaf': range(1, 102, 10)
}
gsearch2 = GridSearchCV(
estimator=lgb.LGBMClassifier(boosting_type='gbdt',
objective='binary',
metrics='auc',
learning_rate=0.1,
n_estimators=54,
max_depth=7,
num_leaves=35,
bagging_fraction=0.8,
feature_fraction=0.8),
param_grid=params_test2, scoring='roc_auc', cv=5, n_jobs=-1
)
gsearch2.fit(X_train, y_train)
print(gsearch2.best_params_)
print(gsearch2.best_score_)
# output:
# {'max_bin': 45, 'min_data_in_leaf': 81}
# 0.7130982903950965
# %%
# Determine feature_fraction, bagging_fraction and bagging_freq
params_test3 = {
'feature_fraction': [0.3, 0.2, 0.5, 0.4],
'bagging_fraction': [0.3, 0.4, 0.5, 0.6, 0.7],
'bagging_freq': range(0, 40, 10)
}
gsearch3 = GridSearchCV(
estimator=lgb.LGBMClassifier(boosting_type='gbdt',
objective='binary',
metrics='auc',
learning_rate=0.1,
n_estimators=54,
max_depth=7,
num_leaves=35,
max_bin=45,
min_data_in_leaf=81),
param_grid=params_test3, scoring='roc_auc', cv=5, n_jobs=-1
)
gsearch3.fit(X_train, y_train)
print(gsearch3.best_params_)
print(gsearch3.best_score_)
# output
# {'bagging_fraction': 0.3, 'bagging_freq': 0, 'feature_fraction': 0.4}
# 0.7130982903950965
# %%
# Determine lambda_l1 and lambda_l2
params_test4 = {'lambda_l1': [0.9, 1.0, 1.2, 1.3, 1.4, 1.5, 1.6],
'lambda_l2': [0.4, 0.5, 0.6]}
gsearch4 = GridSearchCV(
estimator=lgb.LGBMClassifier(boosting_type='gbdt',
objective='binary',
metrics='auc',
learning_rate=0.1,
n_estimators=54,
max_depth=7,
num_leaves=35,
max_bin=45,
min_data_in_leaf=81,
bagging_fraction=0.3,
bagging_freq=0,
feature_fraction=0.4),
param_grid=params_test4, scoring='roc_auc', cv=5, n_jobs=-1
)
gsearch4.fit(X_train, y_train)
print(gsearch4.best_params_)
print(gsearch4.best_score_)
# output
# {'lambda_l1': 1.0, 'lambda_l2': 0.5}
# 0.7132416453983882
# %%
# Determine min_split_gain
params_test5 = {'min_split_gain': [0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]}
gsearch5 = GridSearchCV(
estimator=lgb.LGBMClassifier(boosting_type='gbdt',
objective='binary',
metrics='auc',
learning_rate=0.1,
n_estimators=54,
max_depth=7,
num_leaves=35,
max_bin=45,
min_data_in_leaf=81,
bagging_fraction=0.3,
bagging_freq=0,
feature_fraction=0.4,
lambda_l1=1.0,
lambda_l2=0.5),
param_grid=params_test5, scoring='roc_auc', cv=5, n_jobs=-1
)
gsearch5.fit(X_train, y_train)
print(gsearch5.best_params_)
print(gsearch5.best_score_)
# output
# {'min_split_gain': 0.0}
# 0.7117714487623986
# %%
# Training 1
"""
This run performs poorly, scoring only about 0.53.
"""
model1 = lgb.LGBMClassifier(boosting_type='gbdt',
objective='binary',
metrics='auc',
learning_rate=0.01,
n_estimators=10000,
max_depth=6,
num_leaves=30,
max_bin=15,
min_data_in_leaf=71,
bagging_fraction=0.6,
bagging_freq=0,
feature_fraction=0.8,
lambda_l1=1.0,
lambda_l2=0.7,
min_split_gain=0.1)
model1.fit(X_train, y_train, eval_set=[(X_val, y_val)], early_stopping_rounds=500)
y_hat = model1.predict(X_val)
print('auc: ', roc_auc_score(y_val, y_hat))
# %%
# Inspect feature importances
headers = x_df.columns.tolist()
headers.pop(0)
pd.set_option('display.max_rows', None)
print(pd.DataFrame({
'column': headers,
'importance': model1.feature_importances_,
}).sort_values(by='importance'))
importance = pd.DataFrame({
'column': headers,
'importance': model1.feature_importances_,
}).sort_values(by='importance')
importance.to_csv(base_dir + '/models/treemodel/lgb_2_1_weight1.csv', index=False)
# %%
# Training 2
"""
This run works well: about 0.72 locally and 0.67 after submission (after adding output1_1_1).
"""
train_data = lgb.Dataset(X_train, label=y_train)
val_data = lgb.Dataset(X_val, label=y_val, reference=train_data)
params = {
'boosting_type': 'gbdt',
'objective': 'binary',
'metrics': 'auc',
'learning_rate': 0.01,
'n_estimators': 10000,
'max_depth': 7,
'num_leaves': 35,
'max_bin': 45,
'min_data_in_leaf': 81,
'bagging_fraction': 0.3,
'bagging_freq': 0,
'feature_fraction': 0.4,
'lambda_l1': 1.0,
'lambda_l2': 0.5,
'min_split_gain': 0.0
}
model2 = lgb.train(params, train_data, num_boost_round=1000, valid_sets=val_data, early_stopping_rounds=500)
y_hat = model2.predict(X_val)
print('auc: ', roc_auc_score(y_val, y_hat))
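# %%
# Hedged sketch (not part of the original notebook): score an unlabeled test
# set with the tuned booster and write a submission file. The test-set path,
# ID/label column layout and output file name are assumptions for
# illustration only.
test_df = pd.read_csv(base_dir + '/dataset/dataset4/testset/test_base.csv')  # hypothetical path
test_x = np.array(test_df)[:, 1:].astype(float)
test_pred = model2.predict(test_x)  # predicted probabilities from the trained booster
submission = pd.DataFrame({'id': np.array(test_df)[:, 0], 'prob': test_pred})
submission.to_csv(base_dir + '/models/treemodel/lgb_2_submission.csv', index=False)  # hypothetical path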
# %%
# Inspect feature importances
headers = x_df.columns.tolist()
headers.pop(0)
pd.set_option('display.max_rows', None)
import os
import pandas as pd
import numpy as np
from keras import losses
from sklearn.model_selection import train_test_split
from utils import get_model, helpers
#
# LSTM
# if "rnn_lstm" in models:
def run_lstm(model_directory, X, Y, X_test, Y_test):
model = get_model.rnn_lstm(nclass=1, dense_layers=[64, 16, 8], binary=True)
file_name = "ptbdb_rnn_lstm"
file_path = os.path.join(model_directory, file_name + ".h5")
model = helpers.run(model, file_path, X, Y)
model.load_weights(file_path)
# Save the entire model as a SavedModel.
model.save(os.path.join(model_directory, file_name))
# Make predictions on test set
pred_test = model.predict(X_test)
# Evaluate predictions
helpers.test_binary(Y_test, pred_test)
#
# GRU
# if "rnn_gru" in models:
def run_gru(model_directory, X, Y, X_test, Y_test):
model = get_model.rnn_gru(nclass=1, dense_layers=[64, 16, 8], binary=True)
file_name = "ptbdb_rnn_gru"
file_path = os.path.join(model_directory, file_name + ".h5")
model = helpers.run(model, file_path, X, Y)
model.load_weights(file_path)
# Save the entire model as a SavedModel.
model.save(os.path.join(model_directory, file_name))
# Make predictions on test set
pred_test = model.predict(X_test)
# Evaluate predictions
helpers.test_binary(Y_test, pred_test)
#
# Bidirectional LSTM
# if "rnn_lstm_bidir" in models:
def run_lstm_bidir(model_directory, X, Y, X_test, Y_test):
model = get_model.rnn_lstm_bidir(nclass=1, dense_layers=[64, 16, 8], binary=True)
file_name = "ptbdb_rnn_lstm_bidir"
file_path = os.path.join(model_directory, file_name + ".h5")
model = helpers.run(model, file_path, X, Y)
model.load_weights(file_path)
# Save the entire model as a SavedModel.
model.save(os.path.join(model_directory, file_name))
# Make predictions on test set
pred_test = model.predict(X_test)
# Evaluate predictions
helpers.test_binary(Y_test, pred_test)
#
# Bidirectional GRU
# if "rnn_gru_bidir" in models:
def run_gru_bidir(model_directory, X, Y, X_test, Y_test):
model = get_model.rnn_gru_bidir(nclass=1, dense_layers=[64, 16, 8], binary=True)
file_name = "ptbdb_rnn_gru_bidir"
file_path = os.path.join(model_directory, file_name + ".h5")
model = helpers.run(model, file_path, X, Y)
model.load_weights(file_path)
# Save the entire model as a SavedModel.
model.save(os.path.join(model_directory, file_name))
# Make predictions on test set
pred_test = model.predict(X_test)
# Evaluate predictions
helpers.test_binary(Y_test, pred_test)
#
# Transfer Learning
# if "rnn_gru_bidir_transfer" in models:
def run_transfer_learning(base_model, model_directory, X, Y, X_test, Y_test):
model = get_model.transfer_learning(
nclass=1, base_model=base_model, loss=losses.binary_crossentropy
)
file_name = "ptbdb_rnn_gru_bidir_transfer"
file_path = file_name + ".h5"
model = helpers.run(model, file_path, X, Y)
model.load_weights(file_path)
# Save the entire model as a SavedModel.
model.save(os.path.join(model_directory, file_name))
# Make predictions on test set
pred_test = model.predict(X_test)
# Evaluate predictions
helpers.test_binary(Y_test, pred_test)
if __name__ == '__main__':
models = [
"rnn_lstm",
"rnn_gru",
"rnn_gru_bidir",
"rnn_lstm_bidir",
"rnn_transferlearning",
]
# Make directory
model_directory = "./models"
if not os.path.exists(model_directory):
os.makedirs(model_directory)
df_1 = pd.read_csv("../data/code/ptbdb_normal.csv", header=None)
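# Hedged sketch of the remaining driver code (the original file is truncated
# here): load the abnormal-class file, build a stratified train/test split and
# run the selected models. The file name and the 188-column PTBDB layout
# (last column = label) are assumptions.
df_2 = pd.read_csv("../data/code/ptbdb_abnormal.csv", header=None)
df = pd.concat([df_1, df_2])
df_train, df_test = train_test_split(df, test_size=0.2, random_state=1337, stratify=df[187])
Y = np.array(df_train[187].values).astype(np.int8)
X = np.array(df_train[list(range(187))].values)[..., np.newaxis]
Y_test = np.array(df_test[187].values).astype(np.int8)
X_test = np.array(df_test[list(range(187))].values)[..., np.newaxis]
if "rnn_lstm" in models:
    run_lstm(model_directory, X, Y, X_test, Y_test)
if "rnn_gru" in models:
    run_gru(model_directory, X, Y, X_test, Y_test)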
from datetime import datetime
import re
import numpy as np
import pytest
from pandas import (
DataFrame,
Index,
MultiIndex,
Series,
_testing as tm,
)
def test_extract_expand_kwarg_wrong_type_raises(any_string_dtype):
# TODO: should this raise TypeError
values = Series(["fooBAD__barBAD", np.nan, "foo"], dtype=any_string_dtype)
with pytest.raises(ValueError, match="expand must be True or False"):
values.str.extract(".*(BAD[_]+).*(BAD)", expand=None)
def test_extract_expand_kwarg(any_string_dtype):
s = Series(["fooBAD__barBAD", np.nan, "foo"], dtype=any_string_dtype)
expected = DataFrame(["BAD__", np.nan, np.nan], dtype=any_string_dtype)
result = s.str.extract(".*(BAD[_]+).*")
tm.assert_frame_equal(result, expected)
result = s.str.extract(".*(BAD[_]+).*", expand=True)
tm.assert_frame_equal(result, expected)
expected = DataFrame(
[["BAD__", "BAD"], [np.nan, np.nan], [np.nan, np.nan]], dtype=any_string_dtype
)
result = s.str.extract(".*(BAD[_]+).*(BAD)", expand=False)
tm.assert_frame_equal(result, expected)
def test_extract_expand_False_mixed_object():
ser = Series(
["aBAD_BAD", np.nan, "BAD_b_BAD", True, datetime.today(), "foo", None, 1, 2.0]
)
# two groups
result = ser.str.extract(".*(BAD[_]+).*(BAD)", expand=False)
er = [np.nan, np.nan] # empty row
expected = DataFrame([["BAD_", "BAD"], er, ["BAD_", "BAD"], er, er, er, er, er, er])
tm.assert_frame_equal(result, expected)
# single group
result = ser.str.extract(".*(BAD[_]+).*BAD", expand=False)
expected = Series(
["BAD_", np.nan, "BAD_", np.nan, np.nan, np.nan, np.nan, np.nan, np.nan]
)
tm.assert_series_equal(result, expected)
def test_extract_expand_index_raises():
# GH9980
# Index only works with one regex group since
# multi-group would expand to a frame
idx = Index(["A1", "A2", "A3", "A4", "B5"])
msg = "only one regex group is supported with Index"
with pytest.raises(ValueError, match=msg):
idx.str.extract("([AB])([123])", expand=False)
def test_extract_expand_no_capture_groups_raises(index_or_series, any_string_dtype):
s_or_idx = index_or_series(["A1", "B2", "C3"], dtype=any_string_dtype)
msg = "pattern contains no capture groups"
# no groups
with pytest.raises(ValueError, match=msg):
s_or_idx.str.extract("[ABC][123]", expand=False)
# only non-capturing groups
with pytest.raises(ValueError, match=msg):
s_or_idx.str.extract("(?:[AB]).*", expand=False)
def test_extract_expand_single_capture_group(index_or_series, any_string_dtype):
# single group renames series/index properly
s_or_idx = index_or_series(["A1", "A2"], dtype=any_string_dtype)
result = s_or_idx.str.extract(r"(?P<uno>A)\d", expand=False)
expected = index_or_series(["A", "A"], name="uno", dtype=any_string_dtype)
if index_or_series == Series:
tm.assert_series_equal(result, expected)
else:
tm.assert_index_equal(result, expected)
def test_extract_expand_capture_groups(any_string_dtype):
s = Series(["A1", "B2", "C3"], dtype=any_string_dtype)
# one group, no matches
result = s.str.extract("(_)", expand=False)
expected = Series([np.nan, np.nan, np.nan], dtype=any_string_dtype)
tm.assert_series_equal(result, expected)
# two groups, no matches
result = s.str.extract("(_)(_)", expand=False)
expected = DataFrame(
[[np.nan, np.nan], [np.nan, np.nan], [np.nan, np.nan]], dtype=any_string_dtype
)
tm.assert_frame_equal(result, expected)
# one group, some matches
result = s.str.extract("([AB])[123]", expand=False)
expected = Series(["A", "B", np.nan], dtype=any_string_dtype)
tm.assert_series_equal(result, expected)
# two groups, some matches
result = s.str.extract("([AB])([123])", expand=False)
expected = DataFrame(
[["A", "1"], ["B", "2"], [np.nan, np.nan]], dtype=any_string_dtype
)
tm.assert_frame_equal(result, expected)
# one named group
result = s.str.extract("(?P<letter>[AB])", expand=False)
expected = Series(["A", "B", np.nan], name="letter", dtype=any_string_dtype)
tm.assert_series_equal(result, expected)
# two named groups
result = s.str.extract("(?P<letter>[AB])(?P<number>[123])", expand=False)
expected = DataFrame(
[["A", "1"], ["B", "2"], [np.nan, np.nan]],
columns=["letter", "number"],
dtype=any_string_dtype,
)
tm.assert_frame_equal(result, expected)
# mix named and unnamed groups
result = s.str.extract("([AB])(?P<number>[123])", expand=False)
expected = DataFrame(
[["A", "1"], ["B", "2"], [np.nan, np.nan]],
columns=[0, "number"],
dtype=any_string_dtype,
)
tm.assert_frame_equal(result, expected)
# one normal group, one non-capturing group
result = s.str.extract("([AB])(?:[123])", expand=False)
expected = Series(["A", "B", np.nan], dtype=any_string_dtype)
tm.assert_series_equal(result, expected)
# two normal groups, one non-capturing group
s = Series(["A11", "B22", "C33"], dtype=any_string_dtype)
result = s.str.extract("([AB])([123])(?:[123])", expand=False)
expected = DataFrame(
[["A", "1"], ["B", "2"], [np.nan, np.nan]], dtype=any_string_dtype
)
tm.assert_frame_equal(result, expected)
# one optional group followed by one normal group
s = Series(["A1", "B2", "3"], dtype=any_string_dtype)
result = s.str.extract("(?P<letter>[AB])?(?P<number>[123])", expand=False)
expected = DataFrame(
[["A", "1"], ["B", "2"], [np.nan, "3"]],
columns=["letter", "number"],
dtype=any_string_dtype,
)
tm.assert_frame_equal(result, expected)
# one normal group followed by one optional group
s = Series(["A1", "B2", "C"], dtype=any_string_dtype)
result = s.str.extract("(?P<letter>[ABC])(?P<number>[123])?", expand=False)
expected = DataFrame(
[["A", "1"], ["B", "2"], ["C", np.nan]],
columns=["letter", "number"],
dtype=any_string_dtype,
)
tm.assert_frame_equal(result, expected)
def test_extract_expand_capture_groups_index(index, any_string_dtype):
# https://github.com/pandas-dev/pandas/issues/6348
# not passing index to the extractor
data = ["A1", "B2", "C"]
if len(index) < len(data):
pytest.skip("Index too short")
index = index[: len(data)]
s = Series(data, index=index, dtype=any_string_dtype)
result = s.str.extract(r"(\d)", expand=False)
expected = Series(["1", "2", np.nan], index=index, dtype=any_string_dtype)
tm.assert_series_equal(result, expected)
result = s.str.extract(r"(?P<letter>\D)(?P<number>\d)?", expand=False)
expected = DataFrame(
[["A", "1"], ["B", "2"], ["C", np.nan]],
columns=["letter", "number"],
index=index,
dtype=any_string_dtype,
)
tm.assert_frame_equal(result, expected)
def test_extract_single_series_name_is_preserved(any_string_dtype):
s = Series(["a3", "b3", "c2"], name="bob", dtype=any_string_dtype)
result = s.str.extract(r"(?P<sue>[a-z])", expand=False)
expected = Series(["a", "b", "c"], name="sue", dtype=any_string_dtype)
tm.assert_series_equal(result, expected)
def test_extract_expand_True(any_string_dtype):
# Contains tests like those in test_match and some others.
s = Series(["fooBAD__barBAD", np.nan, "foo"], dtype=any_string_dtype)
result = s.str.extract(".*(BAD[_]+).*(BAD)", expand=True)
expected = DataFrame(
[["BAD__", "BAD"], [np.nan, np.nan], [np.nan, np.nan]], dtype=any_string_dtype
)
tm.assert_frame_equal(result, expected)
def test_extract_expand_True_mixed_object():
er = [np.nan, np.nan] # empty row
mixed = Series(
[
"aBAD_BAD",
np.nan,
"BAD_b_BAD",
True,
datetime.today(),
"foo",
None,
1,
2.0,
]
)
result = mixed.str.extract(".*(BAD[_]+).*(BAD)", expand=True)
expected = DataFrame([["BAD_", "BAD"], er, ["BAD_", "BAD"], er, er, er, er, er, er])
tm.assert_frame_equal(result, expected)
def test_extract_expand_True_single_capture_group_raises(
index_or_series, any_string_dtype
):
# these should work for both Series and Index
# no groups
s_or_idx = index_or_series(["A1", "B2", "C3"], dtype=any_string_dtype)
msg = "pattern contains no capture groups"
with pytest.raises(ValueError, match=msg):
s_or_idx.str.extract("[ABC][123]", expand=True)
# only non-capturing groups
with pytest.raises(ValueError, match=msg):
s_or_idx.str.extract("(?:[AB]).*", expand=True)
def test_extract_expand_True_single_capture_group(index_or_series, any_string_dtype):
# single group renames series/index properly
s_or_idx = index_or_series(["A1", "A2"], dtype=any_string_dtype)
result = s_or_idx.str.extract(r"(?P<uno>A)\d", expand=True)
expected_dtype = "object" if index_or_series is Index else any_string_dtype
expected = DataFrame({"uno": ["A", "A"]}, dtype=expected_dtype)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("name", [None, "series_name"])
def test_extract_series(name, any_string_dtype):
# extract should give the same result whether or not the series has a name.
s = Series(["A1", "B2", "C3"], name=name, dtype=any_string_dtype)
# one group, no matches
result = s.str.extract("(_)", expand=True)
expected = DataFrame([np.nan, np.nan, np.nan], dtype=any_string_dtype)
tm.assert_frame_equal(result, expected)
# two groups, no matches
result = s.str.extract("(_)(_)", expand=True)
expected = DataFrame(
[[np.nan, np.nan], [np.nan, np.nan], [np.nan, np.nan]], dtype=any_string_dtype
)
tm.assert_frame_equal(result, expected)
# one group, some matches
result = s.str.extract("([AB])[123]", expand=True)
expected = DataFrame(["A", "B", np.nan], dtype=any_string_dtype)
tm.assert_frame_equal(result, expected)
# two groups, some matches
result = s.str.extract("([AB])([123])", expand=True)
expected = DataFrame(
[["A", "1"], ["B", "2"], [np.nan, np.nan]], dtype=any_string_dtype
)
tm.assert_frame_equal(result, expected)
# one named group
result = s.str.extract("(?P<letter>[AB])", expand=True)
expected = DataFrame({"letter": ["A", "B", np.nan]}, dtype=any_string_dtype)
tm.assert_frame_equal(result, expected)
# two named groups
result = s.str.extract("(?P<letter>[AB])(?P<number>[123])", expand=True)
expected = DataFrame(
[["A", "1"], ["B", "2"], [np.nan, np.nan]],
columns=["letter", "number"],
dtype=any_string_dtype,
)
tm.assert_frame_equal(result, expected)
# mix named and unnamed groups
result = s.str.extract("([AB])(?P<number>[123])", expand=True)
expected = DataFrame(
[["A", "1"], ["B", "2"], [np.nan, np.nan]],
columns=[0, "number"],
dtype=any_string_dtype,
)
tm.assert_frame_equal(result, expected)
# one normal group, one non-capturing group
result = s.str.extract("([AB])(?:[123])", expand=True)
expected = DataFrame(["A", "B", np.nan], dtype=any_string_dtype)
tm.assert_frame_equal(result, expected)
def test_extract_optional_groups(any_string_dtype):
# two normal groups, one non-capturing group
s = Series(["A11", "B22", "C33"], dtype=any_string_dtype)
result = s.str.extract("([AB])([123])(?:[123])", expand=True)
expected = DataFrame(
[["A", "1"], ["B", "2"], [np.nan, np.nan]], dtype=any_string_dtype
)
tm.assert_frame_equal(result, expected)
# one optional group followed by one normal group
s = Series(["A1", "B2", "3"], dtype=any_string_dtype)
result = s.str.extract("(?P<letter>[AB])?(?P<number>[123])", expand=True)
expected = DataFrame(
[["A", "1"], ["B", "2"], [np.nan, "3"]],
columns=["letter", "number"],
dtype=any_string_dtype,
)
tm.assert_frame_equal(result, expected)
# one normal group followed by one optional group
s = Series(["A1", "B2", "C"], dtype=any_string_dtype)
result = s.str.extract("(?P<letter>[ABC])(?P<number>[123])?", expand=True)
expected = DataFrame(
[["A", "1"], ["B", "2"], ["C", np.nan]],
columns=["letter", "number"],
dtype=any_string_dtype,
)
tm.assert_frame_equal(result, expected)
def test_extract_dataframe_capture_groups_index(index, any_string_dtype):
# GH6348
# not passing index to the extractor
data = ["A1", "B2", "C"]
if len(index) < len(data):
pytest.skip("Index too short")
index = index[: len(data)]
s = Series(data, index=index, dtype=any_string_dtype)
result = s.str.extract(r"(\d)", expand=True)
expected = DataFrame(["1", "2", np.nan], index=index, dtype=any_string_dtype)
tm.assert_frame_equal(result, expected)
result = s.str.extract(r"(?P<letter>\D)(?P<number>\d)?", expand=True)
expected = DataFrame(
[["A", "1"], ["B", "2"], ["C", np.nan]],
columns=["letter", "number"],
index=index,
dtype=any_string_dtype,
)
tm.assert_frame_equal(result, expected)
def test_extract_single_group_returns_frame(any_string_dtype):
# GH11386 extract should always return DataFrame, even when
# there is only one group. Prior to v0.18.0, extract returned
# Series when there was only one group in the regex.
s = Series(["a3", "b3", "c2"], name="series_name", dtype=any_string_dtype)
result = s.str.extract(r"(?P<letter>[a-z])", expand=True)
expected = DataFrame({"letter": ["a", "b", "c"]}, dtype=any_string_dtype)
tm.assert_frame_equal(result, expected)
def test_extractall(any_string_dtype):
data = [
"<EMAIL>",
"<EMAIL>",
"<EMAIL>",
"<EMAIL> some text <EMAIL>",
"<EMAIL> some text <EMAIL> and <EMAIL>",
np.nan,
"",
]
expected_tuples = [
("dave", "google", "com"),
("tdhock5", "gmail", "com"),
("maudelaperriere", "gmail", "com"),
("rob", "gmail", "com"),
("steve", "gmail", "com"),
("a", "b", "com"),
("c", "d", "com"),
("e", "f", "com"),
]
pat = r"""
(?P<user>[a-z0-9]+)
@
(?P<domain>[a-z]+)
\.
(?P<tld>[a-z]{2,4})
"""
expected_columns = ["user", "domain", "tld"]
s = Series(data, dtype=any_string_dtype)
# extractall should return a DataFrame with one row for each match, indexed by the
# subject from which the match came.
expected_index = MultiIndex.from_tuples(
[(0, 0), (1, 0), (2, 0), (3, 0), (3, 1), (4, 0), (4, 1), (4, 2)],
names=(None, "match"),
)
expected = DataFrame(
expected_tuples, expected_index, expected_columns, dtype=any_string_dtype
)
result = s.str.extractall(pat, flags=re.VERBOSE)
tm.assert_frame_equal(result, expected)
# The index of the input Series should be used to construct the index of the output
# DataFrame:
mi = MultiIndex.from_tuples(
[
("single", "Dave"),
("single", "Toby"),
("single", "Maude"),
("multiple", "robAndSteve"),
("multiple", "abcdef"),
("none", "missing"),
("none", "empty"),
]
)
s = Series(data, index=mi, dtype=any_string_dtype)
expected_index = MultiIndex.from_tuples(
[
("single", "Dave", 0),
("single", "Toby", 0),
("single", "Maude", 0),
("multiple", "robAndSteve", 0),
("multiple", "robAndSteve", 1),
("multiple", "abcdef", 0),
("multiple", "abcdef", 1),
("multiple", "abcdef", 2),
],
names=(None, None, "match"),
)
expected = DataFrame(
expected_tuples, expected_index, expected_columns, dtype=any_string_dtype
)
result = s.str.extractall(pat, flags=re.VERBOSE)
tm.assert_frame_equal(result, expected)
from datetime import timedelta
import numpy as np
from pandas.core.dtypes.dtypes import DatetimeTZDtype
import pandas as pd
from pandas import (
DataFrame,
Series,
date_range,
option_context,
)
import pandas._testing as tm
def _check_cast(df, v):
"""
Check if all dtypes of df are equal to v
"""
assert all(s.dtype.name == v for _, s in df.items())
class TestDataFrameDataTypes:
def test_empty_frame_dtypes(self):
empty_df = DataFrame()
tm.assert_series_equal(empty_df.dtypes, Series(dtype=object))
nocols_df = DataFrame(index=[1, 2, 3])
tm.assert_series_equal(nocols_df.dtypes, Series(dtype=object))
import pandas as pd
import numpy as np
from scipy.stats import ttest_ind
from Regression import linear_regression
help(linear_regression)
lr = linear_regression()
class Robustness:
def stars(self, p):
if p <= 0.001:
return '***'
elif p <= 0.05:
return '**'
elif p <= 0.1:
return '*'
else:
return ''
def double_sort(self, X, y, group_names, ngroup=5, take_in_reg = False):
"""
X: DataFrame containing the group_names columns (plus the regressors)
take_in_reg: whether to include the group_names in the regression. Default False -> treated like a traditional Fama-model alpha
group_names: list of two strings; the first will be shown on the index, the second on the columns
sort the regression residuals by the two group_names, and compare group n (biggest) vs group 1 (smallest) with a t-test
"""
X_cols = list(X.columns)
if not take_in_reg:
for group in group_names:
X_cols.remove(group)
lr.ols_fit(X[X_cols], y, if_print=False)
resid = lr.get_residuals()
XX = pd.concat([X[group_names], pd.Series(resid, name='residual', index=X.index)], axis=1)
for group in group_names:
XX[group + '_group'] = pd.qcut(XX[group].rank(method='first'), ngroup, labels=False) + 1 # 1 = smallest, ngroup = biggest
ds_df = pd.pivot_table(XX, values='residual', columns=group_names[1] + '_group', index=group_names[0] + '_group',aggfunc='mean')
test_0 = ds_df.loc[5,:] - ds_df.loc[1,:] # test for the first group_name, added as the last row
test_1 = ds_df.loc[:,5] - ds_df.loc[:, 1] # test for the second group_name, added as the last column
XX_group = XX.groupby([group+'_group' for group in group_names])
test_0_stars = ["{:.4f}".format(test_0[i]) + self.stars(ttest_ind(XX_group.get_group((1, i))['residual'], XX_group.get_group((5, i))['residual'])[1]) for i in range(1,6)]
test_1_stars = ["{:.4f}".format(test_1[i]) + self.stars(ttest_ind(XX_group.get_group((i, 1))['residual'], XX_group.get_group((i, 5))['residual'])[1]) for i in range(1,6)]
ds_df = pd.concat([ds_df, pd.DataFrame({group_names[0] + ' (5-1)':test_0_stars}, index=ds_df.columns).T], axis=0)
ds_df = pd.concat([ds_df, pd.DataFrame({group_names[1] + ' (5-1)': test_1_stars}, index=ds_df.columns)], axis=1)
#
# Copyright 2020 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Testing out the datacompy functionality
"""
import io
import logging
import sys
from datetime import datetime
from decimal import Decimal
from unittest import mock
import numpy as np
import pandas as pd
import pytest
from pandas.util.testing import assert_series_equal
from pytest import raises
import datacompy
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
def test_numeric_columns_equal_abs():
data = """a|b|expected
1|1|True
2|2.1|True
3|4|False
4|NULL|False
NULL|4|False
NULL|NULL|True"""
df = pd.read_csv(io.StringIO(data), sep="|")
actual_out = datacompy.columns_equal(df.a, df.b, abs_tol=0.2)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
def test_numeric_columns_equal_rel():
data = """a|b|expected
1|1|True
2|2.1|True
3|4|False
4|NULL|False
NULL|4|False
NULL|NULL|True"""
df = pd.read_csv(io.StringIO(data), sep="|")
actual_out = datacompy.columns_equal(df.a, df.b, rel_tol=0.2)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
def test_string_columns_equal():
data = """a|b|expected
Hi|Hi|True
Yo|Yo|True
Hey|Hey |False
résumé|resume|False
résumé|résumé|True
💩|💩|True
💩|🤔|False
| |True
| |False
datacompy|DataComPy|False
something||False
|something|False
||True"""
df = pd.read_csv(io.StringIO(data), sep="|")
actual_out = datacompy.columns_equal(df.a, df.b, rel_tol=0.2)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
def test_string_columns_equal_with_ignore_spaces():
data = """a|b|expected
Hi|Hi|True
Yo|Yo|True
Hey|Hey |True
résumé|resume|False
résumé|résumé|True
💩|💩|True
💩|🤔|False
| |True
| |True
datacompy|DataComPy|False
something||False
|something|False
||True"""
df = pd.read_csv(io.StringIO(data), sep="|")
actual_out = datacompy.columns_equal(df.a, df.b, rel_tol=0.2, ignore_spaces=True)
expect_out = df["expected"]
assert_series_equal(expect_out, actual_out, check_names=False)
from __future__ import division, print_function
import os
import pandas as pd
import numpy as np
import torch
import h5py
from torch.utils.data import Dataset
from torchvision.io import read_video
class UntrimmedVideoDataset(Dataset):
'''
UntrimmedVideoDataset:
This dataset takes in temporal segments from untrimmed videos and samples fixed-length
clips from each segment. Each item in the dataset is a dictionary with the keys:
- "clip": A Tensor (dtype=torch.float) of the clip frames after applying transforms
- "label-Y": A label from the `label_columns` (one key for each label) or -1 if label is missing for that clip
- "gvf": The global video feature (GVF) vector if `global_video_features` parameter is not None
'''
def __init__(self, csv_filename, root_dir, clip_length, frame_rate, clips_per_segment, temporal_jittering,
label_columns, label_mappings, seed=42, transforms=None, global_video_features=None, debug=False):
'''
Args:
csv_filename (string): Path to the CSV file with temporal segments information and annotations.
The CSV file must include the columns [filename, fps, t-start, t-end, video-duration] and
the label columns given by the parameter `label_columns`.
root_dir (string): Directory with all the video files.
clip_length (int): The number of frames per clip.
frame_rate (int): The effective frame rate (fps) to sample clips.
clips_per_segment (int): The number of clips to sample per segment in the CSV file.
temporal_jittering (bool): If True, clips are randomly sampled between t-start and t-end of
each segment. Otherwise, clips are sampled uniformly between t-start and t-end.
seed (int): Seed of the random number generator used for the temporal jittering.
transforms (callable): A function/transform that takes in a TxHxWxC video
and returns a transformed version.
label_columns (list of string): A list of the label columns in the CSV file.
If more than one column is specified, the sample returns a label for each.
label_mappings (list of dict): A list of dictionaries to map the corresponding label
from `label_columns` from a category string to an integer ID value.
global_video_features (string): Path to h5 file containing global video features (optional)
debug (bool): If true, create a debug dataset with 100 samples.
'''
df = UntrimmedVideoDataset._clean_df_and_remove_short_segments(
|
pd.read_csv(csv_filename)
|
pandas.read_csv
|
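# A minimal, hypothetical sketch (separate from the class above) of the clip
# start-time sampling its docstring describes: with temporal jittering the
# clips_per_segment start offsets are drawn at random inside the segment,
# otherwise they are spaced uniformly. All names here are illustrative only.
import numpy as np

def sample_clip_starts(t_start, t_end, clip_length, frame_rate,
                       clips_per_segment, temporal_jittering, seed=42):
    rng = np.random.RandomState(seed)
    clip_duration = clip_length / float(frame_rate)  # seconds covered by one clip
    latest_start = max(t_start, t_end - clip_duration)
    if temporal_jittering:
        return t_start + (latest_start - t_start) * rng.rand(clips_per_segment)
    return np.linspace(t_start, latest_start, num=clips_per_segment)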
#!/usr/bin/env python3
##########################################################
## <NAME> ##
## Copyright (C) 2019 <NAME>, IGTP, Spain ##
##########################################################
"""
Generates sample identification using KMA software and MLSTar.
Looks for similar entries on GenBank and retrieves them.
"""
## useful imports
import time
import io
import os
import re
import sys
import concurrent.futures
from termcolor import colored
import pandas as pd
## import my modules
from BacterialTyper.scripts import species_identification_KMA
from BacterialTyper.scripts import database_generator
from BacterialTyper.scripts import MLSTar
from BacterialTyper.scripts import edirect_caller
from BacterialTyper.modules import help_info
from BacterialTyper.config import set_config
from BacterialTyper import __version__ as pipeline_version
import HCGB
from HCGB import sampleParser
import HCGB.functions.aesthetics_functions as HCGB_aes
import HCGB.functions.time_functions as HCGB_time
import HCGB.functions.main_functions as HCGB_main
import HCGB.functions.files_functions as HCGB_files
####################################
def run_ident(options):
"""
Main function acting as an entry point to the module *ident*.
Arguments:
.. seealso:: Additional information on the available PubMLST datasets.
- :doc:`PubMLST datasets<../../../data/PubMLST_datasets>`
"""
##################################
### show help messages if desired
##################################
if (options.help_format):
## help_format option
sampleParser.help_format()
exit()
elif (options.help_project):
## information for project
help_info.project_help()
exit()
elif (options.help_KMA):
## information for KMA Software
species_identification_KMA.help_kma_database()
exit()
elif (options.help_MLSTar):
## information for MLSTar software
MLSTar.help_MLSTar()
exit()
## init time
start_time_total = time.time()
## debugging messages
global Debug
if (options.debug):
Debug = True
else:
Debug = False
### set as default paired_end mode
if (options.single_end):
options.pair = False
else:
options.pair = True
### species_identification_KMA -> most similar taxa
HCGB_aes.pipeline_header("BacterialTyper", ver=pipeline_version)
HCGB_aes.boxymcboxface("Species identification")
print ("--------- Starting Process ---------")
HCGB_time.print_time()
## absolute path for in & out
input_dir = os.path.abspath(options.input)
outdir=""
## Project mode as default
global Project
if (options.detached):
options.project = False
project_mode=False
outdir = os.path.abspath(options.output_folder)
Project=False
else:
options.project = True
outdir = input_dir
Project=True
## get files
pd_samples_retrieved = sampleParser.files.get_files(options, input_dir, "trim", ['_trim'], options.debug)
## debug message
if (Debug):
print (colored("**DEBUG: pd_samples_retrieve **", 'yellow'))
print (pd_samples_retrieved)
## generate output folder, if necessary
print ("\n+ Create output folder(s):")
if not options.project:
HCGB_files.create_folder(outdir)
## for each sample
outdir_dict = HCGB_files.outdir_project(outdir, options.project, pd_samples_retrieved, "ident", options.debug)
## let's start the process
print ("+ Generate an species typification for each sample retrieved using:")
print ("(1) Kmer alignment (KMA) software.")
print ("(2) Pre-defined databases by KMA or user-defined databases.")
## get databases to check
retrieve_databases = get_options_db(options)
## time stamp
start_time_partial = HCGB_time.timestamp(start_time_total)
## debug message
if (Debug):
print (colored("**DEBUG: retrieve_database **", 'yellow'))
|
pd.set_option('display.max_colwidth', None)
|
pandas.set_option
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for Operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from meterstick import metrics
from meterstick import operations
from meterstick import utils
import mock
import numpy as np
import pandas as pd
from pandas import testing
from scipy import stats
import unittest
class DistributionTests(unittest.TestCase):
df = pd.DataFrame({
'X': [1, 1, 1, 5],
'grp': ['A', 'A', 'B', 'B'],
'country': ['US', 'US', 'US', 'EU']
})
sum_x = metrics.Sum('X')
distribution = operations.Distribution('grp', sum_x)
def test_distribution(self):
output = self.distribution.compute_on(self.df)
expected = pd.DataFrame({'Distribution of sum(X)': [0.25, 0.75]},
index=['A', 'B'])
expected.index.name = 'grp'
testing.assert_frame_equal(output, expected)
def test_normalize(self):
output = operations.Normalize('grp', self.sum_x).compute_on(self.df)
expected = pd.DataFrame({'Distribution of sum(X)': [0.25, 0.75]},
index=['A', 'B'])
expected.index.name = 'grp'
testing.assert_frame_equal(output, expected)
def test_distribution_over_multiple_columns(self):
df = pd.DataFrame({
'X': [2, 1, 1, 5],
'grp': ['A', 'A', 'B', 'B'],
'country': ['US', 'US', 'US', 'EU'],
'platform': ['desktop', 'mobile', 'desktop', 'mobile']
})
sum_x = metrics.Sum('X')
dist = operations.Distribution(['grp', 'platform'], sum_x)
output = dist.compute_on(df, 'country')
expected = pd.DataFrame({
'Distribution of sum(X)': [1., 0.5, 0.25, 0.25],
'country': ['EU', 'US', 'US', 'US'],
'grp': ['B', 'A', 'A', 'B'],
'platform': ['mobile', 'desktop', 'mobile', 'desktop']
})
expected.set_index(['country', 'grp', 'platform'], inplace=True)
testing.assert_frame_equal(output, expected)
def test_distribution_melted(self):
output = self.distribution.compute_on(self.df, melted=True)
expected = pd.DataFrame({
'Value': [0.25, 0.75],
'grp': ['A', 'B'],
'Metric': ['Distribution of sum(X)', 'Distribution of sum(X)']
})
expected.set_index(['Metric', 'grp'], inplace=True)
testing.assert_frame_equal(output, expected)
def test_distribution_splitby(self):
output = self.distribution.compute_on(self.df, 'country')
expected = pd.DataFrame({
'Distribution of sum(X)': [1., 2. / 3, 1. / 3],
'grp': ['B', 'A', 'B'],
'country': ['EU', 'US', 'US']
})
expected.set_index(['country', 'grp'], inplace=True)
testing.assert_frame_equal(output, expected)
def test_distribution_splitby_melted(self):
output = self.distribution.compute_on(self.df, 'country', melted=True)
expected = pd.DataFrame({
'Value': [1., 2. / 3, 1. / 3],
'grp': ['B', 'A', 'B'],
'Metric': ['Distribution of sum(X)'] * 3,
'country': ['EU', 'US', 'US']
})
expected.set_index(['Metric', 'country', 'grp'], inplace=True)
testing.assert_frame_equal(output, expected)
def test_distribution_splitby_multiple(self):
df = pd.DataFrame({
'X': [1, 1, 1, 5, 0, 1, 2, 3.5],
'grp': ['A', 'A', 'B', 'B'] * 2,
'country': ['US', 'US', 'US', 'EU'] * 2,
'grp0': ['foo'] * 4 + ['bar'] * 4
})
output = self.distribution.compute_on(df, ['grp0', 'country'])
bar = self.distribution.compute_on(df[df.grp0 == 'bar'], 'country')
foo = self.distribution.compute_on(df[df.grp0 == 'foo'], 'country')
expected = pd.concat([bar, foo], keys=['bar', 'foo'], names=['grp0'])
testing.assert_frame_equal(output, expected)
def test_distribution_multiple_metrics(self):
metric = metrics.MetricList((self.sum_x, metrics.Count('X')))
metric = operations.Distribution('grp', metric)
output = metric.compute_on(self.df)
expected = pd.DataFrame(
{
'Distribution of sum(X)': [0.25, 0.75],
'Distribution of count(X)': [0.5, 0.5]
},
index=['A', 'B'],
columns=['Distribution of sum(X)', 'Distribution of count(X)'])
expected.index.name = 'grp'
testing.assert_frame_equal(output, expected)
def test_distribution_where(self):
metric = operations.Distribution('grp', self.sum_x, where='country == "US"')
metric_no_filter = operations.Distribution('grp', self.sum_x)
output = metric.compute_on(self.df)
expected = metric_no_filter.compute_on(self.df[self.df.country == 'US'])
testing.assert_frame_equal(output, expected)
def test_distribution_pipeline(self):
output = self.sum_x | operations.Distribution('grp') | metrics.compute_on(
self.df)
expected = pd.DataFrame({'Distribution of sum(X)': [0.25, 0.75]},
index=['A', 'B'])
expected.index.name = 'grp'
testing.assert_frame_equal(output, expected)
def test_distribution_cache_key(self):
sum_x = metrics.Sum('X', 'X')
metric = operations.Distribution('grp', sum_x)
metric.compute_on(self.df, cache_key=42)
testing.assert_series_equal(
self.df.groupby('grp').X.sum(), sum_x.get_cached(42, 'grp'))
self.assertTrue(metric.in_cache(42))
def test_distribution_internal_caching_cleaned_up(self):
sum_x = metrics.Sum('X')
m = operations.Distribution('grp', sum_x)
m.compute_on(self.df)
self.assertEqual(m.cache, {})
self.assertEqual(sum_x.cache, {})
self.assertIsNone(sum_x.cache_key)
self.assertIsNone(m.cache_key)
def test_distribution_with_jackknife_internal_caching_cleaned_up(self):
df = pd.DataFrame({
'X': [1, 1, 1, 5],
'grp': ['A', 'A', 'B', 'B'],
'country': ['US', 'US', 'US', 'EU'],
'cookie': [1, 2, 1, 2]
})
sum_x = metrics.Sum('X')
m = operations.Distribution('grp', sum_x)
jk = operations.Jackknife('cookie', m)
jk.compute_on(df)
self.assertEqual(jk.cache, {})
self.assertEqual(m.cache, {})
self.assertEqual(sum_x.cache, {})
self.assertIsNone(jk.cache_key)
self.assertIsNone(m.cache_key)
self.assertIsNone(sum_x.cache_key)
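# A minimal sketch in plain pandas (not the meterstick API) of what the
# Distribution tests above check: the metric is summed per `grp` and then
# normalized so the shares add up to 1, e.g. X = [1, 1, 1, 5] over
# grp = [A, A, B, B] gives 2/8 = 0.25 for A and 6/8 = 0.75 for B.
def _distribution_by_hand(df, over, value_col='X'):
    grouped = df.groupby(over)[value_col].sum()
    return (grouped / grouped.sum()).rename('Distribution of sum(%s)' % value_col)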
class CumulativeDistributionTests(unittest.TestCase):
df = pd.DataFrame({
'X': [1, 1, 1, 5],
'grp': ['B', 'B', 'A', 'A'],
'country': ['US', 'US', 'US', 'EU']
})
sum_x = metrics.Sum('X')
metric = operations.CumulativeDistribution('grp', sum_x)
def test_cumulative_distribution(self):
output = self.metric.compute_on(self.df)
expected = pd.DataFrame({'Cumulative Distribution of sum(X)': [0.75, 1.]},
index=['A', 'B'])
expected.index.name = 'grp'
testing.assert_frame_equal(output, expected)
def test_cumulative_distribution_over_multiple_columns(self):
df = pd.DataFrame({
'X': [2, 1, 1, 5],
'grp': ['A', 'A', 'B', 'B'],
'country': ['US', 'US', 'US', 'EU'],
'platform': ['desktop', 'mobile', 'desktop', 'mobile']
})
sum_x = metrics.Sum('X')
cum_dict = operations.CumulativeDistribution(['grp', 'platform'], sum_x)
output = cum_dict.compute_on(df, 'country')
expected = pd.DataFrame({
'Cumulative Distribution of sum(X)': [1., 0.5, 0.75, 1],
'country': ['EU', 'US', 'US', 'US'],
'grp': ['B', 'A', 'A', 'B'],
'platform': ['mobile', 'desktop', 'mobile', 'desktop']
})
expected.set_index(['country', 'grp', 'platform'], inplace=True)
testing.assert_frame_equal(output, expected)
def test_cumulative_distribution_melted(self):
output = self.metric.compute_on(self.df, melted=True)
expected = pd.DataFrame({
'Value': [0.75, 1.],
'grp': ['A', 'B'],
'Metric': ['Cumulative Distribution of sum(X)'] * 2
})
expected.set_index(['Metric', 'grp'], inplace=True)
testing.assert_frame_equal(output, expected)
def test_cumulative_distribution_splitby(self):
output = self.metric.compute_on(self.df, 'country')
expected = pd.DataFrame({
'Cumulative Distribution of sum(X)': [1., 1. / 3, 1.],
'grp': ['A', 'A', 'B'],
'country': ['EU', 'US', 'US']
})
expected.set_index(['country', 'grp'], inplace=True)
testing.assert_frame_equal(output, expected)
def test_cumulative_distribution_splitby_melted(self):
output = self.metric.compute_on(self.df, 'country', melted=True)
expected = pd.DataFrame({
'Value': [1., 1. / 3, 1.],
'grp': ['A', 'A', 'B'],
'Metric': ['Cumulative Distribution of sum(X)'] * 3,
'country': ['EU', 'US', 'US']
})
expected.set_index(['Metric', 'country', 'grp'], inplace=True)
testing.assert_frame_equal(output, expected)
def test_cumulative_distribution_splitby_multiple(self):
df = pd.DataFrame({
'X': [1, 1, 1, 5, 0, 2, 1.5, 3],
'grp': ['B', 'B', 'A', 'A'] * 2,
'country': ['US', 'US', 'US', 'EU'] * 2,
'grp0': ['foo'] * 4 + ['bar'] * 4
})
output = self.metric.compute_on(df, ['grp0', 'country'])
output.sort_index(level=['grp0', 'grp', 'country'], inplace=True)
bar = self.metric.compute_on(df[df.grp0 == 'bar'], 'country')
foo = self.metric.compute_on(df[df.grp0 == 'foo'], 'country')
expected = pd.concat([bar, foo], keys=['bar', 'foo'], names=['grp0'])
expected = expected.sort_index(level=['grp0', 'grp', 'country'])
testing.assert_frame_equal(output, expected)
def test_cumulative_distribution_order(self):
metric = operations.CumulativeDistribution('grp', self.sum_x, ('B', 'A'))
output = metric.compute_on(self.df)
expected = pd.DataFrame({'Cumulative Distribution of sum(X)': [0.25, 1.]},
index=['B', 'A'])
expected.index.name = 'grp'
testing.assert_frame_equal(output, expected)
def test_cumulative_distribution_ascending(self):
metric = operations.CumulativeDistribution(
'grp', self.sum_x, ascending=False)
output = metric.compute_on(self.df)
expected = pd.DataFrame({'Cumulative Distribution of sum(X)': [0.25, 1.]},
index=['B', 'A'])
expected.index.name = 'grp'
testing.assert_frame_equal(output, expected)
def test_cumulative_distribution_order_splitby(self):
metric = operations.CumulativeDistribution('grp', self.sum_x, ('B', 'A'))
output = metric.compute_on(self.df, 'country')
expected = pd.DataFrame({
'Cumulative Distribution of sum(X)': [1., 2. / 3, 1.],
'grp': ['A', 'B', 'A'],
'country': ['EU', 'US', 'US']
})
expected.set_index(['country', 'grp'], inplace=True)
testing.assert_frame_equal(output, expected)
def test_cumulative_distribution_multiple_metrics(self):
metric = metrics.MetricList((self.sum_x, metrics.Count('X')))
metric = operations.CumulativeDistribution('grp', metric)
output = metric.compute_on(self.df)
expected = pd.DataFrame(
{
'Cumulative Distribution of sum(X)': [0.75, 1.],
'Cumulative Distribution of count(X)': [0.5, 1.]
},
index=['A', 'B'],
columns=[
'Cumulative Distribution of sum(X)',
'Cumulative Distribution of count(X)'
])
expected.index.name = 'grp'
testing.assert_frame_equal(output, expected)
def test_cumulative_distribution_where(self):
metric = operations.CumulativeDistribution(
'grp', metrics.Count('X'), where='country == "US"')
metric_no_filter = operations.CumulativeDistribution(
'grp', metrics.Count('X'))
output = metric.compute_on(self.df)
expected = metric_no_filter.compute_on(self.df[self.df.country == 'US'])
testing.assert_frame_equal(output, expected)
def test_cumulative_distribution_pipeline(self):
output = self.sum_x | operations.CumulativeDistribution(
'grp') | metrics.compute_on(self.df)
expected = pd.DataFrame({'Cumulative Distribution of sum(X)': [0.75, 1.]},
index=['A', 'B'])
expected.index.name = 'grp'
testing.assert_frame_equal(output, expected)
def test_cumulative_distribution_cache_key(self):
sum_x = metrics.Sum('X', 'X')
metric = operations.CumulativeDistribution('grp', sum_x)
metric.compute_on(self.df, cache_key=42)
testing.assert_series_equal(
self.df.groupby('grp').X.sum(), sum_x.get_cached(42, 'grp'))
self.assertTrue(metric.in_cache(42))
def test_cumulative_distribution_internal_caching_cleaned_up(self):
sum_x = metrics.Sum('X')
m = operations.CumulativeDistribution('grp', sum_x)
m.compute_on(self.df)
self.assertEqual(m.cache, {})
self.assertEqual(sum_x.cache, {})
self.assertIsNone(sum_x.cache_key)
self.assertIsNone(m.cache_key)
def test_cumulative_distribution_with_jackknife_internal_caching_cleaned_up(
self):
df = pd.DataFrame({
'X': [1, 1, 1, 5],
'grp': ['B', 'B', 'A', 'A'],
'country': ['US', 'US', 'US', 'EU'],
'cookie': [1, 2, 1, 2]
})
sum_x = metrics.Sum('X')
m = operations.CumulativeDistribution('grp', sum_x)
jk = operations.Jackknife('cookie', m)
jk.compute_on(df)
self.assertEqual(jk.cache, {})
self.assertEqual(m.cache, {})
self.assertEqual(sum_x.cache, {})
self.assertIsNone(jk.cache_key)
self.assertIsNone(m.cache_key)
self.assertIsNone(sum_x.cache_key)
class PercentChangeTests(unittest.TestCase):
df = pd.DataFrame({
'X': [1, 2, 3, 4, 5, 6],
'Condition': [0, 0, 0, 1, 1, 1],
'grp': ['A', 'A', 'B', 'A', 'B', 'C']
})
metric_lst = metrics.MetricList((metrics.Sum('X'), metrics.Count('X')))
def test_percent_change(self):
metric = operations.PercentChange('Condition', 0, self.metric_lst)
output = metric.compute_on(self.df)
expected = pd.DataFrame(
[[150., 0.]],
columns=['sum(X) Percent Change', 'count(X) Percent Change'],
index=[1])
expected.index.name = 'Condition'
testing.assert_frame_equal(output, expected)
def test_percent_change_include_baseline(self):
metric = operations.PercentChange('Condition', 0, self.metric_lst, True)
output = metric.compute_on(self.df)
expected = pd.DataFrame(
[[0., 0.], [150., 0.]],
columns=['sum(X) Percent Change', 'count(X) Percent Change'],
index=[0, 1])
expected.index.name = 'Condition'
testing.assert_frame_equal(output, expected)
def test_percent_change_melted(self):
metric = operations.PercentChange('Condition', 0, self.metric_lst)
output = metric.compute_on(self.df, melted=True)
expected = pd.DataFrame({
'Value': [150., 0.],
'Metric': ['sum(X) Percent Change', 'count(X) Percent Change'],
'Condition': [1, 1]
})
expected.set_index(['Metric', 'Condition'], inplace=True)
testing.assert_frame_equal(output, expected)
def test_percent_change_melted_include_baseline(self):
metric = operations.PercentChange('Condition', 0, self.metric_lst, True)
output = metric.compute_on(self.df, melted=True)
expected = pd.DataFrame({
'Value': [0., 150., 0., 0.],
'Metric': [
'sum(X) Percent Change', 'sum(X) Percent Change',
'count(X) Percent Change', 'count(X) Percent Change'
],
'Condition': [0, 1, 0, 1]
})
expected.set_index(['Metric', 'Condition'], inplace=True)
testing.assert_frame_equal(output, expected)
def test_percent_change_splitby(self):
metric = operations.PercentChange('Condition', 0, self.metric_lst, True)
output = metric.compute_on(self.df, 'grp')
expected = pd.DataFrame(
{
'sum(X) Percent Change': [0., 100. / 3, 0., 200. / 3, np.nan],
'count(X) Percent Change': [0., -50., 0., 0., np.nan],
'Condition': [0, 1, 0, 1, 1],
'grp': ['A', 'A', 'B', 'B', 'C']
},
columns=[
'sum(X) Percent Change', 'count(X) Percent Change', 'Condition',
'grp'
])
expected.set_index(['grp', 'Condition'], inplace=True)
testing.assert_frame_equal(output, expected)
def test_percent_change_splitby_melted(self):
metric = operations.PercentChange('Condition', 0, self.metric_lst, True)
output = metric.compute_on(self.df, 'grp', melted=True)
expected = pd.DataFrame({
'Value': [0., 100. / 3, 0., 200. / 3, np.nan, 0., -50., 0., 0., np.nan],
'Metric': ['sum(X) Percent Change'] * 5 +
['count(X) Percent Change'] * 5,
'Condition': [0, 1, 0, 1, 1] * 2,
'grp': ['A', 'A', 'B', 'B', 'C'] * 2
})
expected.set_index(['Metric', 'grp', 'Condition'], inplace=True)
testing.assert_frame_equal(output, expected)
def test_percent_change_splitby_multiple(self):
df = pd.DataFrame({
'X': [1, 2, 3, 4, 5, 6, 1.2, 2.2, 3.2, 4.2, 5.2, 6.5],
'Condition': [0, 0, 0, 1, 1, 1] * 2,
'grp': ['A', 'A', 'B', 'A', 'B', 'C'] * 2,
'grp0': ['foo'] * 6 + ['bar'] * 6
})
metric = operations.PercentChange('Condition', 0, self.metric_lst, True)
output = metric.compute_on(df, ['grp0', 'grp'])
bar = metric.compute_on(df[df.grp0 == 'bar'], 'grp')
foo = metric.compute_on(df[df.grp0 == 'foo'], 'grp')
expected = pd.concat([bar, foo], keys=['bar', 'foo'], names=['grp0'])
expected.sort_index(level=['grp0', 'grp'], inplace=True)
testing.assert_frame_equal(output, expected)
def test_percent_change_multiple_condition_columns(self):
df = self.df.copy()
metric = operations.PercentChange(['Condition', 'grp'], (0, 'A'),
self.metric_lst)
output = metric.compute_on(df)
df['Condition_and_grp'] = df[['Condition', 'grp']].apply(tuple, 1)
expected_metric = operations.PercentChange('Condition_and_grp', (0, 'A'),
self.metric_lst)
expected = expected_metric.compute_on(df)
expected = pd.DataFrame(
expected.values, index=output.index, columns=output.columns)
testing.assert_frame_equal(output, expected)
def test_percent_change_multiple_condition_columns_include_baseline(self):
df = self.df.copy()
metric = operations.PercentChange(['Condition', 'grp'], (0, 'A'),
self.metric_lst, True)
output = metric.compute_on(df)
df['Condition_and_grp'] = df[['Condition', 'grp']].apply(tuple, 1)
expected_metric = operations.PercentChange('Condition_and_grp', (0, 'A'),
self.metric_lst, True)
expected = expected_metric.compute_on(df)
expected = pd.DataFrame(
expected.values, index=output.index, columns=output.columns)
testing.assert_frame_equal(output, expected)
def test_percent_change_multiple_condition_columns_splitby(self):
df = pd.DataFrame({
'X': [1, 2, 3, 4, 5, 6],
'Condition': [0, 0, 0, 1, 1, 1],
'grp': ['A', 'A', 'B', 'A', 'B', 'B'],
'grp2': ['foo', 'bar', 'foo', 'bar', 'foo', 'bar']
})
metric = operations.PercentChange(['Condition', 'grp'], (0, 'A'),
self.metric_lst)
output = metric.compute_on(df, 'grp2')
df['Condition_and_grp'] = df[['Condition', 'grp']].apply(tuple, 1)
expected_metric = operations.PercentChange('Condition_and_grp', (0, 'A'),
self.metric_lst)
expected = expected_metric.compute_on(df, 'grp2')
expected = pd.DataFrame(
expected.values, index=output.index, columns=output.columns)
testing.assert_frame_equal(output, expected)
def test_percent_change_multiple_condition_columns_include_baseline_splitby(
self):
df = pd.DataFrame({
'X': [1, 2, 3, 4, 5, 6],
'Condition': [0, 0, 0, 1, 1, 1],
'grp': ['A', 'A', 'B', 'A', 'B', 'B'],
'grp2': ['foo', 'bar', 'foo', 'bar', 'foo', 'bar']
})
metric = operations.PercentChange(['Condition', 'grp'], (0, 'A'),
self.metric_lst, True)
output = metric.compute_on(df, 'grp2')
df['Condition_and_grp'] = df[['Condition', 'grp']].apply(tuple, 1)
expected_metric = operations.PercentChange('Condition_and_grp', (0, 'A'),
self.metric_lst, True)
expected = expected_metric.compute_on(df, 'grp2')
expected = pd.DataFrame(
expected.values, index=output.index, columns=output.columns)
testing.assert_frame_equal(output, expected)
def test_percent_change_where(self):
metric = operations.PercentChange(
'Condition', 0, metrics.Sum('X'), where='grp == "A"')
metric_no_filter = operations.PercentChange('Condition', 0,
metrics.Sum('X'))
output = metric.compute_on(self.df)
expected = metric_no_filter.compute_on(self.df[self.df.grp == 'A'])
testing.assert_frame_equal(output, expected)
def test_percent_change_pipeline(self):
metric = operations.PercentChange('Condition', 0)
output = self.metric_lst | metric | metrics.compute_on(self.df)
expected = pd.DataFrame(
[[150., 0.]],
columns=['sum(X) Percent Change', 'count(X) Percent Change'],
index=[1])
expected.index.name = 'Condition'
testing.assert_frame_equal(output, expected)
def test_percent_change_cache_key(self):
sum_x = metrics.Sum('X', 'X')
metric = operations.PercentChange('Condition', 0, sum_x)
metric.compute_on(self.df, cache_key=42)
testing.assert_series_equal(
self.df.groupby('Condition').X.sum(), sum_x.get_cached(42, 'Condition'))
self.assertTrue(metric.in_cache(42))
def test_percent_change_internal_caching_cleaned_up(self):
sum_x = metrics.Sum('X')
m = operations.PercentChange('Condition', 0, sum_x)
m.compute_on(self.df)
self.assertEqual(m.cache, {})
self.assertEqual(sum_x.cache, {})
self.assertIsNone(sum_x.cache_key)
self.assertIsNone(m.cache_key)
def test_percent_change_with_jackknife_internal_caching_cleaned_up(self):
df = pd.DataFrame({
'X': [1, 2, 3, 4, 5, 6],
'Condition': [0, 0, 0, 1, 1, 1],
'grp': ['A', 'A', 'B', 'A', 'B', 'C'],
'cookie': [1, 2, 3] * 2
})
sum_x = metrics.Sum('X')
m = operations.PercentChange('Condition', 0, sum_x)
jk = operations.Jackknife('cookie', m)
jk.compute_on(df)
self.assertEqual(jk.cache, {})
self.assertEqual(m.cache, {})
self.assertEqual(sum_x.cache, {})
self.assertIsNone(jk.cache_key)
self.assertIsNone(m.cache_key)
self.assertIsNone(sum_x.cache_key)
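# A minimal sketch in plain pandas (not the meterstick API) of the percent
# change the tests above assert: each condition's total relative to the
# baseline condition, in percent. For X = [1, 2, 3] against [4, 5, 6] this
# gives 15 / 6 - 1 = 150%.
def _percent_change_by_hand(df, condition_col, baseline, value_col='X'):
    sums = df.groupby(condition_col)[value_col].sum()
    return (sums / sums.loc[baseline] - 1.0) * 100.0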
class AbsoluteChangeTests(unittest.TestCase):
df = pd.DataFrame({
'X': [1, 2, 3, 4, 5, 6],
'Condition': [0, 0, 0, 1, 1, 1],
'grp': ['A', 'A', 'B', 'A', 'B', 'C']
})
metric_lst = metrics.MetricList((metrics.Sum('X'), metrics.Count('X')))
def test_absolute_change(self):
metric = operations.AbsoluteChange('Condition', 0, self.metric_lst)
output = metric.compute_on(self.df)
expected = pd.DataFrame(
[[9, 0]],
columns=['sum(X) Absolute Change', 'count(X) Absolute Change'],
index=[1])
expected.index.name = 'Condition'
|
testing.assert_frame_equal(output, expected)
|
pandas.testing.assert_frame_equal
|
import os
import zipfile
import calendar
from pathlib import Path
from glob import glob
import shapefile
from future.utils import lzip
from delphi.utils.shell import cd
from delphi.utils.web import download_file
from delphi.paths import data_dir
from tqdm import tqdm
import matplotlib as mpl
import pandas as pd
mpl.rcParams["backend"] = "Agg"
from matplotlib import pyplot as plt
from shapely.geometry import Polygon, MultiPolygon
def download_FEWSNET_admin_boundaries_data():
url = "http://shapefiles.fews.net.s3.amazonaws.com/ADMIN/FEWSNET_World_Admin.zip"
zipfile_name = Path(data_dir) / url.split("/")[-1]
download_file(url, zipfile_name)
directory = Path(data_dir) / (url.split("/")[-1].split(".")[0])
os.makedirs(directory, exist_ok=True)
with zipfile.ZipFile(zipfile_name) as zf:
zf.extractall(directory)
def download_and_clean_FEWSNET_IPC_data():
url = "http://shapefiles.fews.net.s3.amazonaws.com/ALL_HFIC.zip"
zipfile_name = Path(data_dir) / url.split("/")[-1]
download_file(url, zipfile_name)
with zipfile.ZipFile(zipfile_name) as zf:
zf.extractall(data_dir)
with cd(str(Path(data_dir) / "ALL_HFIC" / "East Africa")):
files_to_rename = glob("EA2017*")
for f in files_to_rename:
os.rename(f, f.replace("EA", "EA_"))
def process_FEWSNET_IPC_data(shpfile: str, title: str):
admin_boundaries_shapefile = "data/FEWSNET_World_Admin/FEWSNET_Admin2"
sf_admin = shapefile.Reader(admin_boundaries_shapefile)
colors = {
0: "white",
1: "#c3e2c3",
2: "#f3e838",
3: "#eb7d24",
4: "#cd2026",
5: "#5d060c",
66: "aqua",
88: "white",
99: "white",
}
sf = shapefile.Reader(shpfile)
fig, ax = plt.subplots(figsize=(12, 12))
ax.set_aspect("equal")
ax.set_title(title)
plt.style.use("ggplot")
def fill_and_plot(points, color_code):
xs, ys = lzip(*points)
ax.plot(xs, ys, linewidth=0.5, color="grey")
ax.fill(xs, ys, color=colors[color_code])
fs_polygons = []
for i, sr in tqdm(enumerate(sf.shapeRecords())):
nparts = len(sr.shape.parts)
parts, points = sr.shape.parts, sr.shape.points
CS = int(sr.record[0])
if nparts == 1:
# fill_and_plot(points, CS)
fs_polygons.append((Polygon(points), int(sr.record[0])))
else:
for ip, part in enumerate(parts):
if ip < nparts - 1:
i1 = parts[ip + 1] - 1
else:
i1 = len(points)
# fill_and_plot(points[part : i1 + 1], CS),
fs_polygons.append(
(Polygon(points[part : i1 + 1]), int(sr.record[0]))
)
south_sudan_srs = [
sr for sr in sf_admin.shapeRecords() if sr.record[3] == "South Sudan"
]
lines = []
for sr in tqdm(south_sudan_srs, desc="South Sudan Counties"):
county_polygon = Polygon(sr.shape.points)
for fs_polygon in tqdm(fs_polygons, desc="fs_polygons"):
if county_polygon.buffer(-0.05).intersects(fs_polygon[0]):
centroid = county_polygon.centroid
ax.text(
centroid.x,
centroid.y,
sr.record[8],
fontsize=6,
horizontalalignment="center",
)
xs, ys = lzip(*sr.shape.points)
CS = int(fs_polygon[1])
fill_and_plot(sr.shape.points, CS)
lines.append(
"\t".join([str(x) for x in sr.record] + [str(CS)])
)
with open("ipc_data.tsv", "w") as f:
f.write("\n".join(lines))
plt.savefig("shape.pdf")
def get_polygons(shape):
parts, points = shape.parts, shape.points
nparts = len(parts)
polygons = []
if nparts == 1:
polygons.append(Polygon(points))
else:
for ip, part in enumerate(parts):
if ip < nparts - 1:
i1 = parts[ip + 1] - 1
else:
i1 = len(points)
polygons.append(Polygon(points[part : i1 + 1]))
return polygons
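# Usage sketch for get_polygons with a hypothetical shapefile path (not part of
# the FEWS NET pipeline above): split one shapeRecord into shapely Polygons and
# sum their areas.
def example_total_area(shp_path="data/example_boundaries"):
    reader = shapefile.Reader(shp_path)
    first_record = reader.shapeRecords()[0]
    polygons = get_polygons(first_record.shape)
    return sum(polygon.area for polygon in polygons)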
def create_food_security_data_table(region: str, country: str):
admin_boundaries_shapefile = str(
Path(data_dir) / "FEWSNET_World_Admin" / "FEWSNET_Admin2"
)
sf_admin = shapefile.Reader(admin_boundaries_shapefile)
south_sudan_srs = [
x for x in sf_admin.shapeRecords() if x.record[3] == country
]
path = str(Path(data_dir) / "ALL_HFIC" / region)
ipc_records = []
with cd(path):
shapefiles = glob("*.shp")
for filename in tqdm(shapefiles, unit="shapefile"):
year = int(filename[3:7])
month = int(filename[7:9])
reader = shapefile.Reader(filename)
for i, fs_sr in tqdm(
enumerate(reader.shapeRecords()),
unit="Food security shapeRecord",
):
parts, points = fs_sr.shape.parts, fs_sr.shape.points
nparts = len(parts)
CS = int(fs_sr.record[0])
fs_polygons = get_polygons(fs_sr.shape)
for sr in tqdm(south_sudan_srs, desc=f"{country} Counties"):
county_polygon = Polygon(sr.shape.points)
for fs_polygon in tqdm(
fs_polygons, unit="Food security polygon"
):
if county_polygon.buffer(-0.05).intersects(fs_polygon):
ipc_records.append(
{
"Country": sr.record[3],
"State": sr.record[4],
"County": sr.record[8],
"Year": year,
"Month": month,
"IPC Phase": CS,
}
)
df =
|
pd.DataFrame(ipc_records)
|
pandas.DataFrame
|
from io import StringIO
import pandas as pd
import numpy as np
import pytest
import bioframe
import bioframe.core.checks as checks
# import pyranges as pr
# def bioframe_to_pyranges(df):
# pydf = df.copy()
# pydf.rename(
# {"chrom": "Chromosome", "start": "Start", "end": "End"},
# axis="columns",
# inplace=True,
# )
# return pr.PyRanges(pydf)
# def pyranges_to_bioframe(pydf):
# df = pydf.df
# df.rename(
# {"Chromosome": "chrom", "Start": "start", "End": "end", "Count": "n_intervals"},
# axis="columns",
# inplace=True,
# )
# return df
# def pyranges_overlap_to_bioframe(pydf):
# ## convert the df output by pyranges join into a bioframe-compatible format
# df = pydf.df.copy()
# df.rename(
# {
# "Chromosome": "chrom_1",
# "Start": "start_1",
# "End": "end_1",
# "Start_b": "start_2",
# "End_b": "end_2",
# },
# axis="columns",
# inplace=True,
# )
# df["chrom_1"] = df["chrom_1"].values.astype("object") # to remove categories
# df["chrom_2"] = df["chrom_1"].values
# return df
chroms = ["chr12", "chrX"]
def mock_bioframe(num_entries=100):
pos = np.random.randint(1, 1e7, size=(num_entries, 2))
df = pd.DataFrame()
df["chrom"] = np.random.choice(chroms, num_entries)
df["start"] = np.min(pos, axis=1)
df["end"] = np.max(pos, axis=1)
df.sort_values(["chrom", "start"], inplace=True)
return df
############# tests #####################
def test_select():
df1 = pd.DataFrame(
[["chrX", 3, 8], ["chr1", 4, 5], ["chrX", 1, 5]],
columns=["chrom", "start", "end"],
)
region1 = "chr1:4-10"
df_result = pd.DataFrame([["chr1", 4, 5]], columns=["chrom", "start", "end"])
pd.testing.assert_frame_equal(
df_result, bioframe.select(df1, region1).reset_index(drop=True)
)
region1 = "chrX"
df_result = pd.DataFrame(
[["chrX", 3, 8], ["chrX", 1, 5]], columns=["chrom", "start", "end"]
)
pd.testing.assert_frame_equal(
df_result, bioframe.select(df1, region1).reset_index(drop=True)
)
region1 = "chrX:4-6"
df_result = pd.DataFrame(
[["chrX", 3, 8], ["chrX", 1, 5]], columns=["chrom", "start", "end"]
)
pd.testing.assert_frame_equal(
df_result, bioframe.select(df1, region1).reset_index(drop=True)
)
### select with non-standard column names
region1 = "chrX:4-6"
new_names = ["chr", "chrstart", "chrend"]
df1 = pd.DataFrame(
[["chrX", 3, 8], ["chr1", 4, 5], ["chrX", 1, 5]],
columns=new_names,
)
df_result = pd.DataFrame(
[["chrX", 3, 8], ["chrX", 1, 5]],
columns=new_names,
)
pd.testing.assert_frame_equal(
df_result, bioframe.select(df1, region1, cols=new_names).reset_index(drop=True)
)
region1 = "chrX"
pd.testing.assert_frame_equal(
df_result, bioframe.select(df1, region1, cols=new_names).reset_index(drop=True)
)
### select from a DataFrame with NaNs
colnames = ["chrom", "start", "end", "view_region"]
df = pd.DataFrame(
[
["chr1", -6, 12, "chr1p"],
[pd.NA, pd.NA, pd.NA, "chr1q"],
["chrX", 1, 8, "chrX_0"],
],
columns=colnames,
).astype({"start": pd.Int64Dtype(), "end": pd.Int64Dtype()})
df_result = pd.DataFrame(
[["chr1", -6, 12, "chr1p"]],
columns=colnames,
).astype({"start": pd.Int64Dtype(), "end": pd.Int64Dtype()})
region1 = "chr1:0-1"
pd.testing.assert_frame_equal(
df_result, bioframe.select(df, region1).reset_index(drop=True)
)
def test_trim():
### trim with view_df
view_df = pd.DataFrame(
[
["chr1", 0, 12, "chr1p"],
["chr1", 13, 26, "chr1q"],
["chrX", 1, 8, "chrX_0"],
],
columns=["chrom", "start", "end", "name"],
)
df = pd.DataFrame(
[
["chr1", -6, 12, "chr1p"],
["chr1", 0, 12, "chr1p"],
["chr1", 32, 36, "chr1q"],
["chrX", 1, 8, "chrX_0"],
],
columns=["chrom", "start", "end", "view_region"],
)
df_trimmed = pd.DataFrame(
[
["chr1", 0, 12, "chr1p"],
["chr1", 0, 12, "chr1p"],
["chr1", 26, 26, "chr1q"],
["chrX", 1, 8, "chrX_0"],
],
columns=["chrom", "start", "end", "view_region"],
)
with pytest.raises(ValueError):
bioframe.trim(df, view_df=view_df)
# df_view_col already exists, so need to specify it:
pd.testing.assert_frame_equal(
df_trimmed, bioframe.trim(df, view_df=view_df, df_view_col="view_region")
)
### trim with view_df interpreted from dictionary for chromsizes
chromsizes = {"chr1": 20, "chrX_0": 5}
df = pd.DataFrame(
[
["chr1", 0, 12],
["chr1", 13, 26],
["chrX_0", 1, 8],
],
columns=["chrom", "startFunky", "end"],
)
df_trimmed = pd.DataFrame(
[
["chr1", 0, 12],
["chr1", 13, 20],
["chrX_0", 1, 5],
],
columns=["chrom", "startFunky", "end"],
).astype({"startFunky": pd.Int64Dtype(), "end": pd.Int64Dtype()})
pd.testing.assert_frame_equal(
df_trimmed,
bioframe.trim(
df,
view_df=chromsizes,
cols=["chrom", "startFunky", "end"],
return_view_columns=False,
),
)
### trim with default limits=None and negative values
df = pd.DataFrame(
[
["chr1", -4, 12],
["chr1", 13, 26],
["chrX", -5, -1],
],
columns=["chrom", "start", "end"],
)
df_trimmed = pd.DataFrame(
[
["chr1", 0, 12],
["chr1", 13, 26],
["chrX", 0, 0],
],
columns=["chrom", "start", "end"],
)
pd.testing.assert_frame_equal(df_trimmed, bioframe.trim(df))
### trim when there are NaN intervals
df = pd.DataFrame(
[
["chr1", -4, 12, "chr1p"],
[pd.NA, pd.NA, pd.NA, "chr1q"],
["chrX", -5, -1, "chrX_0"],
],
columns=["chrom", "start", "end", "region"],
).astype({"start": pd.Int64Dtype(), "end": pd.Int64Dtype()})
df_trimmed = pd.DataFrame(
[
["chr1", 0, 12, "chr1p"],
[pd.NA, pd.NA, pd.NA, "chr1q"],
["chrX", 0, 0, "chrX_0"],
],
columns=["chrom", "start", "end", "region"],
).astype({"start": pd.Int64Dtype(), "end": pd.Int64Dtype()})
pd.testing.assert_frame_equal(df_trimmed, bioframe.trim(df))
### trim with view_df and NA intervals
view_df = pd.DataFrame(
[
["chr1", 0, 12, "chr1p"],
["chr1", 13, 26, "chr1q"],
["chrX", 1, 12, "chrX_0"],
],
columns=["chrom", "start", "end", "name"],
)
df = pd.DataFrame(
[
["chr1", -6, 12],
["chr1", 0, 12],
[pd.NA, pd.NA, pd.NA],
["chrX", 1, 20],
],
columns=["chrom", "start", "end"],
).astype({"start": pd.Int64Dtype(), "end": pd.Int64Dtype()})
df_trimmed = pd.DataFrame(
[
["chr1", 0, 12, "chr1p"],
["chr1", 0, 12, "chr1p"],
[pd.NA, pd.NA, pd.NA, pd.NA],
["chrX", 1, 12, "chrX_0"],
],
columns=["chrom", "start", "end", "view_region"],
).astype({"start": pd.Int64Dtype(), "end": pd.Int64Dtype()})
# infer df_view_col with assign_view and ignore NAs
pd.testing.assert_frame_equal(
df_trimmed,
bioframe.trim(df, view_df=view_df, df_view_col=None, return_view_columns=True)[
["chrom", "start", "end", "view_region"]
],
)
def test_expand():
d = """chrom start end
0 chr1 1 5
1 chr1 50 55
2 chr2 100 200"""
fake_bioframe = pd.read_csv(StringIO(d), sep=r"\s+")
expand_bp = 10
fake_expanded = bioframe.expand(fake_bioframe, expand_bp)
d = """chrom start end
0 chr1 -9 15
1 chr1 40 65
2 chr2 90 210"""
df = pd.read_csv(StringIO(d), sep=r"\s+")
pd.testing.assert_frame_equal(df, fake_expanded)
# expand with negative pad
expand_bp = -10
fake_expanded = bioframe.expand(fake_bioframe, expand_bp)
d = """chrom start end
0 chr1 3 3
1 chr1 52 52
2 chr2 110 190"""
df = pd.read_csv(StringIO(d), sep=r"\s+")
pd.testing.assert_frame_equal(df, fake_expanded)
expand_bp = -10
fake_expanded = bioframe.expand(fake_bioframe, expand_bp, side="left")
d = """chrom start end
0 chr1 3 5
1 chr1 52 55
2 chr2 110 200"""
df = pd.read_csv(StringIO(d), sep=r"\s+")
pd.testing.assert_frame_equal(df, fake_expanded)
# expand with multiplicative pad
mult = 0
fake_expanded = bioframe.expand(fake_bioframe, pad=None, scale=mult)
d = """chrom start end
0 chr1 3 3
1 chr1 52 52
2 chr2 150 150"""
df = pd.read_csv(StringIO(d), sep=r"\s+")
pd.testing.assert_frame_equal(df, fake_expanded)
mult = 2.0
fake_expanded = bioframe.expand(fake_bioframe, pad=None, scale=mult)
d = """chrom start end
0 chr1 -1 7
1 chr1 48 58
2 chr2 50 250"""
df = pd.read_csv(StringIO(d), sep=r"\s+")
pd.testing.assert_frame_equal(df, fake_expanded)
# expand with NA and non-integer multiplicative pad
d = """chrom start end
0 chr1 1 5
1 NA NA NA
2 chr2 100 200"""
fake_bioframe = pd.read_csv(StringIO(d), sep=r"\s+").astype(
{"start": pd.Int64Dtype(), "end": pd.Int64Dtype()}
)
mult = 1.10
fake_expanded = bioframe.expand(fake_bioframe, pad=None, scale=mult)
d = """chrom start end
0 chr1 1 5
1 NA NA NA
2 chr2 95 205"""
df = pd.read_csv(StringIO(d), sep=r"\s+").astype(
{"start": pd.Int64Dtype(), "end": pd.Int64Dtype()}
)
pd.testing.assert_frame_equal(df, fake_expanded)
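# A minimal sketch (plain Python, not bioframe's implementation) of the `scale`
# behaviour exercised above: an interval is resized around its midpoint, so
# chr2 100-200 with scale=2.0 becomes 150 +/- 100 -> 50-250 and with scale=1.10
# becomes 150 +/- 55 -> 95-205. Rounding to the nearest base is assumed here;
# bioframe's exact rounding rule may differ.
def _scale_interval(start, end, scale):
    center = (start + end) / 2
    half_width = scale * (end - start) / 2
    return int(np.round(center - half_width)), int(np.round(center + half_width))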
def test_overlap():
### test consistency of overlap(how='inner') with pyranges.join ###
### note does not test overlap_start or overlap_end columns of bioframe.overlap
df1 = mock_bioframe()
df2 = mock_bioframe()
assert df1.equals(df2) == False
# p1 = bioframe_to_pyranges(df1)
# p2 = bioframe_to_pyranges(df2)
# pp = pyranges_overlap_to_bioframe(p1.join(p2, how=None))[
# ["chrom_1", "start_1", "end_1", "chrom_2", "start_2", "end_2"]
# ]
# bb = bioframe.overlap(df1, df2, how="inner")[
# ["chrom_1", "start_1", "end_1", "chrom_2", "start_2", "end_2"]
# ]
# pp = pp.sort_values(
# ["chrom_1", "start_1", "end_1", "chrom_2", "start_2", "end_2"],
# ignore_index=True,
# )
# bb = bb.sort_values(
# ["chrom_1", "start_1", "end_1", "chrom_2", "start_2", "end_2"],
# ignore_index=True,
# )
# pd.testing.assert_frame_equal(bb, pp, check_dtype=False, check_exact=False)
# print("overlap elements agree")
### test overlap on= [] ###
df1 = pd.DataFrame(
[
["chr1", 8, 12, "+", "cat"],
["chr1", 8, 12, "-", "cat"],
["chrX", 1, 8, "+", "cat"],
],
columns=["chrom1", "start", "end", "strand", "animal"],
)
df2 = pd.DataFrame(
[["chr1", 6, 10, "+", "dog"], ["chrX", 7, 10, "-", "dog"]],
columns=["chrom2", "start2", "end2", "strand", "animal"],
)
b = bioframe.overlap(
df1,
df2,
on=["animal"],
how="left",
cols1=("chrom1", "start", "end"),
cols2=("chrom2", "start2", "end2"),
return_index=True,
return_input=False,
)
assert np.sum(pd.isna(b["index_"].values)) == 3
b = bioframe.overlap(
df1,
df2,
on=["strand"],
how="left",
cols1=("chrom1", "start", "end"),
cols2=("chrom2", "start2", "end2"),
return_index=True,
return_input=False,
)
assert np.sum(pd.isna(b["index_"].values)) == 2
b = bioframe.overlap(
df1,
df2,
on=None,
how="left",
cols1=("chrom1", "start", "end"),
cols2=("chrom2", "start2", "end2"),
return_index=True,
return_input=False,
)
assert np.sum(pd.isna(b["index_"].values)) == 0
### test overlap 'left', 'outer', and 'right'
b = bioframe.overlap(
df1,
df2,
on=None,
how="outer",
cols1=("chrom1", "start", "end"),
cols2=("chrom2", "start2", "end2"),
)
assert len(b) == 3
b = bioframe.overlap(
df1,
df2,
on=["animal"],
how="outer",
cols1=("chrom1", "start", "end"),
cols2=("chrom2", "start2", "end2"),
)
assert len(b) == 5
b = bioframe.overlap(
df1,
df2,
on=["animal"],
how="inner",
cols1=("chrom1", "start", "end"),
cols2=("chrom2", "start2", "end2"),
)
assert len(b) == 0
b = bioframe.overlap(
df1,
df2,
on=["animal"],
how="right",
cols1=("chrom1", "start", "end"),
cols2=("chrom2", "start2", "end2"),
)
assert len(b) == 2
b = bioframe.overlap(
df1,
df2,
on=["animal"],
how="left",
cols1=("chrom1", "start", "end"),
cols2=("chrom2", "start2", "end2"),
)
assert len(b) == 3
### test keep_order and NA handling
df1 = pd.DataFrame(
[
["chr1", 8, 12, "+"],
[pd.NA, pd.NA, pd.NA, "-"],
["chrX", 1, 8, "+"],
],
columns=["chrom", "start", "end", "strand"],
).astype({"start": pd.Int64Dtype(), "end": pd.Int64Dtype()})
df2 = pd.DataFrame(
[["chr1", 6, 10, "+"], [pd.NA, pd.NA, pd.NA, "-"], ["chrX", 7, 10, "-"]],
columns=["chrom2", "start2", "end2", "strand"],
).astype({"start2": pd.Int64Dtype(), "end2": pd.Int64Dtype()})
assert df1.equals(
bioframe.overlap(
df1, df2, how="left", keep_order=True, cols2=["chrom2", "start2", "end2"]
)[["chrom", "start", "end", "strand"]]
)
assert not df1.equals(
bioframe.overlap(
df1, df2, how="left", keep_order=False, cols2=["chrom2", "start2", "end2"]
)[["chrom", "start", "end", "strand"]]
)
df1 = pd.DataFrame(
[
["chr1", 8, 12, "+", pd.NA],
[pd.NA, pd.NA, pd.NA, "-", pd.NA],
["chrX", 1, 8, pd.NA, pd.NA],
],
columns=["chrom", "start", "end", "strand", "animal"],
).astype({"start": pd.Int64Dtype(), "end": pd.Int64Dtype()})
df2 = pd.DataFrame(
[["chr1", 6, 10, pd.NA, "tiger"]],
columns=["chrom2", "start2", "end2", "strand", "animal"],
).astype({"start2": pd.Int64Dtype(), "end2": pd.Int64Dtype()})
assert (
bioframe.overlap(
df1,
df2,
how="outer",
cols2=["chrom2", "start2", "end2"],
return_index=True,
keep_order=False,
).shape
== (3, 12)
)
### result of overlap should still have bedframe-like properties
overlap_df = bioframe.overlap(
df1,
df2,
how="outer",
cols2=["chrom2", "start2", "end2"],
return_index=True,
suffixes=("", ""),
)
assert checks.is_bedframe(
overlap_df[df1.columns],
)
assert checks.is_bedframe(
overlap_df[df2.columns], cols=["chrom2", "start2", "end2"]
)
overlap_df = bioframe.overlap(
df1,
df2,
how="innter",
cols2=["chrom2", "start2", "end2"],
return_index=True,
suffixes=("", ""),
)
assert checks.is_bedframe(
overlap_df[df1.columns],
)
assert checks.is_bedframe(
overlap_df[df2.columns], cols=["chrom2", "start2", "end2"]
)
# test that keep_order is incompatible with how != 'left'
with pytest.raises(ValueError):
bioframe.overlap(
df1,
df2,
how="outer",
on=["animal"],
cols2=["chrom2", "start2", "end2"],
keep_order=True,
)
def test_cluster():
df1 = pd.DataFrame(
[
["chr1", 1, 5],
["chr1", 3, 8],
["chr1", 8, 10],
["chr1", 12, 14],
],
columns=["chrom", "start", "end"],
)
df_annotated = bioframe.cluster(df1)
assert (
df_annotated["cluster"].values == np.array([0, 0, 0, 1])
).all() # the last interval does not overlap the first three
df_annotated = bioframe.cluster(df1, min_dist=2)
assert (
df_annotated["cluster"].values == np.array([0, 0, 0, 0])
).all() # all intervals part of the same cluster
df_annotated = bioframe.cluster(df1, min_dist=None)
assert (
df_annotated["cluster"].values == np.array([0, 0, 1, 2])
).all() # adjacent intervals not clustered
df1.iloc[0, 0] = "chrX"
df_annotated = bioframe.cluster(df1)
assert (
df_annotated["cluster"].values == np.array([2, 0, 0, 1])
).all() # do not cluster intervals across chromosomes
# test consistency with pyranges (which automatically sorts df upon creation and uses 1-based indexing for clusters)
# assert (
# (bioframe_to_pyranges(df1).cluster(count=True).df["Cluster"].values - 1)
# == bioframe.cluster(df1.sort_values(["chrom", "start"]))["cluster"].values
# ).all()
# test on=[] argument
df1 = pd.DataFrame(
[
["chr1", 3, 8, "+", "cat", 5.5],
["chr1", 3, 8, "-", "dog", 6.5],
["chr1", 6, 10, "-", "cat", 6.5],
["chrX", 6, 10, "-", "cat", 6.5],
],
columns=["chrom", "start", "end", "strand", "animal", "location"],
)
assert (
bioframe.cluster(df1, on=["animal"])["cluster"].values == np.array([0, 1, 0, 2])
).all()
assert (
bioframe.cluster(df1, on=["strand"])["cluster"].values == np.array([0, 1, 1, 2])
).all()
assert (
bioframe.cluster(df1, on=["location", "animal"])["cluster"].values
== np.array([0, 2, 1, 3])
).all()
### test cluster with NAs
df1 = pd.DataFrame(
[
["chrX", 1, 8, pd.NA, pd.NA],
[pd.NA, pd.NA, pd.NA, "-", pd.NA],
["chr1", 8, 12, "+", pd.NA],
["chr1", 1, 8, np.nan, pd.NA],
[pd.NA, np.nan, pd.NA, "-", pd.NA],
],
columns=["chrom", "start", "end", "strand", "animal"],
).astype({"start": pd.Int64Dtype(), "end": pd.Int64Dtype()})
assert bioframe.cluster(df1)["cluster"].max() == 3
assert bioframe.cluster(df1, on=["strand"])["cluster"].max() == 4
pd.testing.assert_frame_equal(df1, bioframe.cluster(df1)[df1.columns])
assert checks.is_bedframe(
bioframe.cluster(df1, on=["strand"]),
cols=["chrom", "cluster_start", "cluster_end"],
)
assert checks.is_bedframe(
bioframe.cluster(df1), cols=["chrom", "cluster_start", "cluster_end"]
)
assert checks.is_bedframe(bioframe.cluster(df1))
def test_merge():
df1 = pd.DataFrame(
[
["chr1", 1, 5],
["chr1", 3, 8],
["chr1", 8, 10],
["chr1", 12, 14],
],
columns=["chrom", "start", "end"],
)
# the last interval does not overlap the first three with default min_dist=0
assert (bioframe.merge(df1)["n_intervals"].values == np.array([3, 1])).all()
# adjacent intervals are not clustered with min_dist=None
assert (
bioframe.merge(df1, min_dist=None)["n_intervals"].values == np.array([2, 1, 1])
).all()
# all intervals part of one cluster
assert (
bioframe.merge(df1, min_dist=2)["n_intervals"].values == np.array([4])
).all()
df1.iloc[0, 0] = "chrX"
assert (
bioframe.merge(df1, min_dist=None)["n_intervals"].values
== np.array([1, 1, 1, 1])
).all()
assert (
bioframe.merge(df1, min_dist=0)["n_intervals"].values == np.array([2, 1, 1])
).all()
# total number of intervals should equal length of original dataframe
mock_df = mock_bioframe()
assert np.sum(bioframe.merge(mock_df, min_dist=0)["n_intervals"].values) == len(
mock_df
)
# # test consistency with pyranges
# pd.testing.assert_frame_equal(
# pyranges_to_bioframe(bioframe_to_pyranges(df1).merge(count=True)),
# bioframe.merge(df1),
# check_dtype=False,
# check_exact=False,
# )
# test on=['chrom',...] argument
df1 = pd.DataFrame(
[
["chr1", 3, 8, "+", "cat", 5.5],
["chr1", 3, 8, "-", "dog", 6.5],
["chr1", 6, 10, "-", "cat", 6.5],
["chrX", 6, 10, "-", "cat", 6.5],
],
columns=["chrom", "start", "end", "strand", "animal", "location"],
)
assert len(bioframe.merge(df1, on=None)) == 2
assert len(bioframe.merge(df1, on=["strand"])) == 3
assert len(bioframe.merge(df1, on=["strand", "location"])) == 3
assert len(bioframe.merge(df1, on=["strand", "location", "animal"])) == 4
d = """ chrom start end animal n_intervals
0 chr1 3 10 cat 2
1 chr1 3 8 dog 1
2 chrX 6 10 cat 1"""
df = pd.read_csv(StringIO(d), sep=r"\s+")
pd.testing.assert_frame_equal(
df,
bioframe.merge(df1, on=["animal"]),
check_dtype=False,
)
# merge with repeated indices
df = pd.DataFrame(
{"chrom": ["chr1", "chr2"], "start": [100, 400], "end": [110, 410]}
)
df.index = [0, 0]
pd.testing.assert_frame_equal(
df.reset_index(drop=True), bioframe.merge(df)[["chrom", "start", "end"]]
)
# test merge with NAs
df1 = pd.DataFrame(
[
["chrX", 1, 8, pd.NA, pd.NA],
[pd.NA, pd.NA, pd.NA, "-", pd.NA],
["chr1", 8, 12, "+", pd.NA],
["chr1", 1, 8, np.nan, pd.NA],
[pd.NA, np.nan, pd.NA, "-", pd.NA],
],
columns=["chrom", "start", "end", "strand", "animal"],
).astype({"start": pd.Int64Dtype(), "end": pd.Int64Dtype()})
assert bioframe.merge(df1).shape[0] == 4
assert bioframe.merge(df1)["start"].iloc[0] == 1
assert bioframe.merge(df1)["end"].iloc[0] == 12
assert bioframe.merge(df1, on=["strand"]).shape[0] == df1.shape[0]
assert bioframe.merge(df1, on=["animal"]).shape[0] == df1.shape[0]
assert bioframe.merge(df1, on=["animal"]).shape[1] == df1.shape[1] + 1
assert checks.is_bedframe(bioframe.merge(df1, on=["strand", "animal"]))
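# A minimal sketch (plain pandas/Python, not bioframe's implementation) of the
# merge semantics asserted above: per chromosome, sort by start and sweep,
# joining an interval into the running cluster whenever its gap to the current
# end is <= min_dist. Rows with a missing chrom are dropped here, and
# min_dist=None (merge only strict overlaps) is not handled in this sketch.
def _merge_by_hand(df, min_dist=0):
    merged = []
    for chrom, group in df.dropna(subset=["chrom"]).groupby("chrom"):
        group = group.sort_values("start")
        cur_start, cur_end, n = None, None, 0
        for start, end in zip(group["start"], group["end"]):
            if cur_start is None or start - cur_end > min_dist:
                if cur_start is not None:
                    merged.append((chrom, cur_start, cur_end, n))
                cur_start, cur_end, n = start, end, 1
            else:
                cur_end, n = max(cur_end, end), n + 1
        if cur_start is not None:
            merged.append((chrom, cur_start, cur_end, n))
    return pd.DataFrame(merged, columns=["chrom", "start", "end", "n_intervals"])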
def test_complement():
### complementing a df with no intervals in chrX by a view with chrX should return entire chrX region
df1 = pd.DataFrame(
[["chr1", 1, 5], ["chr1", 3, 8], ["chr1", 8, 10], ["chr1", 12, 14]],
columns=["chrom", "start", "end"],
)
df1_chromsizes = {"chr1": 100, "chrX": 100}
df1_complement = pd.DataFrame(
[
["chr1", 0, 1, "chr1:0-100"],
["chr1", 10, 12, "chr1:0-100"],
["chr1", 14, 100, "chr1:0-100"],
["chrX", 0, 100, "chrX:0-100"],
],
columns=["chrom", "start", "end", "view_region"],
)
pd.testing.assert_frame_equal(
bioframe.complement(df1, view_df=df1_chromsizes), df1_complement
)
### test complement with two chromosomes ###
df1.iloc[0, 0] = "chrX"
df1_complement = pd.DataFrame(
[
["chr1", 0, 3, "chr1:0-100"],
["chr1", 10, 12, "chr1:0-100"],
["chr1", 14, 100, "chr1:0-100"],
["chrX", 0, 1, "chrX:0-100"],
["chrX", 5, 100, "chrX:0-100"],
],
columns=["chrom", "start", "end", "view_region"],
)
pd.testing.assert_frame_equal(
bioframe.complement(df1, view_df=df1_chromsizes), df1_complement
)
### test complement with no view_df and a negative interval
df1 = pd.DataFrame(
[["chr1", -5, 5], ["chr1", 10, 20]], columns=["chrom", "start", "end"]
)
df1_complement = pd.DataFrame(
[
["chr1", 5, 10, "chr1:0-9223372036854775807"],
["chr1", 20, np.iinfo(np.int64).max, "chr1:0-9223372036854775807"],
],
columns=["chrom", "start", "end", "view_region"],
)
pd.testing.assert_frame_equal(bioframe.complement(df1), df1_complement)
### test complement with an overhanging interval
df1 = pd.DataFrame(
[["chr1", -5, 5], ["chr1", 10, 20]], columns=["chrom", "start", "end"]
)
chromsizes = {"chr1": 15}
df1_complement = pd.DataFrame(
[
["chr1", 5, 10, "chr1:0-15"],
],
columns=["chrom", "start", "end", "view_region"],
)
pd.testing.assert_frame_equal(
bioframe.complement(df1, view_df=chromsizes, view_name_col="VR"), df1_complement
)
### test complement where an interval from df overlaps two different regions from view
### test complement with no view_df and a negative interval
df1 = pd.DataFrame([["chr1", 5, 15]], columns=["chrom", "start", "end"])
chromsizes = [("chr1", 0, 9, "chr1p"), ("chr1", 11, 20, "chr1q")]
df1_complement = pd.DataFrame(
[["chr1", 0, 5, "chr1p"], ["chr1", 15, 20, "chr1q"]],
columns=["chrom", "start", "end", "view_region"],
)
pd.testing.assert_frame_equal(bioframe.complement(df1, chromsizes), df1_complement)
### test complement with NAs
df1 = pd.DataFrame(
[[pd.NA, pd.NA, pd.NA], ["chr1", 5, 15], [pd.NA, pd.NA, pd.NA]],
columns=["chrom", "start", "end"],
).astype(
{
"start": pd.Int64Dtype(),
"end": pd.Int64Dtype(),
}
)
pd.testing.assert_frame_equal(bioframe.complement(df1, chromsizes), df1_complement)
with pytest.raises(ValueError): # no NAs allowed in chromsizes
bioframe.complement(
df1, [("chr1", pd.NA, 9, "chr1p"), ("chr1", 11, 20, "chr1q")]
)
assert checks.is_bedframe(bioframe.complement(df1, chromsizes))
def test_closest():
df1 = pd.DataFrame(
[
["chr1", 1, 5],
],
columns=["chrom", "start", "end"],
)
df2 = pd.DataFrame(
[["chr1", 4, 8], ["chr1", 10, 11]], columns=["chrom", "start", "end"]
)
### closest(df1,df2,k=1) ###
d = """chrom start end chrom_ start_ end_ distance
0 chr1 1 5 chr1 4 8 0"""
df = pd.read_csv(StringIO(d), sep=r"\s+").astype(
{
"start_": pd.Int64Dtype(),
"end_": pd.Int64Dtype(),
"distance": pd.Int64Dtype(),
}
)
pd.testing.assert_frame_equal(df, bioframe.closest(df1, df2, k=1))
### closest(df1,df2, ignore_overlaps=True)) ###
d = """chrom_1 start_1 end_1 chrom_2 start_2 end_2 distance
0 chr1 1 5 chr1 10 11 5"""
df = pd.read_csv(StringIO(d), sep=r"\s+").astype(
{
"start_2": pd.Int64Dtype(),
"end_2": pd.Int64Dtype(),
"distance": pd.Int64Dtype(),
}
)
pd.testing.assert_frame_equal(
df, bioframe.closest(df1, df2, suffixes=("_1", "_2"), ignore_overlaps=True)
)
### closest(df1,df2,k=2) ###
d = """chrom_1 start_1 end_1 chrom_2 start_2 end_2 distance
0 chr1 1 5 chr1 4 8 0
1 chr1 1 5 chr1 10 11 5"""
df = pd.read_csv(StringIO(d), sep=r"\s+").astype(
{
"start_2": pd.Int64Dtype(),
"end_2": pd.Int64Dtype(),
"distance": pd.Int64Dtype(),
}
)
pd.testing.assert_frame_equal(
df, bioframe.closest(df1, df2, suffixes=("_1", "_2"), k=2)
)
### closest(df2,df1) ###
d = """chrom_1 start_1 end_1 chrom_2 start_2 end_2 distance
0 chr1 4 8 chr1 1 5 0
1 chr1 10 11 chr1 1 5 5 """
df = pd.read_csv(StringIO(d), sep=r"\s+").astype(
{
"start_2": pd.Int64Dtype(),
"end_2": pd.Int64Dtype(),
"distance": pd.Int64Dtype(),
}
)
pd.testing.assert_frame_equal(df, bioframe.closest(df2, df1, suffixes=("_1", "_2")))
### change first interval to new chrom ###
df2.iloc[0, 0] = "chrA"
d = """chrom start end chrom_ start_ end_ distance
0 chr1 1 5 chr1 10 11 5"""
df = pd.read_csv(StringIO(d), sep=r"\s+").astype(
{
"start_": pd.Int64Dtype(),
"end_": pd.Int64Dtype(),
"distance": pd.Int64Dtype(),
}
)
pd.testing.assert_frame_equal(df, bioframe.closest(df1, df2, k=1))
### test other return arguments ###
df2.iloc[0, 0] = "chr1"
d = """
index index_ have_overlap overlap_start overlap_end distance
0 0 0 True 4 5 0
1 0 1 False <NA> <NA> 5
"""
df = pd.read_csv(StringIO(d), sep=r"\s+")
pd.testing.assert_frame_equal(
df,
bioframe.closest(
df1,
df2,
k=2,
return_overlap=True,
return_index=True,
return_input=False,
return_distance=True,
),
check_dtype=False,
)
# closest should ignore empty groups (e.g. from categorical chrom)
df = pd.DataFrame(
[
["chrX", 1, 8],
["chrX", 2, 10],
],
columns=["chrom", "start", "end"],
)
d = """ chrom_1 start_1 end_1 chrom_2 start_2 end_2 distance
0 chrX 1 8 chrX 2 10 0
1 chrX 2 10 chrX 1 8 0"""
df_closest = pd.read_csv(StringIO(d), sep=r"\s+")
df_cat = pd.CategoricalDtype(categories=["chrX", "chr1"], ordered=True)
df = df.astype({"chrom": df_cat})
pd.testing.assert_frame_equal(
df_closest,
bioframe.closest(df, suffixes=("_1", "_2")),
check_dtype=False,
check_categorical=False,
)
# closest should ignore null rows: code will need to be modified
# as for overlap if an on=[] option is added
df1 = pd.DataFrame(
[
[pd.NA, pd.NA, pd.NA],
["chr1", 1, 5],
],
columns=["chrom", "start", "end"],
).astype({"start": pd.Int64Dtype(), "end": pd.Int64Dtype()})
df2 = pd.DataFrame(
[
[pd.NA, pd.NA, pd.NA],
["chr1", 4, 8],
[pd.NA, pd.NA, pd.NA],
["chr1", 10, 11],
],
columns=["chrom", "start", "end"],
).astype({"start": pd.Int64Dtype(), "end": pd.Int64Dtype()})
d = """chrom_1 start_1 end_1 chrom_2 start_2 end_2 distance
0 chr1 1 5 chr1 10 11 5"""
df = pd.read_csv(StringIO(d), sep=r"\s+").astype(
{
"start_1": pd.Int64Dtype(),
"end_1": pd.Int64Dtype(),
"start_2": pd.Int64Dtype(),
"end_2": pd.Int64Dtype(),
"distance": pd.Int64Dtype(),
}
)
pd.testing.assert_frame_equal(
df, bioframe.closest(df1, df2, suffixes=("_1", "_2"), ignore_overlaps=True, k=5)
)
with pytest.raises(ValueError): # inputs must be valid bedFrames
df1.iloc[0, 0] = "chr10"
bioframe.closest(df1, df2)
def test_coverage():
#### coverage does not exceed length of original interval
df1 = pd.DataFrame([["chr1", 3, 8]], columns=["chrom", "start", "end"])
df2 = pd.DataFrame([["chr1", 2, 10]], columns=["chrom", "start", "end"])
d = """chrom start end coverage
0 chr1 3 8 5"""
df = pd.read_csv(StringIO(d), sep=r"\s+")
pd.testing.assert_frame_equal(df, bioframe.coverage(df1, df2))
### coverage of interval on different chrom returns zero for coverage and n_overlaps
df1 = pd.DataFrame([["chr1", 3, 8]], columns=["chrom", "start", "end"])
df2 = pd.DataFrame([["chrX", 3, 8]], columns=["chrom", "start", "end"])
d = """chrom start end coverage
0 chr1 3 8 0 """
df = pd.read_csv(StringIO(d), sep=r"\s+")
pd.testing.assert_frame_equal(df, bioframe.coverage(df1, df2))
### when a second overlap starts within the first
df1 = pd.DataFrame([["chr1", 3, 8]], columns=["chrom", "start", "end"])
df2 = pd.DataFrame(
[["chr1", 3, 6], ["chr1", 5, 8]], columns=["chrom", "start", "end"]
)
d = """chrom start end coverage
0 chr1 3 8 5"""
df = pd.read_csv(StringIO(d), sep=r"\s+")
pd.testing.assert_frame_equal(df, bioframe.coverage(df1, df2))
### coverage of NA interval returns zero for coverage
df1 = pd.DataFrame(
[
["chr1", 10, 20],
[pd.NA, pd.NA, pd.NA],
["chr1", 3, 8],
[pd.NA, pd.NA, pd.NA],
],
columns=["chrom", "start", "end"],
)
df2 = pd.DataFrame(
[["chr1", 3, 6], ["chr1", 5, 8], [pd.NA, pd.NA, pd.NA]],
columns=["chrom", "start", "end"],
)
df1 = bioframe.sanitize_bedframe(df1)
df2 = bioframe.sanitize_bedframe(df2)
df_coverage = pd.DataFrame(
[
["chr1", 10, 20, 0],
[pd.NA, pd.NA, pd.NA, 0],
["chr1", 3, 8, 5],
[pd.NA, pd.NA, pd.NA, 0],
],
columns=["chrom", "start", "end", "coverage"],
).astype(
{"start": pd.Int64Dtype(), "end": pd.Int64Dtype(), "coverage": pd.Int64Dtype()}
)
pd.testing.assert_frame_equal(df_coverage, bioframe.coverage(df1, df2))
### coverage without return_input returns a single column dataFrame
assert (
bioframe.coverage(df1, df2, return_input=False)["coverage"].values
== np.array([0, 0, 5, 0])
).all()
def test_subtract():
### no intervals should be left after self-subtraction
df1 = pd.DataFrame(
[["chrX", 3, 8], ["chr1", 4, 7], ["chrX", 1, 5]],
columns=["chrom", "start", "end"],
)
assert len(bioframe.subtract(df1, df1)) == 0
### no intervals on chrX should remain after subtracting a longer interval
### interval on chr1 should be split.
### additional column should be propagated to children.
df2 = pd.DataFrame(
[
["chrX", 0, 18],
["chr1", 5, 6],
],
columns=["chrom", "start", "end"],
)
df1["animal"] = "sea-creature"
df_result = pd.DataFrame(
[["chr1", 4, 5, "sea-creature"], ["chr1", 6, 7, "sea-creature"]],
columns=["chrom", "start", "end", "animal"],
).astype({"start": pd.Int64Dtype(), "end": pd.Int64Dtype()})
pd.testing.assert_frame_equal(
df_result,
bioframe.subtract(df1, df2)
.sort_values(["chrom", "start", "end"])
.reset_index(drop=True),
)
### no intervals on chrX should remain after subtracting a longer interval
df2 = pd.DataFrame(
[["chrX", 0, 4], ["chr1", 6, 6], ["chrX", 4, 9]],
columns=["chrom", "start", "end"],
)
df1["animal"] = "sea-creature"
df_result = pd.DataFrame(
[["chr1", 4, 6, "sea-creature"], ["chr1", 6, 7, "sea-creature"]],
columns=["chrom", "start", "end", "animal"],
)
pd.testing.assert_frame_equal(
df_result.astype({"start": pd.Int64Dtype(), "end": pd.Int64Dtype()}),
bioframe.subtract(df1, df2)
.sort_values(["chrom", "start", "end"])
.reset_index(drop=True),
)
### subtracting dataframes with funny column names
funny_cols = ["C", "chromStart", "chromStop"]
df1 = pd.DataFrame(
[["chrX", 3, 8], ["chr1", 4, 7], ["chrX", 1, 5]],
columns=funny_cols,
)
df1["strand"] = "+"
assert len(bioframe.subtract(df1, df1, cols1=funny_cols, cols2=funny_cols)) == 0
funny_cols2 = ["chr", "st", "e"]
df2 = pd.DataFrame(
[
["chrX", 0, 18],
["chr1", 5, 6],
],
columns=funny_cols2,
)
df_result = pd.DataFrame(
[["chr1", 4, 5, "+"], ["chr1", 6, 7, "+"]],
columns=funny_cols + ["strand"],
)
df_result = df_result.astype(
{funny_cols[1]: pd.Int64Dtype(), funny_cols[2]: pd.Int64Dtype()}
)
pd.testing.assert_frame_equal(
df_result,
bioframe.subtract(df1, df2, cols1=funny_cols, cols2=funny_cols2)
.sort_values(funny_cols)
.reset_index(drop=True),
)
# subtract should ignore empty groups
df1 = pd.DataFrame(
[
["chrX", 1, 8],
["chrX", 2, 10],
],
columns=["chrom", "start", "end"],
)
df2 = pd.DataFrame(
[
["chrX", 1, 8],
],
columns=["chrom", "start", "end"],
)
df_cat = pd.CategoricalDtype(categories=["chrX", "chr1"], ordered=True)
df1 = df1.astype({"chrom": df_cat})
df_subtracted = pd.DataFrame(
[
["chrX", 8, 10],
],
columns=["chrom", "start", "end"],
)
assert bioframe.subtract(df1, df1).empty
pd.testing.assert_frame_equal(
df_subtracted.astype({"start": pd.Int64Dtype(), "end": pd.Int64Dtype()}),
bioframe.subtract(df1, df2),
check_dtype=False,
check_categorical=False,
)
## test transferred from deprecated bioframe.split
df1 = pd.DataFrame(
[["chrX", 3, 8], ["chr1", 4, 7], ["chrX", 1, 5]],
columns=["chrom", "start", "end"],
)
df2 = pd.DataFrame(
[
["chrX", 4],
["chr1", 5],
],
columns=["chrom", "pos"],
)
df2["start"] = df2["pos"]
df2["end"] = df2["pos"]
df_result = (
pd.DataFrame(
[
["chrX", 1, 4],
["chrX", 3, 4],
["chrX", 4, 5],
["chrX", 4, 8],
["chr1", 5, 7],
["chr1", 4, 5],
],
columns=["chrom", "start", "end"],
)
.sort_values(["chrom", "start", "end"])
.reset_index(drop=True)
.astype({"start": pd.Int64Dtype(), "end": pd.Int64Dtype()})
)
pd.testing.assert_frame_equal(
df_result,
bioframe.subtract(df1, df2)
.sort_values(["chrom", "start", "end"])
.reset_index(drop=True),
)
# Test the case when a chromosome should not be split (now implemented with subtract)
df1 = pd.DataFrame(
[
["chrX", 3, 8],
["chr1", 4, 7],
],
columns=["chrom", "start", "end"],
)
df2 = pd.DataFrame([["chrX", 4]], columns=["chrom", "pos"])
df2["start"] = df2["pos"].values
df2["end"] = df2["pos"].values
df_result = (
pd.DataFrame(
[
["chrX", 3, 4],
["chrX", 4, 8],
["chr1", 4, 7],
],
columns=["chrom", "start", "end"],
)
.sort_values(["chrom", "start", "end"])
.reset_index(drop=True)
)
pd.testing.assert_frame_equal(
df_result.astype({"start": pd.Int64Dtype(), "end": pd.Int64Dtype()}),
bioframe.subtract(df1, df2)
.sort_values(["chrom", "start", "end"])
.reset_index(drop=True),
)
# subtract should ignore null rows
df1 = pd.DataFrame(
[[pd.NA, pd.NA, pd.NA], ["chr1", 1, 5]],
columns=["chrom", "start", "end"],
).astype({"start": pd.Int64Dtype(), "end": pd.Int64Dtype()})
df2 = pd.DataFrame(
[
["chrX", 1, 5],
[pd.NA, pd.NA, pd.NA],
["chr1", 4, 8],
[pd.NA, pd.NA, pd.NA],
["chr1", 10, 11],
],
columns=["chrom", "start", "end"],
).astype({"start": pd.Int64Dtype(), "end": pd.Int64Dtype()})
df_subtracted = pd.DataFrame(
[
["chr1", 1, 4],
],
columns=["chrom", "start", "end"],
).astype({"start": pd.Int64Dtype(), "end": pd.Int64Dtype()})
pd.testing.assert_frame_equal(df_subtracted, bioframe.subtract(df1, df2))
df1 = pd.DataFrame(
[
[pd.NA, pd.NA, pd.NA],
],
columns=["chrom", "start", "end"],
).astype({"start": pd.Int64Dtype(), "end": pd.Int64Dtype()})
assert len(bioframe.subtract(df1, df2)) == 0 # empty df1 but valid chroms in df2
with pytest.raises(ValueError): # no non-null chromosomes
bioframe.subtract(df1, df1)
df2 = pd.DataFrame(
[
[pd.NA, pd.NA, pd.NA],
[pd.NA, pd.NA, pd.NA],
],
columns=["chrom", "start", "end"],
).astype({"start": pd.Int64Dtype(), "end": pd.Int64Dtype()})
with pytest.raises(ValueError): # no non-null chromosomes
bioframe.subtract(df1, df2)
def test_setdiff():
cols1 = ["chrom1", "start", "end"]
cols2 = ["chrom2", "start", "end"]
df1 = pd.DataFrame(
[
["chr1", 8, 12, "+", "cat"],
["chr1", 8, 12, "-", "cat"],
["chrX", 1, 8, "+", "cat"],
],
columns=cols1 + ["strand", "animal"],
)
df2 = pd.DataFrame(
[
["chrX", 7, 10, "-", "dog"],
["chr1", 6, 10, "-", "cat"],
["chr1", 6, 10, "-", "cat"],
],
columns=cols2 + ["strand", "animal"],
)
assert (
len(
bioframe.setdiff(
df1,
df2,
cols1=cols1,
cols2=cols2,
on=None,
)
)
== 0
) # everything overlaps
assert (
len(
bioframe.setdiff(
df1,
df2,
cols1=cols1,
cols2=cols2,
on=["animal"],
)
)
== 1
) # two overlap, one remains
assert (
len(
bioframe.setdiff(
df1,
df2,
cols1=cols1,
cols2=cols2,
on=["strand"],
)
)
== 2
) # one overlaps, two remain
# setdiff should ignore nan rows
df1 = pd.concat([pd.DataFrame([pd.NA]), df1, pd.DataFrame([pd.NA])])[
["chrom1", "start", "end", "strand", "animal"]
]
df1 = df1.astype(
{
"start": pd.Int64Dtype(),
"end": pd.Int64Dtype(),
}
)
df2 = pd.concat([pd.DataFrame([pd.NA]), df2, pd.DataFrame([pd.NA])])[
["chrom2", "start", "end", "strand", "animal"]
]
df2 = df2.astype(
{
"start": pd.Int64Dtype(),
"end": pd.Int64Dtype(),
}
)
assert (2, 5) == np.shape(bioframe.setdiff(df1, df1, cols1=cols1, cols2=cols1))
assert (2, 5) == np.shape(bioframe.setdiff(df1, df2, cols1=cols1, cols2=cols2))
assert (4, 5) == np.shape(
bioframe.setdiff(df1, df2, on=["strand"], cols1=cols1, cols2=cols2)
)
def test_count_overlaps():
df1 = pd.DataFrame(
[
["chr1", 8, 12, "+", "cat"],
["chr1", 8, 12, "-", "cat"],
["chrX", 1, 8, "+", "cat"],
],
columns=["chrom1", "start", "end", "strand", "animal"],
)
df2 = pd.DataFrame(
[
["chr1", 6, 10, "+", "dog"],
["chr1", 6, 10, "+", "dog"],
["chrX", 7, 10, "+", "dog"],
["chrX", 7, 10, "+", "dog"],
],
columns=["chrom2", "start2", "end2", "strand", "animal"],
)
assert (
bioframe.count_overlaps(
df1,
df2,
on=None,
cols1=("chrom1", "start", "end"),
cols2=("chrom2", "start2", "end2"),
)["count"].values
== np.array([2, 2, 2])
).all()
assert (
bioframe.count_overlaps(
df1,
df2,
on=["strand"],
cols1=("chrom1", "start", "end"),
cols2=("chrom2", "start2", "end2"),
)["count"].values
== np.array([2, 0, 2])
).all()
assert (
bioframe.count_overlaps(
df1,
df2,
on=["strand", "animal"],
cols1=("chrom1", "start", "end"),
cols2=("chrom2", "start2", "end2"),
)["count"].values
== np.array([0, 0, 0])
).all()
# overlaps with pd.NA
counts_no_nans = bioframe.count_overlaps(
df1,
df2,
on=None,
cols1=("chrom1", "start", "end"),
cols2=("chrom2", "start2", "end2"),
)
df1_na = (pd.concat([pd.DataFrame([pd.NA]), df1, pd.DataFrame([pd.NA])])).astype(
{
"start": pd.Int64Dtype(),
"end":
|
pd.Int64Dtype()
|
pandas.Int64Dtype
|
#!/usr/bin/python
# Imports
import pandas as pd
import numpy as np
from collections import Counter
import tqdm
import math, os
from sklearn.metrics import mean_squared_error
from scipy.sparse.csgraph import minimum_spanning_tree as mst_nsim
from scipy.sparse.linalg import svds
from scipy.sparse import csr_matrix
from scipy import sparse
import implicit
from .data import preprocess_binary
# Methods
def compute_rmse(preds, ground_truth):
grouped = pd.DataFrame({'count' : ground_truth.groupby(['user_nickname','town'])['town'].apply(len)}).reset_index()
pred_values = []
real_values = []
for index, row in tqdm.tqdm(grouped.iterrows(), total=grouped.shape[0], position=0):
user_index = preds.index.tolist().index(row['user_nickname'])
town_index = preds.columns.tolist().index(row['town'])
#pred_values.append(predictions[(predictions.index==row['user_nickname'])][row['town']][0])
pred_values.append(preds.iloc[user_index,town_index])
real_values.append(float(row['count']))
rms = math.sqrt(mean_squared_error(real_values, pred_values))
return rms
def compute_precision_recall_N(PR, valid, N):
grouped_val = pd.DataFrame({'count' : valid.groupby(['user_nickname','town'])['town'].apply(len)}).reset_index()
concat_preds = pd.DataFrame()
for interval in range(1000,PR.shape[0]+1000,1000):
flat_preds = pd.melt(PR.iloc[interval-1000:interval],
id_vars='user_nickname',
value_vars=PR.iloc[interval-1000:interval].columns, # list of days of the week
var_name='town',
value_name='predicted_count')
flat_preds['user_nickname'] = PR.iloc[interval-1000:interval].index.tolist() * len(PR.columns)
flat_preds = flat_preds[flat_preds.predicted_count >= 0.]
flat_preds = flat_preds.groupby('user_nickname')[['user_nickname','town','predicted_count']].apply(lambda grp: grp.nlargest(N,'predicted_count'))
concat_preds = pd.concat([concat_preds, flat_preds], axis=0)
tp, fp, fn = 0.,0.,0.
for user in tqdm.tqdm(grouped_val.user_nickname.unique().tolist(), total=len(grouped_val.user_nickname.unique().tolist()), position=0):
tmp_val_df = grouped_val[grouped_val.user_nickname==user]
if tmp_val_df.shape[0] != 0:
tmp_pr_towns = concat_preds[concat_preds.user_nickname==user].town.tolist()
tmp_val_towns = tmp_val_df.town.tolist()
for gt_town in tmp_val_towns:
if gt_town in tmp_pr_towns:
#print('TP')
tp+=1.
elif gt_town not in tmp_pr_towns:
#print('FN')
fn+=1.
for pr_town in tmp_pr_towns[:len(tmp_val_towns)]:
if pr_town not in tmp_val_towns:
fp+=1.
#print('FP')
return tp,fp,fn
def svd_model(user_item_df, latent_dimension, N):
Checkins_demeaned = user_item_df.values/np.mean(user_item_df.values)
U, sigma, Vt = svds(Checkins_demeaned, latent_dimension)
sigma = np.diag(sigma)
all_user_predicted_checkins = np.dot(np.dot(U, sigma), Vt) + np.mean(user_item_df.values)
preds = pd.DataFrame(all_user_predicted_checkins, columns = user_item_df.columns, index=user_item_df.index)
return preds
def implicit_model(user_item_df, train, validate, latent_dimension, N, preproccesing):
if preproccesing==2:
validate = pd.DataFrame({'count' : validate.groupby(['user_nickname','town'])['town'].apply(len)}).reset_index()
validate['count'] = [1]*validate.shape[0]
train = pd.DataFrame({'count' : train.groupby(['user_nickname','town'])['town'].apply(len)}).reset_index()
train['count'] = [1]*train.shape[0]
elif preproccesing==1:
validate = pd.DataFrame({'count' : validate.groupby(['user_nickname','town'])['town'].apply(len)}).reset_index()
train = pd.DataFrame({'count' : train.groupby(['user_nickname','town'])['town'].apply(len)}).reset_index()
# initialize a model
model = implicit.als.AlternatingLeastSquares(factors=latent_dimension)
# train the model on a sparse matrix of item/user/confidence weights
#model.fit(csr_matrix(user_item_df.values.T))
# build the item/user matrix and the id <-> name lookup dictionaries
user_items = csr_matrix(user_item_df).T.tocsr()
user_dict = dict(zip(list(range(len(user_item_df.index.tolist()))), user_item_df.index.tolist()))
user_dict_inverse = dict(zip(user_item_df.index.tolist(), list(range(len(user_item_df.index.tolist())))))
town_dict = dict(zip(list(range(len(user_item_df.columns.tolist()))), user_item_df.columns.tolist()))
town_dict_inverse = dict(zip(user_item_df.columns.tolist(), list(range(len(user_item_df.columns.tolist())))))
item_users = user_items.T
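# NOTE: despite the names, `user_items` above holds an item x user matrix (the user x item
# frame is transposed) and `item_users` holds user x item. Older releases of `implicit`
# (< 0.5) expected fit() to receive item x user and recommend() to receive user x item,
# which appears to be what this code relies on; newer releases expect user x item for both.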
# train the model on a sparse matrix of item/user/confidence weights
print('Training model...')
model.fit(user_items, show_progress=True)
print('Computing RMSE for training set')
rmse_train = rmse_implicit(user_dict, town_dict_inverse, model, user_item_df, train, item_users)
print('Computing RMSE for validation set')
rmse_valid = rmse_implicit(user_dict, town_dict_inverse, model, user_item_df, validate, item_users)
print("Calculating precision, recall on training set")
precisionN_train, recallN_train = prec_recall_implicit(user_dict, town_dict_inverse, town_dict, model, train, item_users, N)
print("Calculating precision, recall on validation set")
precisionN_val, recallN_val = prec_recall_implicit(user_dict, town_dict_inverse, town_dict, model, validate, item_users, N)
return model, precisionN_train, recallN_train, precisionN_val, recallN_val, rmse_train, rmse_valid
def prec_recall_implicit(user_dict, town_dict_inverse, town_dict, model, validate, item_users, N):
tp, fp, fn = 0., 0., 0.
for userid in tqdm.tqdm(list(user_dict.keys())[:3000], total=len(list(user_dict.keys())[:3000]),position=0):
recs = model.recommend(userid, item_users, N=N)
gt = validate[validate.user_nickname==user_dict[userid]].town.unique().tolist()
for enum, recomendation in enumerate(recs):
if enum == len(gt): break
if town_dict[recomendation[0]] in gt:
tp += 1.
elif town_dict[recomendation[0]] not in gt:
fp += 1.
for real in gt:
if town_dict_inverse[real] not in [r[0] for r in recs]:
fn += 1.
print('tp:{}, fp:{}, fn:{}'.format(tp,fp,fn))
precisionN = tp/(tp+fp)
recallN = tp/(tp+fn)
return precisionN, recallN
def rmse_implicit(user_dict, town_dict_inverse, model, user_item_df, validate, item_users):
rmse_pred, rmse_gt = [],[]
for userid in list(user_dict.keys())[:3000]:
recs = model.recommend(userid, item_users, N=len(user_item_df.columns.tolist()))
gt = validate[validate.user_nickname==user_dict[userid]]#.town.unique().tolist()
if len(gt)==0: continue
for ind, row in gt.iterrows():
try:
pred_val = recs[[r[0] for r in recs].index(town_dict_inverse[row['town']])][1]
except (ValueError, KeyError):
# town absent from the recommendations (or unknown town id): skip this entry
print('no prediction for town:', row['town'])
continue
rmse_gt.append(row['count'])
rmse_pred.append(pred_val)
rmse = math.sqrt(mean_squared_error(rmse_gt, rmse_pred))
return rmse
# Class
class locationRec(object):
"""DocString"""
def __init__(self):
self.user_item_df = None
self.train = None
self.validate = None
self.test = None
self.preds = None
self.rmse_train = None
self.rmse_val = None
self.rmse_test = None
self.precision_train = None
self.recall_train = None
self.precision_val = None
self.recall_val = None
self.precision_test = None
self.recall_test = None
self.model = None
self.preproccesing = None
def datapipeline(self, preproccesing=1):
self.preproccesing = preproccesing
if preproccesing==1:
self.user_item_df = pd.read_pickle('User_Item.pckl')
self.train = pd.read_pickle('train.pckl')
self.validate = pd.read_pickle('validate.pckl')
self.test = pd.read_pickle('test.pckl')
if preproccesing==2:
if os.path.isfile('User_Item2.pckl') and os.path.isfile('train2.pckl') and os.path.isfile('validate2.pckl') and os.path.isfile('test2.pckl'):
pass
else:
preprocess_binary()
self.user_item_df = pd.read_pickle('User_Item2.pckl')
self.train = pd.read_pickle('train2.pckl')
self.validate =
|
pd.read_pickle('validate2.pckl')
|
pandas.read_pickle
|
#!/usr/bin/env python3
import argparse
import glob
import numpy as np
import os
import pandas as pd
import quaternion
import sys
import trimesh
import json
from tqdm import tqdm
from scipy.spatial.distance import cdist
import warnings
warnings.filterwarnings("ignore")
__dir__ = os.path.normpath(
os.path.join(
os.path.dirname(os.path.realpath(__file__)), '..')
)
sys.path[1:1] = [__dir__]
top_classes = {
"03211117": "display", "04379243": "table",
"02747177": "trashbin", "03001627": "chair",
# "04256520": "sofa", "02808440": "bathtub",
"02933112": "cabinet", "02871439": "bookshelf"
}
from shapefit.utils.utils import get_validation_appearance, get_symmetries, get_gt_dir, \
get_scannet, get_shapenet, make_M_from_tqs, make_tqs_from_M
# helper function to calculate difference between two quaternions
def calc_rotation_diff(q, q00):
rotation_dot = np.dot(quaternion.as_float_array(q00), quaternion.as_float_array(q))
rotation_dot_abs = np.abs(rotation_dot)
# clip to the valid arccos domain; floating-point overshoot would otherwise produce NaN
error_rotation_rad = 2 * np.arccos(np.clip(rotation_dot_abs, -1.0, 1.0))
error_rotation = np.rad2deg(error_rotation_rad)
return error_rotation
def rotation_error(row):
q = quaternion.quaternion(*row[:4])
q_gt = quaternion.quaternion(*row[4:8])
sym = row[-1]
if sym == "__SYM_ROTATE_UP_2":
m = 2
tmp = [
calc_rotation_diff(q, q_gt * quaternion.from_rotation_vector([0, (i * 2.0 / m) * np.pi, 0]))
for i in range(m)]
return np.min(tmp)
elif sym == "__SYM_ROTATE_UP_4":
m = 4
tmp = [
calc_rotation_diff(q, q_gt * quaternion.from_rotation_vector([0, (i * 2.0 / m) * np.pi, 0]))
for i in range(m)]
return np.min(tmp)
elif sym == "__SYM_ROTATE_UP_INF":
m = 36
tmp = [
calc_rotation_diff(q, q_gt * quaternion.from_rotation_vector([0, (i * 2.0 / m) * np.pi, 0]))
for i in range(m)]
return np.min(tmp)
else:
return calc_rotation_diff(q, q_gt)
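# Sanity-check example (hypothetical, not part of the evaluation pipeline): for a prediction
# q = (1, 0, 0, 0) and a ground truth rotated 90 degrees about the up axis with
# sym == "__SYM_ROTATE_UP_4", one of the four symmetry candidates coincides with q,
# so rotation_error(...) returns ~0 instead of 90.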
def print_to_(verbose, log_file, string):
if verbose:
print(string)
sys.stdout.flush()
with open(log_file, 'a+') as f:
f.write(string + '\n')
def get_init_mesh(scan_id, key):
path = glob.glob(os.path.join(
'/home/ishvlad/workspace/Scan2CAD/MeshDeformation/ARAP/',
'arap_output_GT', scan_id, key + '*', 'init.obj'
))
if len(path) == 0:
return None
return trimesh.load_mesh(path[0], process=False)
def DAME(mesh_1, mesh_2, k=0.59213):
def dihedral(mesh):
unique_faces, _ = np.unique(np.sort(mesh.faces, axis=1), axis=0, return_index=True)
parts_bitriangles_map = []
bitriangles = {}
for face in unique_faces:
edge_1 = tuple(sorted([face[0], face[1]]))
if edge_1 not in bitriangles:
bitriangles[edge_1] = set([face[0], face[1], face[2]])
else:
bitriangles[edge_1].add(face[2])
edge_2 = tuple(sorted([face[1], face[2]]))
if edge_2 not in bitriangles:
bitriangles[edge_2] = set([face[0], face[1], face[2]])
else:
bitriangles[edge_2].add(face[0])
edge_3 = tuple(sorted([face[0], face[2]]))
if edge_3 not in bitriangles:
bitriangles[edge_3] = set([face[0], face[1], face[2]])
else:
bitriangles[edge_3].add(face[1])
bitriangles_aligned = np.empty((len(mesh.edges_unique), 4), dtype=int)
for j, edge in enumerate(mesh.edges_unique):
bitriangle = [*sorted(edge)]
bitriangle += [x for x in list(bitriangles[tuple(sorted(edge))]) if x not in bitriangle]
bitriangles_aligned[j] = bitriangle
vertices_bitriangles_aligned = mesh.vertices[bitriangles_aligned]
normals_1 = np.cross((vertices_bitriangles_aligned[:, 2] - vertices_bitriangles_aligned[:, 0]),
(vertices_bitriangles_aligned[:, 2] - vertices_bitriangles_aligned[:, 1]))
normals_1 = normals_1 / np.sqrt(np.sum(normals_1 ** 2, axis=1)[:, None])
normals_2 = np.cross((vertices_bitriangles_aligned[:, 3] - vertices_bitriangles_aligned[:, 0]),
(vertices_bitriangles_aligned[:, 3] - vertices_bitriangles_aligned[:, 1]))
normals_2 = normals_2 / np.sqrt(np.sum(normals_2 ** 2, axis=1)[:, None])
n1_n2_arccos = np.arccos(np.sum(normals_1 * normals_2, axis=1).clip(-1, 1))
n1_n2_signs = np.sign(
np.sum(normals_1 * (vertices_bitriangles_aligned[:, 3] - vertices_bitriangles_aligned[:, 1]), axis=1))
D_n1_n2 = n1_n2_arccos * n1_n2_signs
return D_n1_n2
D_mesh_1 = dihedral(mesh_1)
D_mesh_2 = dihedral(mesh_2)
mask_1 = np.exp((k * D_mesh_1) ** 2)
per_edge = np.abs(D_mesh_1 - D_mesh_2) * mask_1
result = np.sum(per_edge) / len(mesh_1.edges_unique)
return result, per_edge
def calc_ATSD(dists, border):
return np.minimum(dists.min(1), border).mean()
def calc_F1(dists, border):
return np.sum(dists.min(1) < border) / len(dists)
def calc_CD(dists, border):
return max(np.minimum(dists.min(1), border).mean(), np.minimum(dists.min(0), border).mean())
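# The three helpers above summarize point-to-surface distances, all truncated at `border`:
# ATSD: mean one-sided distance from the shape vertices to the cropped scan,
# CD:   the larger of the two one-sided truncated mean distances (a chamfer-style distance),
# F1:   fraction of shape vertices whose nearest scan point lies within `border`.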
def calc_metric(scan_mesh, shape_mesh, method='all', border=0.1):
area = border * 2
# get scan bbox
bbox = np.array([shape_mesh.vertices.min(0), shape_mesh.vertices.max(0)])
bbox += [[-area], [area]]
batch = np.array([np.diag(bbox[0]), np.diag(bbox[1]), np.eye(3), -np.eye(3)])
slice_mesh = scan_mesh.copy()
# xyz
for i in range(3):
slice_mesh = slice_mesh.slice_plane(batch[0, i], batch[2, i])
slice_mesh = slice_mesh.slice_plane(batch[1, i], batch[3, i])
if len(slice_mesh.vertices) == 0:
if method == 'all':
return {'ATSD': border, 'CD': border, 'F1': 0.0}
else:
return border
scan_vertices = np.array(slice_mesh.vertices)
if len(scan_vertices) > 20000:
scan_vertices = scan_vertices[::len(scan_vertices) // 20000]
dists = cdist(np.array(shape_mesh.vertices), scan_vertices, metric='minkowski', p=1)
if method == 'ATSD':
return calc_ATSD(dists, border)
elif method == 'CD':
return calc_CD(dists, border)
elif method == 'F1':
return calc_F1(dists, border)
else:
return {
'ATSD': calc_ATSD(dists, border),
'CD': calc_CD(dists, border),
'F1': calc_F1(dists, border),
}
def metric_on_deformation(options):
output_name = options.output_name + '_' + str(options.border) + \
'_' + str(options.val_set) + '_' + str(options.metric_type)
if options.output_type == 'align':
# load needed models
appearance = get_validation_appearance(options.val_set)
# LOAD list of all aligned scenes
csv_files = glob.glob(os.path.join(options.input_dir, '*.csv'))
scenes = [x.split('/')[-1][:-4] for x in csv_files]
# Which scenes do we want to calculate?
scenes = np.intersect1d(scenes, list(appearance.keys()))
batch = []
for s in scenes:
df_scan = pd.read_csv(
os.path.join(options.input_dir, s + '.csv'),
index_col=0, dtype={'objectCategory': str}
)
# Filter: take only objects from appearance
df_scan['key'] = df_scan.objectCategory + '_' + df_scan.alignedModelId
df_scan = df_scan[np.in1d(df_scan['key'].values, list(appearance[s].keys()))]
batch.extend([{
'scan_id': s,
'key': row['key'],
'objectCategory': row['objectCategory'],
'alignedModelId': row['alignedModelId'],
'path': 'path to origin ShapeNet mesh',
'object_num': i,
'T': [row['tx'], row['ty'], row['tz']],
'Q': [row['qw'], row['qx'], row['qy'], row['qz']],
'S': [row['sx'], row['sy'], row['sz']]
} for i, row in df_scan.iterrows()])
df = pd.DataFrame(batch)
else:
# LOAD list of all aligned scenes
in_files = glob.glob(os.path.join(options.input_dir, 'scene*/*/approx.obj'))
if len(in_files) == 0:
in_files = glob.glob(os.path.join(options.input_dir, '*/scene*/*/approx.obj'))
info = []
for x in in_files:
parts = x.split('/')[-3:-1]
if len(parts[1].split('_')) == 3:
category_id, shape_id, object_num = parts[1].split('_')
else:
category_id, shape_id = parts[1].split('_')
object_num = -1
row = [
parts[0], # scan_id
category_id + '_' + shape_id, # key
category_id,
shape_id,
object_num,
x, # path
]
info.append(row)
df = pd.DataFrame(info, columns=['scan_id', 'key', 'objectCategory', 'alignedModelId', 'object_num', 'path'])
transform_files = ['/'.join(x.split('/')[:-1]) + '/transform.json' for x in in_files]
Ts, Qs, Ss = [], [], []
for f in transform_files:
if os.path.exists(f):
matrix = np.array(json.load(open(f, 'rb'))['transform'])
else:
Ts.append(None)
Qs.append(None)
Ss.append(None)
continue
t, q, s = make_tqs_from_M(matrix)
q = quaternion.as_float_array(q)
Ts.append(t)
Qs.append(q)
Ss.append(s)
df['T'] = Ts
df['Q'] = Qs
df['S'] = Ss
metrics = {}
batch = df.groupby('scan_id')
if options.verbose:
batch = tqdm(batch, desc='Scenes')
# CALCULATE METRICS
for scan_id, df_scan in batch:
scan_mesh = get_scannet(scan_id, 'mesh')
scan_batch = df_scan.iterrows()
if options.verbose:
scan_batch = tqdm(scan_batch, total=len(df_scan), desc='Shapes', leave=False)
for i, row in scan_batch:
if options.output_type == 'align':
shape_mesh = get_shapenet(row['objectCategory'], row['alignedModelId'], 'mesh')
else:
try:
shape_mesh = trimesh.load_mesh(row['path'])
except Exception:
metrics[i] = {'ATSD': np.nan, 'CD': np.nan, 'F1': np.nan}
continue
if row['T'] is None:
metrics[i] = {'ATSD': np.nan, 'CD': np.nan, 'F1': np.nan}
continue
T = make_M_from_tqs(row['T'], row['Q'], row['S'])
shape_mesh.apply_transform(T)
metrics[i] = calc_metric(scan_mesh, shape_mesh, border=options.border)
df_final = df.merge(
|
pd.DataFrame(metrics)
|
pandas.DataFrame
|
# License: Apache-2.0
import databricks.koalas as ks
import numpy as np
import pandas as pd
import pytest
from pandas.testing import assert_frame_equal
from gators.feature_generation.plane_rotation import PlaneRotation
ks.set_option("compute.default_index_type", "distributed-sequence")
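# The expected values in the fixtures below are consistent with a plain 2-D rotation of each
# coordinate pair (x, y) by theta degrees: x' = x*cos(theta) - y*sin(theta),
# y' = x*sin(theta) + y*cos(theta); e.g. for (X, Y) = (200, 140) and theta = 45 degrees,
# x' = 60*cos(45) ~= 42.43 and y' = 340*sin(45) ~= 240.42.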
@pytest.fixture
def data():
X = pd.DataFrame(
[[200.0, 140.0, 100.0], [210.0, 160.0, 125.0]], columns=list("XYZ")
)
X_expected = pd.DataFrame(
{
"X": {0: 200.0, 1: 210.0},
"Y": {0: 140.0, 1: 160.0},
"Z": {0: 100.0, 1: 125.0},
"XY_x_45deg": {0: 42.42640687119287, 1: 35.35533905932739},
"XY_y_45deg": {0: 240.41630560342614, 1: 261.62950903902254},
"XY_x_60deg": {0: -21.243556529821376, 1: -33.56406460551014},
"XY_y_60deg": {0: 243.20508075688775, 1: 261.8653347947321},
"XZ_x_45deg": {0: 70.71067811865477, 1: 60.104076400856556},
"XZ_y_45deg": {0: 212.13203435596424, 1: 236.8807716974934},
"XZ_x_60deg": {0: 13.397459621556166, 1: -3.253175473054796},
"XZ_y_60deg": {0: 223.20508075688775, 1: 244.36533479473212},
}
)
obj = PlaneRotation(columns=[["X", "Y"], ["X", "Z"]], theta_vec=[45, 60]).fit(X)
return obj, X, X_expected
@pytest.fixture
def data_float32():
X = pd.DataFrame([[200, 140, 100], [210, 160, 125]], columns=list("XYZ")).astype(
np.float32
)
X_expected = pd.DataFrame(
{
"X": {0: 200.0, 1: 210.0},
"Y": {0: 140.0, 1: 160.0},
"Z": {0: 100.0, 1: 125.0},
"XY_x_45deg": {0: 42.42640687119287, 1: 35.35533905932739},
"XY_y_45deg": {0: 240.41630560342614, 1: 261.62950903902254},
"XY_x_60deg": {0: -21.243556529821376, 1: -33.56406460551014},
"XY_y_60deg": {0: 243.20508075688775, 1: 261.8653347947321},
"XZ_x_45deg": {0: 70.71067811865477, 1: 60.104076400856556},
"XZ_y_45deg": {0: 212.13203435596424, 1: 236.8807716974934},
"XZ_x_60deg": {0: 13.397459621556166, 1: -3.253175473054796},
"XZ_y_60deg": {0: 223.20508075688775, 1: 244.36533479473212},
}
).astype(np.float32)
obj = PlaneRotation(
columns=[["X", "Y"], ["X", "Z"]], theta_vec=[45, 60], dtype=np.float32
).fit(X)
return obj, X, X_expected
@pytest.fixture
def data_ks():
X = ks.DataFrame(
[[200.0, 140.0, 100.0], [210.0, 160.0, 125.0]], columns=list("XYZ")
)
X_expected = pd.DataFrame(
{
"X": {0: 200.0, 1: 210.0},
"Y": {0: 140.0, 1: 160.0},
"Z": {0: 100.0, 1: 125.0},
"XY_x_45deg": {0: 42.42640687119287, 1: 35.35533905932739},
"XY_y_45deg": {0: 240.41630560342614, 1: 261.62950903902254},
"XY_x_60deg": {0: -21.243556529821376, 1: -33.56406460551014},
"XY_y_60deg": {0: 243.20508075688775, 1: 261.8653347947321},
"XZ_x_45deg": {0: 70.71067811865477, 1: 60.104076400856556},
"XZ_y_45deg": {0: 212.13203435596424, 1: 236.8807716974934},
"XZ_x_60deg": {0: 13.397459621556166, 1: -3.253175473054796},
"XZ_y_60deg": {0: 223.20508075688775, 1: 244.36533479473212},
}
)
obj = PlaneRotation(columns=[["X", "Y"], ["X", "Z"]], theta_vec=[45, 60]).fit(X)
return obj, X, X_expected
@pytest.fixture
def data_float32_ks():
X = ks.DataFrame([[200, 140, 100], [210, 160, 125]], columns=list("XYZ")).astype(
np.float32
)
X_expected = pd.DataFrame(
{
"X": {0: 200.0, 1: 210.0},
"Y": {0: 140.0, 1: 160.0},
"Z": {0: 100.0, 1: 125.0},
"XY_x_45deg": {0: 42.42640687119287, 1: 35.35533905932739},
"XY_y_45deg": {0: 240.41630560342614, 1: 261.62950903902254},
"XY_x_60deg": {0: -21.243556529821376, 1: -33.56406460551014},
"XY_y_60deg": {0: 243.20508075688775, 1: 261.8653347947321},
"XZ_x_45deg": {0: 70.71067811865477, 1: 60.104076400856556},
"XZ_y_45deg": {0: 212.13203435596424, 1: 236.8807716974934},
"XZ_x_60deg": {0: 13.397459621556166, 1: -3.253175473054796},
"XZ_y_60deg": {0: 223.20508075688775, 1: 244.36533479473212},
}
).astype(np.float32)
obj = PlaneRotation(
columns=[["X", "Y"], ["X", "Z"]], theta_vec=[45, 60], dtype=np.float32
).fit(X)
return obj, X, X_expected
def test_pd(data):
obj, X, X_expected = data
X_new = obj.transform(X)
assert_frame_equal(X_new, X_expected)
@pytest.mark.koalas
def test_ks(data_ks):
obj, X, X_expected = data_ks
X_new = obj.transform(X)
assert_frame_equal(X_new.to_pandas(), X_expected)
def test_pd_np(data):
obj, X, X_expected = data
X_numpy_new = obj.transform_numpy(X.to_numpy())
X_new = pd.DataFrame(X_numpy_new)
assert np.allclose(X_new, X_expected)
@pytest.mark.koalas
def test_ks_np(data_ks):
obj, X, X_expected = data_ks
X_numpy_new = obj.transform_numpy(X.to_numpy())
X_new = pd.DataFrame(X_numpy_new)
assert np.allclose(X_new, X_expected)
def test_float32_pd(data_float32):
obj, X, X_expected = data_float32
X_new = obj.transform(X)
|
assert_frame_equal(X_new, X_expected)
|
pandas.testing.assert_frame_equal
|
import streamlit as st
import numpy as np
import pandas as pd # TODO this should be covered by the DataManager
import xarray as xr # TODO this should be covered by the DataManager
import matplotlib.pyplot as plt # TODO this should be moved into plotting submodule
from ruins import components
def load_alldata():
weather = xr.load_dataset('data/weather.nc')
climate = xr.load_dataset('data/cordex_coast.nc')
# WARNING - bug fix for now:
# 'HadGEM2-ES' model runs are problematic and will be removed for now
# The issue is with the timestamp and requires revision of the ESGF reading routines
kys = [s for s in list(climate.keys()) if 'HadGEM2-ES' not in s] #remove all entries of HadGEM2-ES (6 entries)
climate = climate[kys]
return weather, climate
def applySDM(wdata, data, meth='rel', cdf_threshold=0.9999999, lower_limit=0.1):
'''apply structured distribution mapping to climate data and return unbiased version of dataset'''
from sdm import SDM
data_ub = data.copy()
for k in data_ub.columns:
data_col = data_ub[k].dropna()
overlapx = pd.concat(
[wdata.loc[data_col.index[0]:wdata.index[-1]], data_col.loc[data_col.index[0]:wdata.index[-1]]], axis=1)
overlapx.columns = ['obs', 'cm']
overlapx = overlapx.dropna()
try:
data_ub[k] = SDM(overlapx.obs, overlapx.cm, data_col, meth, cdf_threshold, lower_limit)
except Exception:  # if SDM fails for this ensemble member, mask it with NaNs
data_ub[k] = data_ub[k] * np.nan
data_ub[data_ub == 0.0000000] = np.nan
data_ub = data_ub.loc[data_ub.index[0]:pd.to_datetime('2099-12-31 23:59:59')]
return data_ub
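# Hypothetical usage (names are only illustrative): given an observed weather series `w` and a
# climate-model DataFrame `cm` covering an overlapping period,
# cm_unbiased = applySDM(w, cm, meth='abs') would return the bias-corrected ensemble.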
def ub_climate(cdata, wdata, ub=True):
varis = ['T', 'Tmax', 'Tmin', 'aP', 'Prec', 'RH', 'Rs', 'u2', 'EToHG']
firstitem = True
for vari in varis:
data = cdata.sel(vars=vari).to_dataframe()
data = data[data.columns[data.columns != 'vars']]
if (vari == 'T') | (vari == 'Tmax') | (vari == 'Tmin'):
meth = 'abs'
else:
meth = 'rel'
if ub:
wdatax = wdata.sel(vars=vari).to_dataframe().iloc[:, -1].dropna()
data_ub = applySDM(wdatax, data, meth=meth)
else:
data_ub = data
data_ubx = data_ub.mean(axis=1)
data_ubx.name = vari  # a Series has no .columns; set its name so the later concat yields labelled columns
if firstitem:
data_ubc = data_ubx
firstitem = False
else:
data_ubc =
|
pd.concat([data_ubc, data_ubx], axis=1)
|
pandas.concat
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import requests, re, smtplib, time
from bs4 import BeautifulSoup
import pandas as pd
from itertools import zip_longest
from email.message import EmailMessage
#Part 1: Gather the information
#Get the trivia questions
url = 'https://www.opinionstage.com/blog/trivia-questions/'
response = requests.get(url)
html_text = response.text
soup=BeautifulSoup(html_text,'html.parser')
#Print a response code --> 200 means we've accessed the page
print("Response Code: {}".format(requests.get(url)))
#Get the questions from this page
questions = [things.text for item in soup.find_all('p') for things in item.find_all('strong') if re.search(r"^\d+\.\s*", things.text)]
#Get the answers
answers = [item.text for item in soup.find_all('p') if "Answer:" in item.text]
'''
Turns out one answer in this list isn't formatted like "Answer: ...", so it gets missed above.
Figured this out by checking the lengths of questions and answers (249 vs 248).
Then I did list(zip(questions[:249], answers)) and compared the pairs against what
was on the page. Not that hard to do: everything is in order, so you just have to spot where an
answer stops making sense for its question. That's where the issue is.
'''
'''
It was the question "What does DC stand for?"; the answer is "Detective Comics".
It should sit at index 69 of answers, so we insert it there.
'''
answers.insert(69, "Detective Comics")
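# A minimal sketch of the diagnostic described above (kept commented out so the script still
# runs end to end; it only uses names already defined in this file):
# print(len(questions), len(answers))            # 249 vs 248 -> one answer is missing
# for q, a in zip_longest(questions, answers):
#     print(q, '|', a)                           # scan for the point where the pairs stop matching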
#Zip the questions and answers together
QA = list(zip(questions,answers))
#Save it to a csv file
pd.DataFrame(QA).to_csv('Questions_and_Answers.csv',index=False)
#Part 2: Get 5 Random Trivia questions and email it to me
#Read csv and turn it into a list of lists.
Q_and_A =
|
pd.read_csv('Questions_and_Answers.csv')
|
pandas.read_csv
|
# coding=utf-8
# pylint: disable-msg=E1101,W0612
from datetime import datetime, timedelta
import operator
import numpy as np
import pytest
import pandas.compat as compat
from pandas.compat import range
import pandas as pd
from pandas import (
Categorical, DataFrame, Index, NaT, Series, bdate_range, date_range, isna)
from pandas.core import ops
import pandas.core.nanops as nanops
import pandas.util.testing as tm
from pandas.util.testing import (
assert_almost_equal, assert_frame_equal, assert_series_equal)
from .common import TestData
class TestSeriesLogicalOps(object):
@pytest.mark.parametrize('bool_op', [operator.and_,
operator.or_, operator.xor])
def test_bool_operators_with_nas(self, bool_op):
# boolean &, |, ^ should work with object arrays and propagate NAs
ser = Series(bdate_range('1/1/2000', periods=10), dtype=object)
ser[::2] = np.nan
mask = ser.isna()
filled = ser.fillna(ser[0])
result = bool_op(ser < ser[9], ser > ser[3])
expected = bool_op(filled < filled[9], filled > filled[3])
expected[mask] = False
assert_series_equal(result, expected)
def test_operators_bitwise(self):
# GH#9016: support bitwise op for integer types
index = list('bca')
s_tft = Series([True, False, True], index=index)
s_fff = Series([False, False, False], index=index)
s_tff = Series([True, False, False], index=index)
s_empty = Series([])
# TODO: unused
# s_0101 = Series([0, 1, 0, 1])
s_0123 = Series(range(4), dtype='int64')
s_3333 = Series([3] * 4)
s_4444 = Series([4] * 4)
res = s_tft & s_empty
expected = s_fff
assert_series_equal(res, expected)
res = s_tft | s_empty
expected = s_tft
assert_series_equal(res, expected)
res = s_0123 & s_3333
expected = Series(range(4), dtype='int64')
|
assert_series_equal(res, expected)
|
pandas.util.testing.assert_series_equal
|
import torch
import torch.nn.functional as F
import pandas as pd
import numpy as np
from torch_geometric.data import Data
from torch_geometric.nn import GCNConv, PairNorm
from torch_geometric.utils.undirected import to_undirected
import random
import matplotlib.pyplot as plt
data_name = 'citeseer' # 'cora' or 'citeseer'
data_edge_path = f'datasets/{data_name}/{data_name}.cites'
data_content_path = f'datasets/{data_name}/{data_name}.content'
raw_content =
|
pd.read_table(data_content_path, header=None, dtype={0:np.str})
|
pandas.read_table
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Apr 14 14:26:14 2019
@author: ranahamzaintisar
"""
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from scipy.linalg import svd
from sklearn.metrics import mean_squared_error as mse
from sklearn.metrics import accuracy_score as acc
import random
from sklearn.utils import shuffle
'''functions built'''
## Function to split into training and test dataset and create target vector z:
def training_test_split(dataset):
split_data = np.array_split(dataset,10)
training =[]
test = []
for i in range(len(split_data)):
np.random.shuffle(split_data[i])
train_test_split = np.array_split(split_data[i],2)
for item in train_test_split[0]:
if i == 0:
new = np.append(item,10) #class label 10 for digit 0
training.append(new)
else:
new = np.append(item,i) # class labels for other digits
training.append(new)
for item in train_test_split[1]:
if i == 0:
new = np.append(item,10)
test.append(new)
else:
new = np.append(item,i)
test.append(new)
# Training dataset with target vector Z
training_dataset = pd.DataFrame(training)
training_dataset[240] = training_dataset[240].astype('category') # make class label as category
##create dummy variables for the categorical variable i.e target vectors
training_dataset = pd.get_dummies(training_dataset, dummy_na=True, prefix_sep='_' )
## drop nan dummy columns if created
training_dataset = training_dataset.loc[:, training_dataset.nunique(axis=0) > 1]
# Test dataset with target vector Z
test_dataset = pd.DataFrame(test)
test_dataset[240] = test_dataset[240].astype('category') # make class label as category
##create dummy variables for the categorical variable i.e target vectors
test_dataset = pd.get_dummies(test_dataset, dummy_na=True, prefix_sep='_' )
## drop nan dummy columns if created
test_dataset = test_dataset.loc[:, test_dataset.nunique(axis=0) > 1]
return training_dataset , test_dataset
## function to seperate feature vectors from binary target vectors
def split_features_labels(data):
label_col = [x for x in data.columns if isinstance(x, str)]
return (data.drop(label_col, axis=1),
data[label_col])
def split_features_labels_cv(data):
label_col = [x for x in data.columns if x>239]
return (data.drop(label_col, axis=1),
data[label_col])
## function to center the data
def center(df):
cols = df.columns
for field in cols:
mean_field = df[field].mean()
# account for constant columns
if np.all(df[field] - mean_field != 0):
df.loc[:, field] = (df[field] - mean_field)
return df
## Function to find the correlation matrix of the centered data:
def coor_c(df):
df_matrix = df.as_matrix()
df_matrix_transpose = df_matrix.transpose()
coor_matrix = np.dot(df_matrix_transpose,df_matrix)
n = coor_matrix.shape[1]
normal_coor_matrix = np.multiply(coor_matrix,1/n)
return normal_coor_matrix
## Function computing the eigenvalues and right eigenvectors of the correlation matrix
# and returning them in descending order
def eigen(coor_matrix):
#compute the eigen vector and values
eig_val_cov, eig_vec_cov = np.linalg.eig(coor_matrix)  # use the argument, not a module-level variable
## sort eigen vector and eigen values from high to low
# Make a list of (eigenvalue, eigenvector) tuples
eig_pairs = [(np.abs(eig_val_cov[i]), eig_vec_cov[:,i]) for i in range(len(eig_val_cov))]
# Sort the (eigenvalue, eigenvector) tuples from high to low
eig_pairs.sort(key=lambda x: x[0], reverse=True)
#seperate the sorted pair
eigen_val_decending =[]
for i in eig_pairs:
eigen_val_decending.append(i[0])
eigen_vec_decending = []
for i in eig_pairs:
eigen_vec_decending.append(i[1])
return eigen_val_decending,eigen_vec_decending
## function to return the desired number of PC features, padded with a bias column
def pc_features(eigen_vec,eigen_val,centered_data,num_pc):
s_pc = num_pc
pc_vectors = np.stack(eigen_vec[0:s_pc],axis=0)
pc_eigen_val = np.stack(eigen_val[0:s_pc],axis=0)
pc_features = np.dot(pc_vectors,centered_data.as_matrix().transpose()).transpose()
#add bias to the features:
feat_df= pd.DataFrame(pc_features)
bias = np.full(pc_features.shape[0],1)
feat_df['bias']=bias
features = feat_df.as_matrix()
return features,pc_eigen_val
## Ridge regression function using formula 39 ML notes
def ridge_reg(features,target,a):
## set up the ridge normal equations (no SVD is actually computed here)
semi_def_matrix = np.dot(features,features.transpose())
target_matrix = target.as_matrix()
num_data=semi_def_matrix.shape[0]
identity_matrix = np.identity(num_data)
alpha = a
alpha_sq= alpha**2
r_mat = alpha_sq*identity_matrix
ridge_matrix = semi_def_matrix+r_mat
ridge_matrix_inv = np.linalg.inv(ridge_matrix)
wopt_inv= np.matmul(np.matmul(ridge_matrix_inv,features).transpose(),target_matrix)
wopt = wopt_inv.transpose()
## use the wopt to find hypothesis vectors
hypothesis_matrix = np.matmul(wopt,features.transpose()).transpose()
## use hypothesis vectors to find prediction
prediction = []
for row in hypothesis_matrix:
pred = np.zeros_like(row,dtype='int')
index = np.argmax(row)
pred[index]=1
prediction.append(pred)
df_pred = pd.DataFrame(prediction)
pred_matrix = df_pred.as_matrix()
return pred_matrix , target_matrix
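# Reading the function above, the closed form it implements for the ridge weights appears to be
# W_opt = Z^T (Phi Phi^T + alpha^2 I)^(-1) Phi,
# where Phi is the (n_samples x n_features) feature matrix (PC scores plus the bias column),
# Z is the (n_samples x n_classes) one-hot target matrix and alpha the regularisation strength;
# predictions are then the argmax over the rows of Phi W_opt^T.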
def misclass_rate(pred,actual):
return 1-((sum(np.array([np.argmax(a) for a in pred])==np.array([np.argmax(a) for a in actual]))).astype("float")/len(actual))
def meansq_error(pred,actual):
return np.mean((pred - actual)**2)
##cross validation with alpha
def cv_ridge(dataset,no_fold,tune_grid,numdim):
#take the training dataframe with the target vectors
cv_df = dataset.copy()
# make k fold splits
a = tune_grid  # grid of alpha values to cross-validate over
mse_tr_a = []
m_tr_a=[]
mse_val_a = []
m_val_a =[]
for alpha in a:
k = no_fold
num_dig = int(cv_df.shape[0])
size = int(num_dig/k)
mse_tr =[]
m_tr=[]
mse_val = []
m_val = []
for i in range (k):
cv_new = shuffle(cv_df.values)
test_indices = [x for x in range(i*size,size+(i*size))]
train_indices = range(0,num_dig)
#remove the test indices from the train set
train_indices = [x for x in train_indices if x not in test_indices]
train_cv = pd.DataFrame(cv_new[train_indices])
test_cv = pd.DataFrame(cv_new[test_indices])
##fit the model on training data
# split into initial features and target vectors
feature_train,target_train = split_features_labels_cv(train_cv)
feature_val,target_val= split_features_labels_cv(test_cv)
#center the feature vectors for PCA
centered_train = center(feature_train)
centered_test = center(feature_val)
#find the coorelation matrix (240,240) matrix size
coorelation_matrix_train = coor_c(centered_train)
# Find the eigenvectors and eigen values of the coorelation matrix
eig_val,eig_vec = eigen(coorelation_matrix_train)
# number of PCA features selected=20
# compute the projections of original image vectors in the selected PC directions
feat,pc_eigen_val = pc_features(eig_vec,eig_val,centered_train,numdim)
feat_val,pc_eig_v = pc_features(eig_vec,eig_val,centered_test,numdim)
## run the ridge regression on and compute MSEtrain and MISStrain
# ridge regression
reg_pred_train, reg_target_train = ridge_reg(feat,target_train,alpha)
#MSE
mse_train = meansq_error(reg_pred_train,reg_target_train)
mse_tr.append(mse_train)
#MISS
miss_train = misclass_rate(reg_pred_train,reg_target_train)
m_tr.append(miss_train)
#Predict for validation set
#fit the ridge reg model
reg_pred_val, reg_target_val = ridge_reg(feat_val,target_val,alpha)
#MSE
ms_val = meansq_error(reg_pred_val,reg_target_val)
mse_val.append(ms_val)
#MISS
miss_val = misclass_rate(reg_pred_val,reg_target_val)
m_val.append(miss_val)
mse_tr_a.append(np.mean(mse_tr))
m_tr_a.append(np.mean(m_tr))
mse_val_a.append(np.mean(mse_val))
m_val_a.append(np.mean(m_val))
return mse_tr_a,m_tr_a,mse_val_a,m_val_a
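# A hypothetical call of the cross-validation helper above (the grid values are only
# illustrative): it averages MSE and misclassification rate over the folds for every alpha in
# the grid, here using 20 principal components as in the comment inside the loop:
# mse_tr, miss_tr, mse_val, miss_val = cv_ridge(training_dataset, 5, [0.01, 0.1, 1.0, 10.0], 20)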
def cv_ridge_kmeans(dataset,no_fold,tune_grid,cnum):
cv_df = dataset.copy()
# make k fold splits
a = tune_grid
mse_tr_a = []
m_tr_a=[]
mse_val_a = []
m_val_a =[]
for alpha in a:
k = no_fold
num_dig = int(cv_df.shape[0])
size = int(num_dig/k)
mse_tr =[]
m_tr=[]
mse_val = []
m_val = []
for i in range (k):
cv_new = shuffle(cv_df.values)
test_indices = [x for x in range(i*size,size+(i*size))]
train_indices = range(0,num_dig)
#remove the test indices from the train set
train_indices = [x for x in train_indices if x not in test_indices]
train_cv = pd.DataFrame(cv_new[train_indices])
test_cv = pd.DataFrame(cv_new[test_indices])
##fit the model on training data
# split into initial features and target vectors
feature_train,target_train = split_features_labels_cv(train_cv)
feature_val,target_val= split_features_labels_cv(test_cv)
# use the Kmeans for feature selection
new_feat = kmeans_algorithm(feature_train.as_matrix(),cnum)
new_feat_v = kmeans_algorithm(feature_val.as_matrix(),cnum)
## run the ridge regression on and compute MSEtrain and MISStrain
# ridge regression
reg_pred_train, reg_target_train = ridge_reg(new_feat,target_train,alpha)
#MSE
mse_train = meansq_error(reg_pred_train,reg_target_train)
mse_tr.append(mse_train)
#MISS
miss_train = misclass_rate(reg_pred_train,reg_target_train)
m_tr.append(miss_train)
#Predict for validation set
#fit the ridge reg model
reg_pred_val, reg_target_val = ridge_reg(new_feat_v,target_val,alpha)
#MSE
ms_val = meansq_error(reg_pred_val,reg_target_val)
mse_val.append(ms_val)
#MISS
miss_val = misclass_rate(reg_pred_val,reg_target_val)
m_val.append(miss_val)
mse_tr_a.append(np.mean(mse_tr))
m_tr_a.append(np.mean(m_tr))
mse_val_a.append(np.mean(mse_val))
m_val_a.append(np.mean(m_val))
return mse_tr_a,m_tr_a,mse_val_a,m_val_a
### crossvalidation with feature number(PCA features)
def cv_features(dataset,no_fold,tune_grid,alpha):
cv_df = dataset.copy()
a = tune_grid
mse_tr_a = []
m_tr_a=[]
mse_val_a = []
m_val_a =[]
for dimnum in a:
k = no_fold
num_dig = int(cv_df.shape[0])
size = int(num_dig/k)
mse_tr =[]
m_tr=[]
mse_val = []
m_val = []
for i in range (k):
cv_new = shuffle(cv_df.values)
test_indices = [x for x in range(i*size,size+(i*size))]
train_indices = range(0,num_dig)
#remove the test indices from the train set
train_indices = [x for x in train_indices if x not in test_indices]
train_cv =
|
pd.DataFrame(cv_new[train_indices])
|
pandas.DataFrame
|
"""
Tests for the ItemList class.
"""
import os
import pandas as pd
import pytest
import textwrap
from .context import tohu
from tohu.item_list import ItemList
from tohu.custom_generator import CustomGenerator
from tohu.generators import SelectOne, Float, HashDigest, Integer, Sequential, Timestamp
class TestItemList:
@pytest.mark.parametrize("items, N", [
([42, 23, "Hello", 12, "foobar"], 5),
(range(100), 100),
])
def test_converting_item_list_to_list_returns_original_items(self, items, N):
"""
Converting an ItemList to a list returns the original items.
"""
c = ItemList(items, N)
assert list(c) == list(items)
def test_length(self):
c = ItemList(["hello", "world", "foobar", "quux"], 4)
assert len(c) == 4
def test_indexing(self):
c = ItemList(["hello", "world", "foobar", "quux"], 4)
assert c[0] == "hello"
assert c[1] == "world"
assert c[2] == "foobar"
assert c[3] == "quux"
def test_write_csv(self, tmpdir):
filename1 = tmpdir.join("output_without_header.csv").strpath
filename2 = tmpdir.join("output_with_default_header.csv").strpath
filename3 = tmpdir.join("output_with_custom_header.csv").strpath
class FoobarGenerator(CustomGenerator):
aaa = HashDigest(length=6)
bbb = Sequential(prefix="Foo_", digits=2)
ccc = Integer(0, 100)
class QuuxGenerator(CustomGenerator):
def __init__(self, foo_items):
self.foo1 = SelectOne(foo_items)
self.foo2 = SelectOne(foo_items)
self.date_str = Timestamp(start="2006-01-01", end="2017-09-01", fmt='%d-%b-%y')
foo = FoobarGenerator()
foo_items = foo.generate(10, seed=12345)
quux = QuuxGenerator(foo_items)
csv_fields = {
'Column_1': 'foo1.aaa',
'Column_2': 'foo1.bbb',
'Column_3': 'foo2.aaa',
'Column_4': 'foo2.ccc',
'Column_5': 'date_str',
}
assert not os.path.exists(filename1)
assert not os.path.exists(filename2)
assert not os.path.exists(filename3)
quux_items = quux.generate(5, seed=99999)
quux_items.to_csv(filename1, fields=csv_fields, header=False)
quux_items = quux.generate(5, seed=99999)
quux_items.to_csv(filename2, fields=csv_fields, header=True)
quux_items = quux.generate(5, seed=99999)
quux_items.to_csv(filename3, fields=csv_fields, header="# This is a custom header line")
assert os.path.exists(filename1)
assert os.path.exists(filename2)
assert os.path.exists(filename3)
expected_output_without_header = textwrap.dedent("""\
95F21B,Foo_09,B8E386,66,17-Apr-10
635FDD,Foo_04,C9A8BC,59,18-Feb-08
B8E386,Foo_08,B8E386,66,20-Jul-14
95F21B,Foo_09,FBAC3D,90,10-Jun-15
3F6E19,Foo_01,C9A8BC,59,15-Sep-06
""")
expected_output_with_default_header = \
("Column_1,Column_2,Column_3,Column_4,Column_5\n" +
expected_output_without_header)
expected_output_with_custom_header = \
("# This is a custom header line\n" +
expected_output_without_header)
assert open(filename1).read() == expected_output_without_header
assert open(filename2).read() == expected_output_with_default_header
assert open(filename3).read() == expected_output_with_custom_header
def test_export_dataframe(self):
"""
Test that to_df() produces the expected pandas dataframe.
"""
class QuuxGenerator(CustomGenerator):
c = Sequential(prefix="quux_", digits=2)
d = Float(7., 8.)
e = Integer(low=3000, high=6000)
g = QuuxGenerator()
items = g.generate(N=4, seed=12345)
df_expected = pd.DataFrame({
'c': ['quux_01', 'quux_02', 'quux_03', 'quux_04'],
'd': [7.429949009333706, 7.137420914800083, 7.820307854938839, 7.145680371214801],
'e': [5895, 5318, 4618, 5606],
})
df = items.to_df()
|
pd.testing.assert_frame_equal(df_expected, df)
|
pandas.testing.assert_frame_equal
|
# -*- coding: utf-8 -*-
"""
Authors: <NAME>
UNESCO-IHE 2017
Contact: <EMAIL>
Repository: https://github.com/wateraccounting/wa
Module: Sheets/sheet4
"""
import os
import pandas as pd
import xml.etree.ElementTree as ET
import time
import cairosvg
def create_sheet4(basin, period, units, data, output, template=False, tolerance = 0.01):
"""
Create sheet 4 of the Water Accounting Plus framework.
Parameters
----------
basin : str
The name of the basin.
period : str
The period of analysis.
units : list
A list with strings of the units of the data on sheet 4a and 4b
respectively.
data : list
List with two values pointing to csv files that contain the water data. The csv files have to
follow a specific format. A sample csv is available here:
https://github.com/wateraccounting/wa/tree/master/Sheets/csv
output : list
Filehandles pointing to the jpg files to be created.
template : list or boolean, optional
A list with two entries of the svg files of the sheet. False
uses the standard svg files. Default is False.
tolerance : float, optional
Range used when checking whether different totals match with each other.
Examples
--------
>>> from watools.Sheets import *
>>> create_sheet4(basin='Helmand', period='2007-2011',
units = ['km3/yr', 'km3/yr'],
data = [r'C:\Sheets\csv\Sample_sheet4_part12.csv',
r'C:\Sheets\csv\Sample_sheet4_part12.csv'],
output = [r'C:\Sheets\sheet_4_part1.png',
r'C:\Sheets\sheet_4_part2.png'])
"""
# import WA+ modules
import watools.General.raster_conversions as RC
if data[0] is not None:
df1 = pd.read_csv(data[0], sep=';')
if data[1] is not None:
df2 = pd.read_csv(data[1], sep=';')
# Read csv part 1
if data[0] is not None:
p1 = dict()
p1['sp_r01_c01'] = pd.np.sum([float(df1.loc[(df1.LANDUSE_TYPE == "Irrigated crops")].SUPPLY_SURFACEWATER),
float(df1.loc[(df1.LANDUSE_TYPE == "Irrigated crops")].SUPPLY_GROUNDWATER)])
p1['sp_r02_c01'] = pd.np.sum([float(df1.loc[(df1.LANDUSE_TYPE == "Managed water bodies")].SUPPLY_SURFACEWATER),
float(df1.loc[(df1.LANDUSE_TYPE == "Managed water bodies")].SUPPLY_GROUNDWATER)])
p1['sp_r03_c01'] = pd.np.sum([float(df1.loc[(df1.LANDUSE_TYPE == "Industry")].SUPPLY_SURFACEWATER),
float(df1.loc[(df1.LANDUSE_TYPE == "Industry")].SUPPLY_GROUNDWATER)])
p1['sp_r04_c01'] = pd.np.sum([float(df1.loc[(df1.LANDUSE_TYPE == "Aquaculture")].SUPPLY_SURFACEWATER),
float(df1.loc[(df1.LANDUSE_TYPE == "Aquaculture")].SUPPLY_GROUNDWATER)])
p1['sp_r05_c01'] = pd.np.sum([float(df1.loc[(df1.LANDUSE_TYPE == "Residential")].SUPPLY_SURFACEWATER),
float(df1.loc[(df1.LANDUSE_TYPE == "Residential")].SUPPLY_GROUNDWATER)])
p1['sp_r06_c01'] = pd.np.sum([float(df1.loc[(df1.LANDUSE_TYPE == "Greenhouses")].SUPPLY_SURFACEWATER),
float(df1.loc[(df1.LANDUSE_TYPE == "Greenhouses")].SUPPLY_GROUNDWATER)])
p1['sp_r07_c01'] = pd.np.sum([float(df1.loc[(df1.LANDUSE_TYPE == "Power and Energy")].SUPPLY_SURFACEWATER),
float(df1.loc[(df1.LANDUSE_TYPE == "Power and Energy")].SUPPLY_GROUNDWATER)])
p1['sp_r08_c01'] = pd.np.sum([float(df1.loc[(df1.LANDUSE_TYPE == "Other")].SUPPLY_SURFACEWATER),
float(df1.loc[(df1.LANDUSE_TYPE == "Other")].SUPPLY_GROUNDWATER)])
p1['dm_r01_c01'] = float(df1.loc[(df1.LANDUSE_TYPE == "Irrigated crops")].DEMAND)
p1['dm_r02_c01'] = float(df1.loc[(df1.LANDUSE_TYPE == "Managed water bodies")].DEMAND)
p1['dm_r03_c01'] = float(df1.loc[(df1.LANDUSE_TYPE == "Industry")].DEMAND)
p1['dm_r04_c01'] = float(df1.loc[(df1.LANDUSE_TYPE == "Aquaculture")].DEMAND)
p1['dm_r05_c01'] = float(df1.loc[(df1.LANDUSE_TYPE == "Residential")].DEMAND)
p1['dm_r06_c01'] = float(df1.loc[(df1.LANDUSE_TYPE == "Greenhouses")].DEMAND)
p1['dm_r07_c01'] = float(df1.loc[(df1.LANDUSE_TYPE == "Power and Energy")].DEMAND)
p1['dm_r08_c01'] = float(df1.loc[(df1.LANDUSE_TYPE == "Other")].DEMAND)
p1['sp_r01_c02'] = pd.np.sum([float(df1.loc[(df1.LANDUSE_TYPE == "Irrigated crops")].CONSUMED_ET),
float(df1.loc[(df1.LANDUSE_TYPE == "Irrigated crops")].CONSUMED_OTHER),
float(df1.loc[(df1.LANDUSE_TYPE == "Irrigated crops")].NON_CONVENTIONAL_ET),
float(df1.loc[(df1.LANDUSE_TYPE == "Irrigated crops")].NON_RECOVERABLE_GROUNDWATER),
float(df1.loc[(df1.LANDUSE_TYPE == "Irrigated crops")].NON_RECOVERABLE_SURFACEWATER)])
p1['sp_r02_c02'] = pd.np.sum([float(df1.loc[(df1.LANDUSE_TYPE == "Managed water bodies")].CONSUMED_ET),
float(df1.loc[(df1.LANDUSE_TYPE == "Managed water bodies")].CONSUMED_OTHER),
float(df1.loc[(df1.LANDUSE_TYPE == "Managed water bodies")].NON_CONVENTIONAL_ET),
float(df1.loc[(df1.LANDUSE_TYPE == "Managed water bodies")].NON_RECOVERABLE_GROUNDWATER),
float(df1.loc[(df1.LANDUSE_TYPE == "Managed water bodies")].NON_RECOVERABLE_SURFACEWATER)])
p1['sp_r03_c02'] = pd.np.sum([float(df1.loc[(df1.LANDUSE_TYPE == "Industry")].CONSUMED_ET),
float(df1.loc[(df1.LANDUSE_TYPE == "Industry")].CONSUMED_OTHER),
float(df1.loc[(df1.LANDUSE_TYPE == "Industry")].NON_CONVENTIONAL_ET),
float(df1.loc[(df1.LANDUSE_TYPE == "Industry")].NON_RECOVERABLE_GROUNDWATER),
float(df1.loc[(df1.LANDUSE_TYPE == "Industry")].NON_RECOVERABLE_SURFACEWATER)])
p1['sp_r04_c02'] = pd.np.sum([float(df1.loc[(df1.LANDUSE_TYPE == "Aquaculture")].CONSUMED_ET),
float(df1.loc[(df1.LANDUSE_TYPE == "Aquaculture")].CONSUMED_OTHER),
float(df1.loc[(df1.LANDUSE_TYPE == "Aquaculture")].NON_CONVENTIONAL_ET),
float(df1.loc[(df1.LANDUSE_TYPE == "Aquaculture")].NON_RECOVERABLE_GROUNDWATER),
float(df1.loc[(df1.LANDUSE_TYPE == "Aquaculture")].NON_RECOVERABLE_SURFACEWATER)])
p1['sp_r05_c02'] = pd.np.sum([float(df1.loc[(df1.LANDUSE_TYPE == "Residential")].CONSUMED_ET),
float(df1.loc[(df1.LANDUSE_TYPE == "Residential")].CONSUMED_OTHER),
float(df1.loc[(df1.LANDUSE_TYPE == "Residential")].NON_CONVENTIONAL_ET),
float(df1.loc[(df1.LANDUSE_TYPE == "Residential")].NON_RECOVERABLE_GROUNDWATER),
float(df1.loc[(df1.LANDUSE_TYPE == "Residential")].NON_RECOVERABLE_SURFACEWATER)])
p1['sp_r06_c02'] = pd.np.sum([float(df1.loc[(df1.LANDUSE_TYPE == "Greenhouses")].CONSUMED_ET),
float(df1.loc[(df1.LANDUSE_TYPE == "Greenhouses")].CONSUMED_OTHER),
float(df1.loc[(df1.LANDUSE_TYPE == "Greenhouses")].NON_CONVENTIONAL_ET),
float(df1.loc[(df1.LANDUSE_TYPE == "Greenhouses")].NON_RECOVERABLE_GROUNDWATER),
float(df1.loc[(df1.LANDUSE_TYPE == "Greenhouses")].NON_RECOVERABLE_SURFACEWATER)])
p1['sp_r07_c02'] = pd.np.sum([float(df1.loc[(df1.LANDUSE_TYPE == "Power and Energy")].CONSUMED_ET),
float(df1.loc[(df1.LANDUSE_TYPE == "Power and Energy")].CONSUMED_OTHER),
float(df1.loc[(df1.LANDUSE_TYPE == "Power and Energy")].NON_CONVENTIONAL_ET),
float(df1.loc[(df1.LANDUSE_TYPE == "Power and Energy")].NON_RECOVERABLE_GROUNDWATER),
float(df1.loc[(df1.LANDUSE_TYPE == "Power and Energy")].NON_RECOVERABLE_SURFACEWATER)])
p1['sp_r08_c02'] = pd.np.sum([float(df1.loc[(df1.LANDUSE_TYPE == "Other")].CONSUMED_ET),
float(df1.loc[(df1.LANDUSE_TYPE == "Other")].CONSUMED_OTHER),
float(df1.loc[(df1.LANDUSE_TYPE == "Other")].NON_CONVENTIONAL_ET),
float(df1.loc[(df1.LANDUSE_TYPE == "Other")].NON_RECOVERABLE_GROUNDWATER),
float(df1.loc[(df1.LANDUSE_TYPE == "Other")].NON_RECOVERABLE_SURFACEWATER)])
p1['sp_r01_c03'] = pd.np.sum([float(df1.loc[(df1.LANDUSE_TYPE == "Irrigated crops")].RECOVERABLE_GROUNDWATER),
float(df1.loc[(df1.LANDUSE_TYPE == "Irrigated crops")].RECOVERABLE_SURFACEWATER)])
p1['sp_r02_c03'] = pd.np.sum([float(df1.loc[(df1.LANDUSE_TYPE == "Managed water bodies")].RECOVERABLE_GROUNDWATER),
float(df1.loc[(df1.LANDUSE_TYPE == "Managed water bodies")].RECOVERABLE_SURFACEWATER)])
p1['sp_r03_c03'] = pd.np.sum([float(df1.loc[(df1.LANDUSE_TYPE == "Industry")].RECOVERABLE_GROUNDWATER),
float(df1.loc[(df1.LANDUSE_TYPE == "Industry")].RECOVERABLE_SURFACEWATER)])
p1['sp_r04_c03'] = pd.np.sum([float(df1.loc[(df1.LANDUSE_TYPE == "Aquaculture")].RECOVERABLE_GROUNDWATER),
float(df1.loc[(df1.LANDUSE_TYPE == "Aquaculture")].RECOVERABLE_SURFACEWATER)])
p1['sp_r05_c03'] = pd.np.sum([float(df1.loc[(df1.LANDUSE_TYPE == "Residential")].RECOVERABLE_GROUNDWATER),
float(df1.loc[(df1.LANDUSE_TYPE == "Residential")].RECOVERABLE_SURFACEWATER)])
p1['sp_r06_c03'] = pd.np.sum([float(df1.loc[(df1.LANDUSE_TYPE == "Greenhouses")].RECOVERABLE_GROUNDWATER),
float(df1.loc[(df1.LANDUSE_TYPE == "Greenhouses")].RECOVERABLE_SURFACEWATER)])
p1['sp_r07_c03'] = pd.np.sum([float(df1.loc[(df1.LANDUSE_TYPE == "Power and Energy")].RECOVERABLE_GROUNDWATER),
float(df1.loc[(df1.LANDUSE_TYPE == "Power and Energy")].RECOVERABLE_SURFACEWATER)])
p1['sp_r08_c03'] = pd.np.sum([float(df1.loc[(df1.LANDUSE_TYPE == "Other")].RECOVERABLE_GROUNDWATER),
float(df1.loc[(df1.LANDUSE_TYPE == "Other")].RECOVERABLE_SURFACEWATER)])
p1['wd_r01_c01'] = pd.np.nansum([float(df1.loc[(df1.LANDUSE_TYPE == "Irrigated crops")].SUPPLY_GROUNDWATER),
float(df1.loc[(df1.LANDUSE_TYPE == "Managed water bodies")].SUPPLY_GROUNDWATER),
float(df1.loc[(df1.LANDUSE_TYPE == "Industry")].SUPPLY_GROUNDWATER),
float(df1.loc[(df1.LANDUSE_TYPE == "Aquaculture")].SUPPLY_GROUNDWATER),
float(df1.loc[(df1.LANDUSE_TYPE == "Residential")].SUPPLY_GROUNDWATER),
float(df1.loc[(df1.LANDUSE_TYPE == "Greenhouses")].SUPPLY_GROUNDWATER),
float(df1.loc[(df1.LANDUSE_TYPE == "Power and Energy")].SUPPLY_GROUNDWATER),
float(df1.loc[(df1.LANDUSE_TYPE == "Other")].SUPPLY_GROUNDWATER)])
p1['wd_r02_c01'] = pd.np.nansum([float(df1.loc[(df1.LANDUSE_TYPE == "Irrigated crops")].SUPPLY_SURFACEWATER),
float(df1.loc[(df1.LANDUSE_TYPE == "Managed water bodies")].SUPPLY_SURFACEWATER),
float(df1.loc[(df1.LANDUSE_TYPE == "Industry")].SUPPLY_SURFACEWATER),
float(df1.loc[(df1.LANDUSE_TYPE == "Aquaculture")].SUPPLY_SURFACEWATER),
float(df1.loc[(df1.LANDUSE_TYPE == "Residential")].SUPPLY_SURFACEWATER),
float(df1.loc[(df1.LANDUSE_TYPE == "Greenhouses")].SUPPLY_SURFACEWATER),
float(df1.loc[(df1.LANDUSE_TYPE == "Power and Energy")].SUPPLY_SURFACEWATER),
float(df1.loc[(df1.LANDUSE_TYPE == "Other")].SUPPLY_SURFACEWATER)])
p1['wd_r03_c01'] = pd.np.nansum([p1['wd_r01_c01'],p1['wd_r02_c01']])
p1['sp_r01_c04'] = pd.np.nansum([p1['sp_r01_c02'],p1['sp_r02_c02'],p1['sp_r03_c02'],p1['sp_r04_c02'],p1['sp_r05_c02'],p1['sp_r06_c02'],p1['sp_r07_c02'],p1['sp_r08_c02']])
p1['of_r03_c02'] = pd.np.nansum([p1['sp_r01_c03'],p1['sp_r02_c03'],p1['sp_r03_c03'],p1['sp_r04_c03'],p1['sp_r05_c03'],p1['sp_r06_c03'],p1['sp_r07_c03'],p1['sp_r08_c03']])
p1['of_r02_c01'] = pd.np.nansum([float(df1.loc[(df1.LANDUSE_TYPE == "Irrigated crops")].RECOVERABLE_SURFACEWATER),
float(df1.loc[(df1.LANDUSE_TYPE == "Managed water bodies")].RECOVERABLE_SURFACEWATER),
float(df1.loc[(df1.LANDUSE_TYPE == "Industry")].RECOVERABLE_SURFACEWATER),
float(df1.loc[(df1.LANDUSE_TYPE == "Aquaculture")].RECOVERABLE_SURFACEWATER),
float(df1.loc[(df1.LANDUSE_TYPE == "Residential")].RECOVERABLE_SURFACEWATER),
float(df1.loc[(df1.LANDUSE_TYPE == "Greenhouses")].RECOVERABLE_SURFACEWATER),
float(df1.loc[(df1.LANDUSE_TYPE == "Power and Energy")].RECOVERABLE_SURFACEWATER),
float(df1.loc[(df1.LANDUSE_TYPE == "Other")].RECOVERABLE_SURFACEWATER)])
p1['of_r04_c01'] = pd.np.nansum([float(df1.loc[(df1.LANDUSE_TYPE == "Irrigated crops")].RECOVERABLE_GROUNDWATER),
float(df1.loc[(df1.LANDUSE_TYPE == "Managed water bodies")].RECOVERABLE_GROUNDWATER),
float(df1.loc[(df1.LANDUSE_TYPE == "Industry")].RECOVERABLE_GROUNDWATER),
float(df1.loc[(df1.LANDUSE_TYPE == "Aquaculture")].RECOVERABLE_GROUNDWATER),
float(df1.loc[(df1.LANDUSE_TYPE == "Residential")].RECOVERABLE_GROUNDWATER),
float(df1.loc[(df1.LANDUSE_TYPE == "Greenhouses")].RECOVERABLE_GROUNDWATER),
float(df1.loc[(df1.LANDUSE_TYPE == "Power and Energy")].RECOVERABLE_GROUNDWATER),
float(df1.loc[(df1.LANDUSE_TYPE == "Other")].RECOVERABLE_GROUNDWATER)])
p1['of_r03_c01'] = pd.np.nansum([float(df1.loc[(df1.LANDUSE_TYPE == "Irrigated crops")].NON_RECOVERABLE_SURFACEWATER),
float(df1.loc[(df1.LANDUSE_TYPE == "Managed water bodies")].NON_RECOVERABLE_SURFACEWATER),
float(df1.loc[(df1.LANDUSE_TYPE == "Industry")].NON_RECOVERABLE_SURFACEWATER),
float(df1.loc[(df1.LANDUSE_TYPE == "Aquaculture")].NON_RECOVERABLE_SURFACEWATER),
float(df1.loc[(df1.LANDUSE_TYPE == "Residential")].NON_RECOVERABLE_SURFACEWATER),
float(df1.loc[(df1.LANDUSE_TYPE == "Greenhouses")].NON_RECOVERABLE_SURFACEWATER),
float(df1.loc[(df1.LANDUSE_TYPE == "Power and Energy")].NON_RECOVERABLE_SURFACEWATER),
float(df1.loc[(df1.LANDUSE_TYPE == "Other")].NON_RECOVERABLE_SURFACEWATER)])
p1['of_r05_c01'] = pd.np.nansum([float(df1.loc[(df1.LANDUSE_TYPE == "Irrigated crops")].NON_RECOVERABLE_GROUNDWATER),
float(df1.loc[(df1.LANDUSE_TYPE == "Managed water bodies")].NON_RECOVERABLE_GROUNDWATER),
float(df1.loc[(df1.LANDUSE_TYPE == "Industry")].NON_RECOVERABLE_GROUNDWATER),
float(df1.loc[(df1.LANDUSE_TYPE == "Aquaculture")].NON_RECOVERABLE_GROUNDWATER),
float(df1.loc[(df1.LANDUSE_TYPE == "Residential")].NON_RECOVERABLE_GROUNDWATER),
float(df1.loc[(df1.LANDUSE_TYPE == "Greenhouses")].NON_RECOVERABLE_GROUNDWATER),
float(df1.loc[(df1.LANDUSE_TYPE == "Power and Energy")].NON_RECOVERABLE_GROUNDWATER),
float(df1.loc[(df1.LANDUSE_TYPE == "Other")].NON_RECOVERABLE_GROUNDWATER)])
p1['of_r04_c02'] = pd.np.nansum([p1['of_r05_c01'],p1['of_r03_c01']])
p1['sp_r02_c04'] = pd.np.nansum([p1['of_r02_c01'],p1['of_r04_c01']])
p1['of_r09_c02'] = pd.np.nansum([float(df1.loc[(df1.LANDUSE_TYPE == "Irrigated crops")].CONSUMED_OTHER),
float(df1.loc[(df1.LANDUSE_TYPE == "Managed water bodies")].CONSUMED_OTHER),
float(df1.loc[(df1.LANDUSE_TYPE == "Industry")].CONSUMED_OTHER),
float(df1.loc[(df1.LANDUSE_TYPE == "Aquaculture")].CONSUMED_OTHER),
float(df1.loc[(df1.LANDUSE_TYPE == "Residential")].CONSUMED_OTHER),
float(df1.loc[(df1.LANDUSE_TYPE == "Greenhouses")].CONSUMED_OTHER),
float(df1.loc[(df1.LANDUSE_TYPE == "Power and Energy")].CONSUMED_OTHER),
float(df1.loc[(df1.LANDUSE_TYPE == "Other")].CONSUMED_OTHER)])
p1['of_r02_c02'] = pd.np.nansum([float(df1.loc[(df1.LANDUSE_TYPE == "Irrigated crops")].NON_CONVENTIONAL_ET),
float(df1.loc[(df1.LANDUSE_TYPE == "Managed water bodies")].NON_CONVENTIONAL_ET),
float(df1.loc[(df1.LANDUSE_TYPE == "Industry")].NON_CONVENTIONAL_ET),
float(df1.loc[(df1.LANDUSE_TYPE == "Aquaculture")].NON_CONVENTIONAL_ET),
float(df1.loc[(df1.LANDUSE_TYPE == "Residential")].NON_CONVENTIONAL_ET),
float(df1.loc[(df1.LANDUSE_TYPE == "Greenhouses")].NON_CONVENTIONAL_ET),
float(df1.loc[(df1.LANDUSE_TYPE == "Power and Energy")].NON_CONVENTIONAL_ET),
float(df1.loc[(df1.LANDUSE_TYPE == "Other")].NON_CONVENTIONAL_ET)])
p1['of_r01_c02'] = pd.np.nansum([float(df1.loc[(df1.LANDUSE_TYPE == "Irrigated crops")].CONSUMED_ET),
float(df1.loc[(df1.LANDUSE_TYPE == "Managed water bodies")].CONSUMED_ET),
float(df1.loc[(df1.LANDUSE_TYPE == "Industry")].CONSUMED_ET),
float(df1.loc[(df1.LANDUSE_TYPE == "Aquaculture")].CONSUMED_ET),
float(df1.loc[(df1.LANDUSE_TYPE == "Residential")].CONSUMED_ET),
float(df1.loc[(df1.LANDUSE_TYPE == "Greenhouses")].CONSUMED_ET),
float(df1.loc[(df1.LANDUSE_TYPE == "Power and Energy")].CONSUMED_ET),
float(df1.loc[(df1.LANDUSE_TYPE == "Other")].CONSUMED_ET)])
p1['of_r01_c01'] =
|
pd.np.nansum([p1['of_r02_c02'],p1['of_r01_c02']])
|
pandas.np.nansum
|
"""
事前準備に
$ pip install pandas
が必要
リファレンス
https://pandas.pydata.org/pandas-docs/stable/reference/index.html
"""
import pandas as pd
csv_path = './data/csv_out.csv'
import csv
# helper functions that use only the standard library to inspect the file contents
def print_csv(path = csv_path):
with open(path) as f:
print("---- parsed as csv: start ----")
for row in csv.reader(f):
print(row)
print("---- parsed as csv: end ----")
def print_raw(path = csv_path):
with open(path) as f:
s = f.read()
print("---- raw text: start ----")
print(s)
print("---- raw text: end ----")
print("******** DataFrame の内容をcsvとして吐き出し[二重配列] ********")
df = pd.DataFrame([["kato", 40,11.5, True, "OK"],
["sato",28, 12.2, False, "NG"],
["ito",32, 16.0, True]])
df.index = ["a","b","c"]
df.columns = ["Name", "Age", "Score", "Car", "Comment"]
print(df)
csv_path1 = "./data/csv_pandas1.csv"
df.to_csv(csv_path1)
print_raw(csv_path1)
print("******** DataFrame の内容をcsvとして吐き出し[ディクショナリ] ********")
df = pd.DataFrame([{"Name": "goto", "Age": 12 , "Score":9.5, "Car": False, "Comment": "No" },
{"Name": "suto", "Age": 20 , "Score":25.0, "Car": False, "Comment": "Not" },
{"Name": "mato", "Age": 43 , "Score":12.2, "Car": True, "Comment": "Exist"}]
# columnsを指定しておくとその順番で保持される
,columns = ["Name", "Age", "Car", "Comment", "Score"])
# 初期化時にcolumns指定をしなかった場合はあとで指定しても実際の並び順は変わらない
#df.columns = ["Name", "Age", "Car", "Comment", "Score"]
print(df)
csv_path2 = "./data/csv_pandas2.csv"
df.to_csv(csv_path2)
print_raw(csv_path2)
csv_path_index_header = "./data/csv_in_index_and_header.csv"
csv_path_header = "./data/csv_in_header_only.csv"
csv_path_index = "./data/csv_in_index_only.csv"
csv_path_neither = "./data/csv_in_neither_index_and_header.csv"
print("********ヘッダ、indexのあるcsvファイル********")
# 0列目はindex,headerはdefaultが0行目なので指定の必要なし
df = pd.read_csv(csv_path_index_header, index_col=0)
print(df)
print(df.index) # index(行の項目名)
print(df.columns) # header (列の項目名)
print(df.values) # 値
print("********ヘッダ有り、index無しcsvファイル********")
df = pd.read_csv(csv_path_header)
print(df)
print(df.index) # index(行の項目名)
print(df.columns) # header (列の項目名)
print(df.values) # 値
print("********ヘッダ無し、index有りcsvファイル********")
df = pd.read_csv(csv_path_index, header=None, index_col=0)
print(df)
print(df.index) # index(行の項目名)
print(df.columns) # header (列の項目名)
print(df.values) # 値
print("********ヘッダ無し、index無しcsvファイル********")
df = pd.read_csv(csv_path_neither, header=None, index_col=None)
print(df)
print(df.index) # index(行の項目名)
print(df.columns) # header (列の項目名)
print(df.values) # 値
print("********区切りが'|'を読み込んで' 'の区切りで出力********")
df =
|
pd.read_csv("./data/csv_in_other_delimiter.csv", sep='|')
|
pandas.read_csv
|
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 7 09:40:49 2018
@author: yuwei
"""
import pandas as pd
import numpy as np
import math
import random
import time
import scipy as sp
import xgboost as xgb
def loadData():
"下载数据"
trainSet = pd.read_table('round1_ijcai_18_train_20180301.txt',sep=' ')
testSet = pd.read_table('round1_ijcai_18_test_a_20180301.txt',sep=' ')
return trainSet,testSet
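# The two files above appear to be the round-1 IJCAI-18 competition dumps (space-separated
# text); both are read fully into memory before any time-based splitting is done.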
def splitData(trainSet,testSet):
"按时间划分验证集"
#转化测试集时间戳为标准时间
time_local = testSet.context_timestamp.map(lambda x :time.localtime(x))
time_local = time_local.map(lambda x :time.strftime("%Y-%m-%d %H:%M:%S",x))
testSet['context_timestamp'] = time_local
#convert training-set timestamps to standard datetime strings
time_local = trainSet.context_timestamp.map(lambda x :time.localtime(x))
time_local = time_local.map(lambda x :time.strftime("%Y-%m-%d %H:%M:%S",x))
trainSet['context_timestamp'] = time_local
del time_local
#process the item_category_list attribute of the training set
trainSet['item_category_list'] = trainSet.item_category_list.map(lambda x :x.split(';'))
trainSet['item_category_list_2'] = trainSet.item_category_list.map(lambda x :x[1])
trainSet['item_category_list_3'] = trainSet.item_category_list.map(lambda x :x[2] if len(x) >2 else -1)
trainSet['item_category_list_2'] = list(map(lambda x,y : x if (y == -1) else y,trainSet['item_category_list_2'],trainSet['item_category_list_3']))
#process the item_category_list attribute of the test set
testSet['item_category_list'] = testSet.item_category_list.map(lambda x :x.split(';'))
testSet['item_category_list_2'] = testSet.item_category_list.map(lambda x :x[1])
testSet['item_category_list_3'] = testSet.item_category_list.map(lambda x :x[2] if len(x) >2 else -1)
testSet['item_category_list_2'] = list(map(lambda x,y : x if (y == -1) else y,testSet['item_category_list_2'],testSet['item_category_list_3']))
del trainSet['item_category_list_3'];del testSet['item_category_list_3'];
#compute the rank of the item category within predict_category_property
trainSet['predict_category'] = trainSet['predict_category_property'].map(lambda x :[y.split(':')[0] for y in x.split(';')])
trainSet['predict_category_property_rank'] = list(map(lambda x,y:y.index(x) if x in y else -1,trainSet['item_category_list_2'],trainSet['predict_category']))
testSet['predict_category'] = testSet['predict_category_property'].map(lambda x :[y.split(':')[0] for y in x.split(';')])
testSet['predict_category_property_rank'] = list(map(lambda x,y:y.index(x) if x in y else -1,testSet['item_category_list_2'],testSet['predict_category']))
#count of categories shared between item_category_list and predict_category
trainSet['item_category_count'] = list(map(lambda x,y:len(set(x)&set(y)),trainSet.item_category_list,trainSet.predict_category))
testSet['item_category_count'] = list(map(lambda x,y:len(set(x)&set(y)),testSet.item_category_list,testSet.predict_category))
#count of differing categories (note: this reuses the same column name, so it overwrites the shared count above)
trainSet['item_category_count'] = list(map(lambda x,y:len(set(x)) - len(set(x)&set(y)),trainSet.item_category_list,trainSet.predict_category))
testSet['item_category_count'] = list(map(lambda x,y:len(set(x)) - len(set(x)&set(y)),testSet.item_category_list,testSet.predict_category))
del trainSet['predict_category']; del testSet['predict_category']
"划分数据集"
#测试集 23-24号特征提取,25号打标
test = testSet
testFeat = trainSet[trainSet['context_timestamp']>'2018-09-23']
#validation set: features from the 22nd-23rd, labels on the 24th
validate = trainSet[trainSet['context_timestamp']>'2018-09-24']
validateFeat = trainSet[(trainSet['context_timestamp']>'2018-09-22') & (trainSet['context_timestamp']<'2018-09-24')]
#training sets: features 21st-22nd with labels on the 23rd; 20th-21st with labels on the 22nd; 19th-20th with labels on the 21st; 18th-19th with labels on the 20th
#label windows
train1 = trainSet[(trainSet['context_timestamp']>'2018-09-23') & (trainSet['context_timestamp']<'2018-09-24')]
train2 = trainSet[(trainSet['context_timestamp']>'2018-09-22') & (trainSet['context_timestamp']<'2018-09-23')]
train3 = trainSet[(trainSet['context_timestamp']>'2018-09-21') & (trainSet['context_timestamp']<'2018-09-22')]
train4 = trainSet[(trainSet['context_timestamp']>'2018-09-20') & (trainSet['context_timestamp']<'2018-09-21')]
#feature windows
trainFeat1 = trainSet[(trainSet['context_timestamp']>'2018-09-21') & (trainSet['context_timestamp']<'2018-09-23')]
trainFeat2 = trainSet[(trainSet['context_timestamp']>'2018-09-20') & (trainSet['context_timestamp']<'2018-09-22')]
trainFeat3 = trainSet[(trainSet['context_timestamp']>'2018-09-19') & (trainSet['context_timestamp']<'2018-09-21')]
trainFeat4 = trainSet[(trainSet['context_timestamp']>'2018-09-18') & (trainSet['context_timestamp']<'2018-09-20')]
return test,testFeat,validate,validateFeat,train1,trainFeat1,train2,trainFeat2,train3,trainFeat3,train4,trainFeat4
def modelXgb(train,test):
"xgb模型"
train_y = train['is_trade'].values
# train_x = train.drop(['item_brand_id','item_city_id','user_id','shop_id','context_id','instance_id', 'item_id','item_category_list','item_property_list', 'context_timestamp',
# 'predict_category_property','is_trade'
# ],axis=1).values
# test_x = test.drop(['item_brand_id','item_city_id','user_id','shop_id','context_id','instance_id', 'item_id','item_category_list','item_property_list', 'context_timestamp',
# 'predict_category_property','is_trade'
# ],axis=1).values
# test_x = test.drop(['item_brand_id','item_city_id','user_id','shop_id','context_id','instance_id', 'item_id','item_category_list','item_property_list', 'context_timestamp',
# 'predict_category_property'
# ],axis=1).values
#based on the Pearson correlation coefficient, drop attributes whose correlation is below -0.2
train_x = train.drop(['item_brand_id',
'item_city_id','user_id','shop_id','context_id',
'instance_id', 'item_id','item_category_list',
'item_property_list', 'context_timestamp',
'predict_category_property','is_trade',
'item_price_level','user_rank_down',
'item_category_list_2_not_buy_count',
'item_category_list_2_count',
'user_first'
# 'user_count_label',
# 'item_city_not_buy_count',
# 'item_city_count',
# 'user_shop_rank_down',
# 'item_city_buy_count',
# 'user_item_rank_down',
# 'shop_score_description',
# 'shop_review_positive_rate',
# 'shop_score_delivery',
# 'shop_score_service',
],axis=1).values
# test_x = test.drop(['item_brand_id',
# 'item_city_id','user_id','shop_id','context_id',
# 'instance_id', 'item_id','item_category_list',
# 'item_property_list', 'context_timestamp',
# 'predict_category_property','is_trade',
# 'item_price_level','user_rank_down',
# 'item_category_list_2_not_buy_count',
# 'item_category_list_2_count',
# 'user_first',
# 'user_count_label',
# 'item_city_not_buy_count',
# 'item_city_count',
# 'user_shop_rank_down',
# 'item_city_buy_count',
# 'user_item_rank_down',
# 'shop_score_description',
# 'shop_review_positive_rate',
# 'shop_score_delivery',
# 'shop_score_service'
# ],axis=1).values
test_x = test.drop(['item_brand_id',
'item_city_id','user_id','shop_id','context_id',
'instance_id', 'item_id','item_category_list',
'item_property_list', 'context_timestamp',
'predict_category_property',
'item_price_level','user_rank_down',
'item_category_list_2_not_buy_count',
'item_category_list_2_count',
'user_first',
# 'user_count_label',
# 'item_city_not_buy_count',
# 'item_city_count',
# 'user_shop_rank_down',
# 'item_city_buy_count',
# 'user_item_rank_down',
# 'shop_score_description',
# 'shop_review_positive_rate',
# 'shop_score_delivery',
# 'shop_score_service'
],axis=1).values
dtrain = xgb.DMatrix(train_x, label=train_y)
dtest = xgb.DMatrix(test_x)
# model parameters
params = {'booster': 'gbtree',
'objective':'binary:logistic',
'eval_metric':'logloss',
'eta': 0.03,
'max_depth': 5, # 6
'colsample_bytree': 0.8,#0.8
'subsample': 0.8,
'scale_pos_weight': 1,
'min_child_weight': 18 # 2
}
# training
watchlist = [(dtrain,'train')]
bst = xgb.train(params, dtrain, num_boost_round=700,evals=watchlist)
# prediction
predict = bst.predict(dtest)
# test_xy = test[['instance_id','is_trade']]
test_xy = test[['instance_id']]
test_xy['predicted_score'] = predict
return test_xy
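# Usage sketch (an illustration, not part of the original pipeline code): once `train`
# and `test` have been assembled from the feature tables built below, predictions can
# be produced and saved with something like
#   result = modelXgb(train, test)
#   result.to_csv('submission.txt', sep=' ', index=False)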
def get_item_feat(data,dataFeat):
"item的特征提取"
result = pd.DataFrame(dataFeat['item_id'])
result = result.drop_duplicates(['item_id'],keep='first')
"1.统计item出现次数"
dataFeat['item_count'] = dataFeat['item_id']
feat = pd.pivot_table(dataFeat,index=['item_id'],values='item_count',aggfunc='count').reset_index()
del dataFeat['item_count']
result = pd.merge(result,feat,on=['item_id'],how='left')
"2.统计item历史被购买的次数"
dataFeat['item_buy_count'] = dataFeat['is_trade']
feat = pd.pivot_table(dataFeat,index=['item_id'],values='item_buy_count',aggfunc='sum').reset_index()
del dataFeat['item_buy_count']
result = pd.merge(result,feat,on=['item_id'],how='left')
"3.统计item转化率特征"
buy_ratio = list(map(lambda x,y : -1 if y == 0 else x/y,result.item_buy_count,result.item_count))
result['item_buy_ratio'] = buy_ratio
"4.统计item历史未被够买的次数"
result['item_not_buy_count'] = result['item_count'] - result['item_buy_count']
return result
def get_user_feat(data,dataFeat):
"user的特征提取"
result = pd.DataFrame(dataFeat['user_id'])
result = result.drop_duplicates(['user_id'],keep='first')
"1.统计user出现次数"
dataFeat['user_count'] = dataFeat['user_id']
feat = pd.pivot_table(dataFeat,index=['user_id'],values='user_count',aggfunc='count').reset_index()
del dataFeat['user_count']
result = pd.merge(result,feat,on=['user_id'],how='left')
"2.统计user历史被购买的次数"
dataFeat['user_buy_count'] = dataFeat['is_trade']
feat = pd.pivot_table(dataFeat,index=['user_id'],values='user_buy_count',aggfunc='sum').reset_index()
del dataFeat['user_buy_count']
result = pd.merge(result,feat,on=['user_id'],how='left')
"3.统计user转化率特征"
buy_ratio = list(map(lambda x,y : -1 if y == 0 else x/y,result.user_buy_count,result.user_count))
result['user_buy_ratio'] = buy_ratio
"4.统计user历史未被够买的次数"
result['user_not_buy_count'] = result['user_count'] - result['user_buy_count']
return result
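# The remaining get_*_feat helpers below all follow the same four-step template as
# get_item_feat/get_user_feat above, applied to their respective key columns:
# 1. count occurrences of the key, 2. sum is_trade to get historical purchases,
# 3. derive the conversion ratio (with -1 when the count is zero), and
# 4. derive the non-purchase count as occurrences minus purchases.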
def get_context_feat(data,dataFeat):
"context的特征提取"
result = pd.DataFrame(dataFeat['context_id'])
result = result.drop_duplicates(['context_id'],keep='first')
"1.统计context出现次数"
dataFeat['context_count'] = dataFeat['context_id']
feat = pd.pivot_table(dataFeat,index=['context_id'],values='context_count',aggfunc='count').reset_index()
del dataFeat['context_count']
result = pd.merge(result,feat,on=['context_id'],how='left')
"2.统计context历史被购买的次数"
dataFeat['context_buy_count'] = dataFeat['is_trade']
feat = pd.pivot_table(dataFeat,index=['context_id'],values='context_buy_count',aggfunc='sum').reset_index()
del dataFeat['context_buy_count']
result = pd.merge(result,feat,on=['context_id'],how='left')
"3.统计context转化率特征"
buy_ratio = list(map(lambda x,y : -1 if y == 0 else x/y,result.context_buy_count,result.context_count))
result['context_buy_ratio'] = buy_ratio
"4.统计context历史未被够买的次数"
result['context_not_buy_count'] = result['context_count'] - result['context_buy_count']
return result
def get_shop_feat(data,dataFeat):
"shop的特征提取"
result = pd.DataFrame(dataFeat['shop_id'])
result = result.drop_duplicates(['shop_id'],keep='first')
"1.统计shop出现次数"
dataFeat['shop_count'] = dataFeat['shop_id']
feat = pd.pivot_table(dataFeat,index=['shop_id'],values='shop_count',aggfunc='count').reset_index()
del dataFeat['shop_count']
result = pd.merge(result,feat,on=['shop_id'],how='left')
"2.统计shop历史被购买的次数"
dataFeat['shop_buy_count'] = dataFeat['is_trade']
feat = pd.pivot_table(dataFeat,index=['shop_id'],values='shop_buy_count',aggfunc='sum').reset_index()
del dataFeat['shop_buy_count']
result = pd.merge(result,feat,on=['shop_id'],how='left')
"3.统计shop转化率特征"
buy_ratio = list(map(lambda x,y : -1 if y == 0 else x/y,result.shop_buy_count,result.shop_count))
result['shop_buy_ratio'] = buy_ratio
"4.统计shop历史未被够买的次数"
result['shop_not_buy_count'] = result['shop_count'] - result['shop_buy_count']
return result
def get_timestamp_feat(data,dataFeat):
"context_timestamp的特征提取"
result = pd.DataFrame(dataFeat['context_timestamp'])
result = result.drop_duplicates(['context_timestamp'],keep='first')
"1.统计context_timestamp出现次数"
dataFeat['context_timestamp_count'] = dataFeat['context_timestamp']
feat = pd.pivot_table(dataFeat,index=['context_timestamp'],values='context_timestamp_count',aggfunc='count').reset_index()
del dataFeat['context_timestamp_count']
result = pd.merge(result,feat,on=['context_timestamp'],how='left')
"2.统计context_timestamp历史被购买的次数"
dataFeat['context_timestamp_buy_count'] = dataFeat['is_trade']
feat = pd.pivot_table(dataFeat,index=['context_timestamp'],values='context_timestamp_buy_count',aggfunc='sum').reset_index()
del dataFeat['context_timestamp_buy_count']
result = pd.merge(result,feat,on=['context_timestamp'],how='left')
"3.统计context_timestamp转化率特征"
buy_ratio = list(map(lambda x,y : -1 if y == 0 else x/y,result.context_timestamp_buy_count,result.context_timestamp_count))
result['context_timestamp_buy_ratio'] = buy_ratio
"4.统计context_timestamp历史未被够买的次数"
result['context_timestamp_not_buy_count'] = result['context_timestamp_count'] - result['context_timestamp_buy_count']
return result
def get_item_brand_feat(data,dataFeat):
"item_brand的特征提取"
result = pd.DataFrame(dataFeat['item_brand_id'])
result = result.drop_duplicates(['item_brand_id'],keep='first')
"1.统计item_brand出现次数"
dataFeat['item_brand_count'] = dataFeat['item_brand_id']
feat = pd.pivot_table(dataFeat,index=['item_brand_id'],values='item_brand_count',aggfunc='count').reset_index()
del dataFeat['item_brand_count']
result = pd.merge(result,feat,on=['item_brand_id'],how='left')
"2.统计item_brand历史被购买的次数"
dataFeat['item_brand_buy_count'] = dataFeat['is_trade']
feat = pd.pivot_table(dataFeat,index=['item_brand_id'],values='item_brand_buy_count',aggfunc='sum').reset_index()
del dataFeat['item_brand_buy_count']
result = pd.merge(result,feat,on=['item_brand_id'],how='left')
"3.统计item_brand转化率特征"
buy_ratio = list(map(lambda x,y : -1 if y == 0 else x/y,result.item_brand_buy_count,result.item_brand_count))
result['item_brand_buy_ratio'] = buy_ratio
"4.统计item_brand历史未被够买的次数"
result['item_brand_not_buy_count'] = result['item_brand_count'] - result['item_brand_buy_count']
return result
def get_item_city_feat(data,dataFeat):
"item_city的特征提取"
result = pd.DataFrame(dataFeat['item_city_id'])
result = result.drop_duplicates(['item_city_id'],keep='first')
"1.统计item_city出现次数"
dataFeat['item_city_count'] = dataFeat['item_city_id']
feat = pd.pivot_table(dataFeat,index=['item_city_id'],values='item_city_count',aggfunc='count').reset_index()
del dataFeat['item_city_count']
result = pd.merge(result,feat,on=['item_city_id'],how='left')
"2.统计item_city历史被购买的次数"
dataFeat['item_city_buy_count'] = dataFeat['is_trade']
feat = pd.pivot_table(dataFeat,index=['item_city_id'],values='item_city_buy_count',aggfunc='sum').reset_index()
del dataFeat['item_city_buy_count']
result = pd.merge(result,feat,on=['item_city_id'],how='left')
"3.统计item_city转化率特征"
buy_ratio = list(map(lambda x,y : -1 if y == 0 else x/y,result.item_city_buy_count,result.item_city_count))
result['item_city_buy_ratio'] = buy_ratio
"4.统计item_city历史未被够买的次数"
result['item_city_not_buy_count'] = result['item_city_count'] - result['item_city_buy_count']
return result
def get_user_gender_feat(data,dataFeat):
"user_gender的特征提取"
result = pd.DataFrame(dataFeat['user_gender_id'])
result = result.drop_duplicates(['user_gender_id'],keep='first')
"1.统计user_gender出现次数"
dataFeat['user_gender_count'] = dataFeat['user_gender_id']
feat = pd.pivot_table(dataFeat,index=['user_gender_id'],values='user_gender_count',aggfunc='count').reset_index()
del dataFeat['user_gender_count']
result = pd.merge(result,feat,on=['user_gender_id'],how='left')
"2.统计user_gender历史被购买的次数"
dataFeat['user_gender_buy_count'] = dataFeat['is_trade']
feat = pd.pivot_table(dataFeat,index=['user_gender_id'],values='user_gender_buy_count',aggfunc='sum').reset_index()
del dataFeat['user_gender_buy_count']
result = pd.merge(result,feat,on=['user_gender_id'],how='left')
"3.统计user_gender转化率特征"
buy_ratio = list(map(lambda x,y : -1 if y == 0 else x/y,result.user_gender_buy_count,result.user_gender_count))
result['user_gender_buy_ratio'] = buy_ratio
"4.统计user_gender历史未被够买的次数"
result['user_gender_not_buy_count'] = result['user_gender_count'] - result['user_gender_buy_count']
return result
def get_user_occupation_feat(data,dataFeat):
"user_occupation的特征提取"
result = pd.DataFrame(dataFeat['user_occupation_id'])
result = result.drop_duplicates(['user_occupation_id'],keep='first')
"1.统计user_occupation出现次数"
dataFeat['user_occupation_count'] = dataFeat['user_occupation_id']
feat = pd.pivot_table(dataFeat,index=['user_occupation_id'],values='user_occupation_count',aggfunc='count').reset_index()
del dataFeat['user_occupation_count']
result = pd.merge(result,feat,on=['user_occupation_id'],how='left')
"2.统计user_occupation历史被购买的次数"
dataFeat['user_occupation_buy_count'] = dataFeat['is_trade']
feat = pd.pivot_table(dataFeat,index=['user_occupation_id'],values='user_occupation_buy_count',aggfunc='sum').reset_index()
del dataFeat['user_occupation_buy_count']
result = pd.merge(result,feat,on=['user_occupation_id'],how='left')
"3.统计user_occupation转化率特征"
buy_ratio = list(map(lambda x,y : -1 if y == 0 else x/y,result.user_occupation_buy_count,result.user_occupation_count))
result['user_occupation_buy_ratio'] = buy_ratio
"4.统计user_occupation历史未被够买的次数"
result['user_occupation_not_buy_count'] = result['user_occupation_count'] - result['user_occupation_buy_count']
return result
def get_context_page_feat(data,dataFeat):
"context_page的特征提取"
result = pd.DataFrame(dataFeat['context_page_id'])
result = result.drop_duplicates(['context_page_id'],keep='first')
"1.统计context_page出现次数"
dataFeat['context_page_count'] = dataFeat['context_page_id']
feat = pd.pivot_table(dataFeat,index=['context_page_id'],values='context_page_count',aggfunc='count').reset_index()
del dataFeat['context_page_count']
result = pd.merge(result,feat,on=['context_page_id'],how='left')
"2.统计context_page历史被购买的次数"
dataFeat['context_page_buy_count'] = dataFeat['is_trade']
feat = pd.pivot_table(dataFeat,index=['context_page_id'],values='context_page_buy_count',aggfunc='sum').reset_index()
del dataFeat['context_page_buy_count']
result = pd.merge(result,feat,on=['context_page_id'],how='left')
"3.统计context_page转化率特征"
buy_ratio = list(map(lambda x,y : -1 if y == 0 else x/y,result.context_page_buy_count,result.context_page_count))
result['context_page_buy_ratio'] = buy_ratio
"4.统计context_page历史未被够买的次数"
result['context_page_not_buy_count'] = result['context_page_count'] - result['context_page_buy_count']
return result
def get_shop_review_num_level_feat(data,dataFeat):
"context_page的特征提取"
result = pd.DataFrame(dataFeat['shop_review_num_level'])
result = result.drop_duplicates(['shop_review_num_level'],keep='first')
"1.统计shop_review_num_level出现次数"
dataFeat['shop_review_num_level_count'] = dataFeat['shop_review_num_level']
feat = pd.pivot_table(dataFeat,index=['shop_review_num_level'],values='shop_review_num_level_count',aggfunc='count').reset_index()
del dataFeat['shop_review_num_level_count']
result = pd.merge(result,feat,on=['shop_review_num_level'],how='left')
"2.统计shop_review_num_level历史被购买的次数"
dataFeat['shop_review_num_level_buy_count'] = dataFeat['is_trade']
feat = pd.pivot_table(dataFeat,index=['shop_review_num_level'],values='shop_review_num_level_buy_count',aggfunc='sum').reset_index()
del dataFeat['shop_review_num_level_buy_count']
result = pd.merge(result,feat,on=['shop_review_num_level'],how='left')
"3.统计shop_review_num_level转化率特征"
buy_ratio = list(map(lambda x,y : -1 if y == 0 else x/y,result.shop_review_num_level_buy_count,result.shop_review_num_level_count))
result['shop_review_num_level_buy_ratio'] = buy_ratio
"4.统计shop_review_num_level历史未被够买的次数"
result['shop_review_num_level_not_buy_count'] = result['shop_review_num_level_count'] - result['shop_review_num_level_buy_count']
return result
def get_item_category_list_2_feat(data,dataFeat):
"item_category_list_2的特征提取"
result = pd.DataFrame(dataFeat['item_category_list_2'])
result = result.drop_duplicates(['item_category_list_2'],keep='first')
"1.统计item_category_list_2出现次数"
dataFeat['item_category_list_2_count'] = dataFeat['item_category_list_2']
feat = pd.pivot_table(dataFeat,index=['item_category_list_2'],values='item_category_list_2_count',aggfunc='count').reset_index()
del dataFeat['item_category_list_2_count']
result = pd.merge(result,feat,on=['item_category_list_2'],how='left')
"2.统计item_category_list_2历史被购买的次数"
dataFeat['item_category_list_2_buy_count'] = dataFeat['is_trade']
feat = pd.pivot_table(dataFeat,index=['item_category_list_2'],values='item_category_list_2_buy_count',aggfunc='sum').reset_index()
del dataFeat['item_category_list_2_buy_count']
result = pd.merge(result,feat,on=['item_category_list_2'],how='left')
"3.统计item_category_list_2转化率特征"
buy_ratio = list(map(lambda x,y : -1 if y == 0 else x/y,result.item_category_list_2_buy_count,result.item_category_list_2_count))
result['item_category_list_2_buy_ratio'] = buy_ratio
"4.统计item_category_list_2历史未被够买的次数"
result['item_category_list_2_not_buy_count'] = result['item_category_list_2_count'] - result['item_category_list_2_buy_count']
return result
def get_user_item_feat(data,dataFeat):
"user-item的特征提取"
result = pd.DataFrame(dataFeat[['user_id','item_id']])
result = result.drop_duplicates(['user_id','item_id'],keep='first')
"1.统计user-item出现次数"
dataFeat['user_item_count'] = dataFeat['user_id']
feat = pd.pivot_table(dataFeat,index=['user_id','item_id'],values='user_item_count',aggfunc='count').reset_index()
del dataFeat['user_item_count']
result = pd.merge(result,feat,on=['user_id','item_id'],how='left')
"2.统计user-item历史被购买的次数"
dataFeat['user_item_buy_count'] = dataFeat['is_trade']
feat = pd.pivot_table(dataFeat,index=['user_id','item_id'],values='user_item_buy_count',aggfunc='sum').reset_index()
del dataFeat['user_item_buy_count']
result = pd.merge(result,feat,on=['user_id','item_id'],how='left')
"3.统计user-item转化率特征"
buy_ratio = list(map(lambda x,y : -1 if y == 0 else x/y,result.user_item_buy_count,result.user_item_count))
result['user_item_buy_ratio'] = buy_ratio
"4.统计user-item历史未被够买的次数"
result['user_item_not_buy_count'] = result['user_item_count'] - result['user_item_buy_count']
return result
def get_user_shop_feat(data,dataFeat):
"user-shop的特征提取"
result = pd.DataFrame(dataFeat[['user_id','shop_id']])
result = result.drop_duplicates(['user_id','shop_id'],keep='first')
"1.统计user-shop出现次数"
dataFeat['user_shop_count'] = dataFeat['user_id']
feat = pd.pivot_table(dataFeat,index=['user_id','shop_id'],values='user_shop_count',aggfunc='count').reset_index()
del dataFeat['user_shop_count']
result = pd.merge(result,feat,on=['user_id','shop_id'],how='left')
"2.统计user-shop历史被购买的次数"
dataFeat['user_shop_buy_count'] = dataFeat['is_trade']
feat = pd.pivot_table(dataFeat,index=['user_id','shop_id'],values='user_shop_buy_count',aggfunc='sum').reset_index()
del dataFeat['user_shop_buy_count']
result = pd.merge(result,feat,on=['user_id','shop_id'],how='left')
"3.统计user-shop转化率特征"
buy_ratio = list(map(lambda x,y : -1 if y == 0 else x/y,result.user_shop_buy_count,result.user_shop_count))
result['user_shop_buy_ratio'] = buy_ratio
"4.统计user-shop历史未被够买的次数"
result['user_shop_not_buy_count'] = result['user_shop_count'] - result['user_shop_buy_count']
return result
def get_user_context_feat(data,dataFeat):
"user-context的特征提取"
result = pd.DataFrame(dataFeat[['user_id','context_id']])
result = result.drop_duplicates(['user_id','context_id'],keep='first')
"1.统计user-context出现次数"
dataFeat['user_context_count'] = dataFeat['user_id']
feat = pd.pivot_table(dataFeat,index=['user_id','context_id'],values='user_context_count',aggfunc='count').reset_index()
del dataFeat['user_context_count']
result = pd.merge(result,feat,on=['user_id','context_id'],how='left')
"2.统计user-context历史被购买的次数"
dataFeat['user_context_buy_count'] = dataFeat['is_trade']
feat = pd.pivot_table(dataFeat,index=['user_id','context_id'],values='user_context_buy_count',aggfunc='sum').reset_index()
del dataFeat['user_context_buy_count']
result = pd.merge(result,feat,on=['user_id','context_id'],how='left')
"3.统计user-context转化率特征"
buy_ratio = list(map(lambda x,y : -1 if y == 0 else x/y,result.user_context_buy_count,result.user_context_count))
result['user_context_buy_ratio'] = buy_ratio
"4.统计user-context历史未被够买的次数"
result['user_context_not_buy_count'] = result['user_context_count'] - result['user_context_buy_count']
return result
def get_user_timestamp_feat(data,dataFeat):
"user-context_timestamp的特征提取"
result = pd.DataFrame(dataFeat[['user_id','context_timestamp']])
result = result.drop_duplicates(['user_id','context_timestamp'],keep='first')
"1.统计user-context_timestamp出现次数"
dataFeat['user_context_timestamp_count'] = dataFeat['user_id']
feat = pd.pivot_table(dataFeat,index=['user_id','context_timestamp'],values='user_context_timestamp_count',aggfunc='count').reset_index()
del dataFeat['user_context_timestamp_count']
result = pd.merge(result,feat,on=['user_id','context_timestamp'],how='left')
"2.统计user-context_timestamp历史被购买的次数"
dataFeat['user_context_timestamp_buy_count'] = dataFeat['is_trade']
feat = pd.pivot_table(dataFeat,index=['user_id','context_timestamp'],values='user_context_timestamp_buy_count',aggfunc='sum').reset_index()
del dataFeat['user_context_timestamp_buy_count']
result = pd.merge(result,feat,on=['user_id','context_timestamp'],how='left')
"3.统计user-context_timestamp转化率特征"
buy_ratio = list(map(lambda x,y : -1 if y == 0 else x/y,result.user_context_timestamp_buy_count,result.user_context_timestamp_count))
result['user_context_timestamp_buy_ratio'] = buy_ratio
"4.统计user-context_timestamp历史未被够买的次数"
result['user_context_timestamp_not_buy_count'] = result['user_context_timestamp_count'] - result['user_context_timestamp_buy_count']
return result
def get_user_item_brand_feat(data,dataFeat):
"user-item_brand的特征提取"
result = pd.DataFrame(dataFeat[['user_id','item_brand_id']])
result = result.drop_duplicates(['user_id','item_brand_id'],keep='first')
"1.统计user-item_brand_id出现次数"
dataFeat['user_item_brand_id_count'] = dataFeat['user_id']
feat = pd.pivot_table(dataFeat,index=['user_id','item_brand_id'],values='user_item_brand_id_count',aggfunc='count').reset_index()
del dataFeat['user_item_brand_id_count']
result = pd.merge(result,feat,on=['user_id','item_brand_id'],how='left')
"2.统计user-item_brand_id历史被购买的次数"
dataFeat['user_item_brand_id_buy_count'] = dataFeat['is_trade']
feat = pd.pivot_table(dataFeat,index=['user_id','item_brand_id'],values='user_item_brand_id_buy_count',aggfunc='sum').reset_index()
del dataFeat['user_item_brand_id_buy_count']
result = pd.merge(result,feat,on=['user_id','item_brand_id'],how='left')
"3.统计user-item_brand_id转化率特征"
buy_ratio = list(map(lambda x,y : -1 if y == 0 else x/y,result.user_item_brand_id_buy_count,result.user_item_brand_id_count))
result['user_item_brand_id_buy_ratio'] = buy_ratio
"4.统计user-item_brand_id历史未被够买的次数"
result['user_item_brand_id_not_buy_count'] = result['user_item_brand_id_count'] - result['user_item_brand_id_buy_count']
return result
def get_user_user_gender_feat(data,dataFeat):
"user-user_gender的特征提取"
result = pd.DataFrame(dataFeat[['user_id','user_gender_id']])
result = result.drop_duplicates(['user_id','user_gender_id'],keep='first')
"1.统计user-user_gender_id出现次数"
dataFeat['user_user_gender_id_count'] = dataFeat['user_id']
feat = pd.pivot_table(dataFeat,index=['user_id','user_gender_id'],values='user_user_gender_id_count',aggfunc='count').reset_index()
del dataFeat['user_user_gender_id_count']
result = pd.merge(result,feat,on=['user_id','user_gender_id'],how='left')
"2.统计user-user_gender_id历史被购买的次数"
dataFeat['user_user_gender_id_buy_count'] = dataFeat['is_trade']
feat = pd.pivot_table(dataFeat,index=['user_id','user_gender_id'],values='user_user_gender_id_buy_count',aggfunc='sum').reset_index()
del dataFeat['user_user_gender_id_buy_count']
result = pd.merge(result,feat,on=['user_id','user_gender_id'],how='left')
"3.统计user-user_gender_id转化率特征"
buy_ratio = list(map(lambda x,y : -1 if y == 0 else x/y,result.user_user_gender_id_buy_count,result.user_user_gender_id_count))
result['user_user_gender_id_buy_ratio'] = buy_ratio
"4.统计user-user_gender_id历史未被够买的次数"
result['user_user_gender_id_not_buy_count'] = result['user_user_gender_id_count'] - result['user_user_gender_id_buy_count']
return result
def get_user_item_city_feat(data,dataFeat):
"user-item_city的特征提取"
result = pd.DataFrame(dataFeat[['user_id','item_city_id']])
result = result.drop_duplicates(['user_id','item_city_id'],keep='first')
"1.统计user-item_city_id出现次数"
dataFeat['user_item_city_id_count'] = dataFeat['user_id']
feat = pd.pivot_table(dataFeat,index=['user_id','item_city_id'],values='user_item_city_id_count',aggfunc='count').reset_index()
del dataFeat['user_item_city_id_count']
result = pd.merge(result,feat,on=['user_id','item_city_id'],how='left')
"2.统计user-item_city_id历史被购买的次数"
dataFeat['user_item_city_id_buy_count'] = dataFeat['is_trade']
feat = pd.pivot_table(dataFeat,index=['user_id','item_city_id'],values='user_item_city_id_buy_count',aggfunc='sum').reset_index()
del dataFeat['user_item_city_id_buy_count']
result = pd.merge(result,feat,on=['user_id','item_city_id'],how='left')
"3.统计user-item_city_id转化率特征"
buy_ratio = list(map(lambda x,y : -1 if y == 0 else x/y,result.user_item_city_id_buy_count,result.user_item_city_id_count))
result['user_item_city_id_buy_ratio'] = buy_ratio
"4.统计user-item_city_id历史未被够买的次数"
result['user_item_city_id_not_buy_count'] = result['user_item_city_id_count'] - result['user_item_city_id_buy_count']
return result
def get_user_context_page_feat(data,dataFeat):
"user-context_page的特征提取"
result = pd.DataFrame(dataFeat[['user_id','context_page_id']])
result = result.drop_duplicates(['user_id','context_page_id'],keep='first')
"1.统计user-context_page_id出现次数"
dataFeat['user_context_page_id_count'] = dataFeat['user_id']
feat = pd.pivot_table(dataFeat,index=['user_id','context_page_id'],values='user_context_page_id_count',aggfunc='count').reset_index()
del dataFeat['user_context_page_id_count']
result = pd.merge(result,feat,on=['user_id','context_page_id'],how='left')
"2.统计user-context_page_id历史被购买的次数"
dataFeat['user_context_page_id_buy_count'] = dataFeat['is_trade']
feat = pd.pivot_table(dataFeat,index=['user_id','context_page_id'],values='user_context_page_id_buy_count',aggfunc='sum').reset_index()
del dataFeat['user_context_page_id_buy_count']
result = pd.merge(result,feat,on=['user_id','context_page_id'],how='left')
"3.统计user-context_page_id转化率特征"
buy_ratio = list(map(lambda x,y : -1 if y == 0 else x/y,result.user_context_page_id_buy_count,result.user_context_page_id_count))
result['user_context_page_id_buy_ratio'] = buy_ratio
"4.统计user-context_page_id历史未被够买的次数"
result['user_context_page_id_not_buy_count'] = result['user_context_page_id_count'] - result['user_context_page_id_buy_count']
return result
def get_user_user_occupation_feat(data,dataFeat):
"user-user_occupation的特征提取"
result = pd.DataFrame(dataFeat[['user_id','user_occupation_id']])
result = result.drop_duplicates(['user_id','user_occupation_id'],keep='first')
"1.统计user-user_occupation_id出现次数"
dataFeat['user_user_occupation_id_count'] = dataFeat['user_id']
feat = pd.pivot_table(dataFeat,index=['user_id','user_occupation_id'],values='user_user_occupation_id_count',aggfunc='count').reset_index()
del dataFeat['user_user_occupation_id_count']
result = pd.merge(result,feat,on=['user_id','user_occupation_id'],how='left')
"2.统计user-user_occupation_id历史被购买的次数"
dataFeat['user_user_occupation_id_buy_count'] = dataFeat['is_trade']
feat = pd.pivot_table(dataFeat,index=['user_id','user_occupation_id'],values='user_user_occupation_id_buy_count',aggfunc='sum').reset_index()
del dataFeat['user_user_occupation_id_buy_count']
result = pd.merge(result,feat,on=['user_id','user_occupation_id'],how='left')
"3.统计user-user_occupation_id转化率特征"
buy_ratio = list(map(lambda x,y : -1 if y == 0 else x/y,result.user_user_occupation_id_buy_count,result.user_user_occupation_id_count))
result['user_user_occupation_id_buy_ratio'] = buy_ratio
"4.统计user-user_occupation_id历史未被够买的次数"
result['user_user_occupation_id_not_buy_count'] = result['user_user_occupation_id_count'] - result['user_user_occupation_id_buy_count']
return result
def get_user_shop_review_num_level_feat(data,dataFeat):
"user-shop_review_num_level的特征提取"
result = pd.DataFrame(dataFeat[['user_id','shop_review_num_level']])
result = result.drop_duplicates(['user_id','shop_review_num_level'],keep='first')
"1.统计user-shop_review_num_level出现次数"
dataFeat['user_shop_review_num_level_count'] = dataFeat['user_id']
feat = pd.pivot_table(dataFeat,index=['user_id','shop_review_num_level'],values='user_shop_review_num_level_count',aggfunc='count').reset_index()
del dataFeat['user_shop_review_num_level_count']
result = pd.merge(result,feat,on=['user_id','shop_review_num_level'],how='left')
"2.统计user-shop_review_num_level历史被购买的次数"
dataFeat['user_shop_review_num_level_buy_count'] = dataFeat['is_trade']
feat = pd.pivot_table(dataFeat,index=['user_id','shop_review_num_level'],values='user_shop_review_num_level_buy_count',aggfunc='sum').reset_index()
del dataFeat['user_shop_review_num_level_buy_count']
result = pd.merge(result,feat,on=['user_id','shop_review_num_level'],how='left')
"3.统计user-shop_review_num_level转化率特征"
buy_ratio = list(map(lambda x,y : -1 if y == 0 else x/y,result.user_shop_review_num_level_buy_count,result.user_shop_review_num_level_count))
result['user_shop_review_num_level_buy_ratio'] = buy_ratio
"4.统计user-shop_review_num_level历史未被够买的次数"
result['user_shop_review_num_level_not_buy_count'] = result['user_shop_review_num_level_count'] - result['user_shop_review_num_level_buy_count']
return result
def get_user_item_category_list_2_feat(data,dataFeat):
"user-item_category_list_2的特征提取"
result = pd.DataFrame(dataFeat[['user_id','item_category_list_2']])
result = result.drop_duplicates(['user_id','item_category_list_2'],keep='first')
"1.统计user-item_category_list_2出现次数"
dataFeat['user_item_category_list_2_count'] = dataFeat['user_id']
feat =
|
pd.pivot_table(dataFeat,index=['user_id','item_category_list_2'],values='user_item_category_list_2_count',aggfunc='count')
|
pandas.pivot_table
|
"""
Author: <NAME>
"""
import math
import numpy as np
import pandas as pd
from tqdm import tqdm
from bloomberg import BBG
from datetime import timedelta
from pandas.tseries.offsets import BDay
class CommFutureTracker(object):
"""
Class for creating excess return indices for commodity futures using data from bloomberg.
A default front-month roll schedule is assumed but it can be provided by the user.
At the start date, we assume we trade 100 units of the commodity in the contract defined by the roll schedule.
We MtM the position over the month and then roll it into the next contracts as defined by the roll schedule.
Commodities belonging to the Bloomberg Commodity Index (BCOM) and the S&P GSCI Commodity Index are covered.
The S&P GSCI Commodity Index is the default roll schedule but BCOM and used-defined are also supported.
ROLL SCHEDULE synthax:
The roll schedule is a list of size 12, each element corresponding to a month of the year in their natural order.
The list should contain a month code referring to the maturity of the contract to be held in that month according
to the table below:
|Month |Month Code|
|-----------|----------|
|January | F |
|February | G |
|March | H |
|April | J |
|May | K |
|June | M |
|July | N |
|August | Q |
|September | U |
|October | V |
|November | X |
|December | Z |
when the letter is followed by a + sign, it means that the maturity of the contract is in the following year
Example: The roll schedule [N, N, N, N, N, Z, Z, Z, H+, H+, H+, H+] does the following:
holds the contract maturing in July of the same year for the first five months of the year,
then rolls that position into the December contract maturing in the same year
and holds that position for the next three months,
then rolls that position into the March contract maturing the following year
and holds that position until the end of the year,
at which point the cycle restarts: in January the position is rolled back into the July contract.
"""
# These are the roll schedules followed by the commodities in the Bloomberg Commodity Index
# See https://data.bloomberglp.com/indices/sites/2/2018/02/BCOM-Methodology-January-2018_FINAL-2.pdf
bcom_roll_schedules = {
'C ': ['H', 'H', 'K', 'K', 'N', 'N', 'U', 'U', 'Z', 'Z', 'Z', 'H+'],
'S ': ['H', 'H', 'K', 'K', 'N', 'N', 'X', 'X', 'X', 'X', 'F+', 'F+'],
'SM': ['H', 'H', 'K', 'K', 'N', 'N', 'Z', 'Z', 'Z', 'Z', 'F+', 'F+'],
'BO': ['H', 'H', 'K', 'K', 'N', 'N', 'Z', 'Z', 'Z', 'Z', 'F+', 'F+'],
'W ': ['H', 'H', 'K', 'K', 'N', 'N', 'U', 'U', 'Z', 'Z', 'Z', 'H+'],
'KW': ['H', 'H', 'K', 'K', 'N', 'N', 'U', 'U', 'Z', 'Z', 'Z', 'H+'],
'CC': ['H', 'H', 'K', 'K', 'N', 'N', 'U', 'U', 'Z', 'Z', 'Z', 'H+'],
'CT': ['H', 'H', 'K', 'K', 'N', 'N', 'Z', 'Z', 'Z', 'Z', 'Z', 'H+'],
'KC': ['H', 'H', 'K', 'K', 'N', 'N', 'U', 'U', 'Z', 'Z', 'Z', 'H+'],
'LC': ['G', 'J', 'J', 'M', 'M', 'Q', 'Q', 'V', 'V', 'Z', 'Z', 'G+'],
'LH': ['G', 'J', 'J', 'M', 'M', 'N', 'Q', 'V', 'V', 'Z', 'Z', 'G+'],
'SB': ['H', 'H', 'K', 'K', 'N', 'N', 'V', 'V', 'V', 'H+', 'H+', 'H+'],
'CL': ['H', 'H', 'K', 'K', 'N', 'N', 'U', 'U', 'X', 'X', 'F+', 'F+'],
'CO': ['H', 'K', 'K', 'N', 'N', 'U', 'U', 'X', 'X', 'F+', 'F+', 'H+'],
'HO': ['H', 'H', 'K', 'K', 'N', 'N', 'U', 'U', 'X', 'X', 'F+', 'F+'],
'QS': ['H', 'H', 'K', 'K', 'N', 'N', 'U', 'U', 'X', 'X', 'F+', 'F+'],
'XB': ['H', 'H', 'K', 'K', 'N', 'N', 'U', 'U', 'X', 'X', 'F+', 'F+'],
'NG': ['H', 'H', 'K', 'K', 'N', 'N', 'U', 'U', 'X', 'X', 'F+', 'F+'],
'HG': ['H', 'H', 'K', 'K', 'N', 'N', 'U', 'U', 'Z', 'Z', 'Z', 'H+'],
'LN': ['H', 'H', 'K', 'K', 'N', 'N', 'U', 'U', 'X', 'X', 'F+', 'F+'],
'LX': ['H', 'H', 'K', 'K', 'N', 'N', 'U', 'U', 'X', 'X', 'F+', 'F+'],
'LA': ['H', 'H', 'K', 'K', 'N', 'N', 'U', 'U', 'X', 'X', 'F+', 'F+'],
'GC': ['G', 'J', 'J', 'M', 'M', 'Q', 'Q', 'Z', 'Z', 'Z', 'Z', 'G+'],
'SI': ['H', 'H', 'K', 'K', 'N', 'N', 'U', 'U', 'Z', 'Z', 'Z', 'H+'],
}
# These are the roll schedules followed by the commodities in the S&P GSCI Commodity Index
# See https://www.spindices.com/documents/methodologies/methodology-sp-gsci.pdf
gsci_roll_schedules = {
'C ': ['H', 'H', 'K', 'K', 'N', 'N', 'U', 'U', 'Z', 'Z', 'Z', 'H+'],
'S ': ['H', 'H', 'K', 'K', 'N', 'N', 'X', 'X', 'X', 'X', 'F+', 'F+'],
'W ': ['H', 'H', 'K', 'K', 'N', 'N', 'U', 'U', 'Z', 'Z', 'Z', 'H+'],
'KW': ['H', 'H', 'K', 'K', 'N', 'N', 'U', 'U', 'Z', 'Z', 'Z', 'H+'],
'SB': ['H', 'H', 'K', 'K', 'N', 'N', 'V', 'V', 'V', 'H+', 'H+', 'H+'],
'CC': ['H', 'H', 'K', 'K', 'N', 'N', 'U', 'U', 'Z', 'Z', 'Z', 'H+'],
'CT': ['H', 'H', 'K', 'K', 'N', 'N', 'Z', 'Z', 'Z', 'Z', 'Z', 'H+'],
'KC': ['H', 'H', 'K', 'K', 'N', 'N', 'U', 'U', 'Z', 'Z', 'Z', 'H+'],
'OJ': ['H', 'H', 'K', 'K', 'N', 'N', 'U', 'U', 'X', 'X', 'F+', 'F+'],
'FC': ['H', 'H', 'K', 'K', 'Q', 'Q', 'Q', 'V', 'V', 'F+', 'F+', 'F+'],
'LC': ['G', 'J', 'J', 'M', 'M', 'Q', 'Q', 'V', 'V', 'Z', 'Z', 'G+'],
'LH': ['G', 'J', 'J', 'M', 'M', 'N', 'Q', 'V', 'V', 'Z', 'Z', 'G+'],
'CL': ['H', 'H', 'K', 'K', 'N', 'N', 'U', 'U', 'X', 'X', 'F+', 'F+'],
'CO': ['H', 'K', 'K', 'N', 'N', 'U', 'U', 'X', 'X', 'F+', 'F+', 'H+'],
'HO': ['H', 'H', 'K', 'K', 'N', 'N', 'U', 'U', 'X', 'X', 'F+', 'F+'],
'QS': ['H', 'H', 'K', 'K', 'N', 'N', 'U', 'U', 'X', 'X', 'F+', 'F+'],
'XB': ['H', 'H', 'K', 'K', 'N', 'N', 'U', 'U', 'X', 'X', 'F+', 'F+'],
'NG': ['H', 'H', 'K', 'K', 'N', 'N', 'U', 'U', 'X', 'X', 'F+', 'F+'],
'LX': ['H', 'H', 'K', 'K', 'N', 'N', 'U', 'U', 'X', 'X', 'F+', 'F+'],
'LL': ['H', 'H', 'K', 'K', 'N', 'N', 'U', 'U', 'X', 'X', 'F+', 'F+'],
'LN': ['H', 'H', 'K', 'K', 'N', 'N', 'U', 'U', 'X', 'X', 'F+', 'F+'],
'LT': ['H', 'H', 'K', 'K', 'N', 'N', 'U', 'U', 'X', 'X', 'F+', 'F+'],
'LP': ['H', 'H', 'K', 'K', 'N', 'N', 'U', 'U', 'X', 'X', 'F+', 'F+'],
'LA': ['H', 'H', 'K', 'K', 'N', 'N', 'U', 'U', 'X', 'X', 'F+', 'F+'],
'GC': ['G', 'J', 'J', 'M', 'M', 'Q', 'Q', 'Z', 'Z', 'Z', 'Z', 'G+'],
'SI': ['H', 'H', 'K', 'K', 'N', 'N', 'U', 'U', 'Z', 'Z', 'Z', 'H+'],
'PL': ['J', 'J', 'J', 'N', 'N', 'N', 'V', 'V', 'V', 'F+', 'F+', 'F+'],
}
sector_dict = {'C ': 'Grains',
'S ': 'Grains',
'SM': 'Grains',
'BO': 'Grains',
'W ': 'Grains',
'KW': 'Grains',
'CC': 'Softs',
'CT': 'Softs',
'KC': 'Softs',
'LC': 'Livestock',
'LH': 'Livestock',
'SB': 'Softs',
'CL': 'Energy',
'CO': 'Energy',
'HO': 'Energy',
'QS': 'Energy',
'XB': 'Energy',
'NG': 'Energy',
'HG': 'Base Metals',
'LN': 'Base Metals',
'LX': 'Base Metals',
'LA': 'Base Metals',
'GC': 'Precious Metals',
'SI': 'Precious Metals'}
def __init__(self, comm_bbg_code, start_date='2004-01-05', end_date='today',
roll_schedule='GSCI', roll_start_bday=5, roll_window_size=5):
"""
Returns an object with the following attributes:
- contract_list: codes for all of the future contracts used in the tracker.
- first_notice_dates: first notice dates for all of the future contracts used in the tracker. # TODO is this necessary?
- tickers: list with 2 strs with Bloomberg ticker for the spot rates and 1M forward rates
- spot_rate: Series with the spot rate data
- fwd: Series with the 1M fwd rate data
- er_index: Series with the excess return index
- ts_df: DataFrame with columns 'Spot', 'Fwd', and 'Excess Return Index'
:param comm_bbg_code: 1- or 2-character str with the Bloomberg code for the commodity.
:param start_date: Starting date for the tracker, on any format accepted by pandas.to_datetime().
:param end_date: last date of the tracker, on any format accepted by pandas.to_datetime().
:param roll_schedule: 12 element list with the rolling schedule
:param roll_start_bday: #TODO finish
:param roll_window_size: #TODO finish
"""
comm_bbg_code = comm_bbg_code.upper()
if type(roll_schedule) == list:
assert len(roll_schedule) == 12, 'Size of roll_schedule must be 12'
self.roll_schedule = roll_schedule
elif roll_schedule.upper() == 'BCOM':
assert comm_bbg_code in self.bcom_roll_schedules.keys(), f'BCOM does not support {comm_bbg_code}'
self.roll_schedule = self.bcom_roll_schedules[comm_bbg_code]
elif roll_schedule.upper() == 'GSCI':
assert comm_bbg_code in self.gsci_roll_schedules.keys(), f'GSCI does not support {comm_bbg_code}'
self.roll_schedule = self.gsci_roll_schedules[comm_bbg_code]
else:
raise ValueError('Roll schedule not supported')
self.comm_bbg_code = comm_bbg_code.upper()
self.roll_start_bday = roll_start_bday
self.roll_window_size = roll_window_size
self.start_date = (pd.to_datetime(start_date) + BDay(1)).date()
self.end_date =
|
pd.to_datetime(end_date)
|
pandas.to_datetime
|
##### file path
### input
# data_set keys and labels
path_df_part_1_uic_label = "df_part_1_uic_label.csv"
path_df_part_2_uic_label = "df_part_2_uic_label.csv"
path_df_part_3_uic = "df_part_3_uic.csv"
# data_set features
path_df_part_1_U = "df_part_1_U.csv"
path_df_part_1_I = "df_part_1_I.csv"
path_df_part_1_C = "df_part_1_C.csv"
path_df_part_1_IC = "df_part_1_IC.csv"
path_df_part_1_UI = "df_part_1_UI.csv"
path_df_part_1_UC = "df_part_1_UC.csv"
path_df_part_2_U = "df_part_2_U.csv"
path_df_part_2_I = "df_part_2_I.csv"
path_df_part_2_C = "df_part_2_C.csv"
path_df_part_2_IC = "df_part_2_IC.csv"
path_df_part_2_UI = "df_part_2_UI.csv"
path_df_part_2_UC = "df_part_2_UC.csv"
path_df_part_3_U = "df_part_3_U.csv"
path_df_part_3_I = "df_part_3_I.csv"
path_df_part_3_C = "df_part_3_C.csv"
path_df_part_3_IC = "df_part_3_IC.csv"
path_df_part_3_UI = "df_part_3_UI.csv"
path_df_part_3_UC = "df_part_3_UC.csv"
### out file
### intermediate file
# data partitions with different labels
path_df_part_1_uic_label_0 = "df_part_1_uic_label_0.csv"
path_df_part_1_uic_label_1 = "df_part_1_uic_label_1.csv"
path_df_part_2_uic_label_0 = "df_part_2_uic_label_0.csv"
path_df_part_2_uic_label_1 = "df_part_2_uic_label_1.csv"
# training-set uic-label keys with k-means cluster labels
path_df_part_1_uic_label_cluster = "df_part_1_uic_label_cluster.csv"
path_df_part_2_uic_label_cluster = "df_part_2_uic_label_cluster.csv"
# scalers for data standardization, stored as python pickles
# (one per part's features)
path_df_part_1_scaler = "df_part_1_scaler"
path_df_part_2_scaler = "df_part_2_scaler"
import pandas as pd
import numpy as np
def df_read(path, mode='r'):
'''load a csv file from path into a DataFrame
'''
path_df = open(path, mode)
try:
df = pd.read_csv(path_df, index_col=False)
finally:
path_df.close()
return df
def subsample(df, sub_size):
'''randomly sub-sample a dataframe
@param df: dataframe
@param sub_size: size of the sub-sample
@return sub-dataframe with the same format as df
'''
if sub_size >= len(df):
return df
else:
return df.sample(n=sub_size)
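# Illustrative use of the helpers above (subsample itself is not called in the portion
# of the script shown here):
#   labels = df_read(path_df_part_1_uic_label)   # load the part_1 u-i-c-label keys
#   sample = subsample(labels, 100000)           # draw at most 100k rows from them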
########################################################################
'''Step 1: divide the data into positive and negative sub-sets by the u-i-c-label keys
p.s. we first generate the u-i-c keys, then merge in the feature sets and operate chunk by chunk;
this somewhat roundabout workflow is designed to save memory on a modest PC.
'''
df_part_1_uic_label = df_read(path_df_part_1_uic_label) # loading total keys
df_part_2_uic_label = df_read(path_df_part_2_uic_label)
df_part_1_uic_label_0 = df_part_1_uic_label[df_part_1_uic_label['label'] == 0]
df_part_1_uic_label_1 = df_part_1_uic_label[df_part_1_uic_label['label'] == 1]
df_part_2_uic_label_0 = df_part_2_uic_label[df_part_2_uic_label['label'] == 0]
df_part_2_uic_label_1 = df_part_2_uic_label[df_part_2_uic_label['label'] == 1]
df_part_1_uic_label_0.to_csv(path_df_part_1_uic_label_0, index=False)
df_part_1_uic_label_1.to_csv(path_df_part_1_uic_label_1, index=False)
df_part_2_uic_label_0.to_csv(path_df_part_2_uic_label_0, index=False)
df_part_2_uic_label_1.to_csv(path_df_part_2_uic_label_1, index=False)
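# the negative-label keys written above are re-read below in chunks, so the fully
# merged feature matrix never has to sit in memory all at once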
#######################################################################
'''Step 2: clustering on the negative sub-set
number of clusters ~ 35, using mini-batch k-means
'''
# clustering based on sklearn
from sklearn import preprocessing
from sklearn.cluster import MiniBatchKMeans
import pickle
##### part_1 #####
# loading features
df_part_1_U = df_read(path_df_part_1_U)
df_part_1_I = df_read(path_df_part_1_I)
df_part_1_C = df_read(path_df_part_1_C)
df_part_1_IC = df_read(path_df_part_1_IC)
df_part_1_UI = df_read(path_df_part_1_UI)
df_part_1_UC = df_read(path_df_part_1_UC)
# process in chunks because the set of u-i pairs is too large to fit in memory;
# this first pass only fits the scaler so it can later transform the full data set
scaler_1 = preprocessing.StandardScaler()
batch = 0
for df_part_1_uic_label_0 in pd.read_csv(open(path_df_part_1_uic_label_0, 'r'), chunksize=150000):
try:
# construct part_1's sub-training set
train_data_df_part_1 = pd.merge(df_part_1_uic_label_0, df_part_1_U, how='left', on=['user_id'])
train_data_df_part_1 = pd.merge(train_data_df_part_1, df_part_1_I, how='left', on=['item_id'])
train_data_df_part_1 = pd.merge(train_data_df_part_1, df_part_1_C, how='left', on=['item_category'])
train_data_df_part_1 = pd.merge(train_data_df_part_1, df_part_1_IC, how='left', on=['item_id', 'item_category'])
train_data_df_part_1 = pd.merge(train_data_df_part_1, df_part_1_UI, how='left',
on=['user_id', 'item_id', 'item_category', 'label'])
train_data_df_part_1 = pd.merge(train_data_df_part_1, df_part_1_UC, how='left', on=['user_id', 'item_category'])
# getting all the complete features for clustering
train_X_1 = train_data_df_part_1.as_matrix(
['u_b1_count_in_6', 'u_b2_count_in_6', 'u_b3_count_in_6', 'u_b4_count_in_6', 'u_b_count_in_6',
'u_b1_count_in_3', 'u_b2_count_in_3', 'u_b3_count_in_3', 'u_b4_count_in_3', 'u_b_count_in_3',
'u_b1_count_in_1', 'u_b2_count_in_1', 'u_b3_count_in_1', 'u_b4_count_in_1', 'u_b_count_in_1',
'u_b4_rate',
'i_u_count_in_6', 'i_u_count_in_3', 'i_u_count_in_1',
'i_b1_count_in_6', 'i_b2_count_in_6', 'i_b3_count_in_6', 'i_b4_count_in_6', 'i_b_count_in_6',
'i_b1_count_in_3', 'i_b2_count_in_3', 'i_b3_count_in_3', 'i_b4_count_in_3', 'i_b_count_in_3',
'i_b1_count_in_1', 'i_b2_count_in_1', 'i_b3_count_in_1', 'i_b4_count_in_1', 'i_b_count_in_1',
'i_b4_rate',
'c_b1_count_in_6', 'c_b2_count_in_6', 'c_b3_count_in_6', 'c_b4_count_in_6', 'c_b_count_in_6',
'c_b1_count_in_3', 'c_b2_count_in_3', 'c_b3_count_in_3', 'c_b4_count_in_3', 'c_b_count_in_3',
'c_b1_count_in_1', 'c_b2_count_in_1', 'c_b3_count_in_1', 'c_b4_count_in_1', 'c_b_count_in_1',
'c_b4_rate',
'ic_u_rank_in_c', 'ic_b_rank_in_c', 'ic_b4_rank_in_c',
'ui_b1_count_in_6', 'ui_b2_count_in_6', 'ui_b3_count_in_6', 'ui_b4_count_in_6', 'ui_b_count_in_6',
'ui_b1_count_in_3', 'ui_b2_count_in_3', 'ui_b3_count_in_3', 'ui_b4_count_in_3', 'ui_b_count_in_3',
'ui_b1_count_in_1', 'ui_b2_count_in_1', 'ui_b3_count_in_1', 'ui_b4_count_in_1', 'ui_b_count_in_1',
'ui_b_count_rank_in_u', 'ui_b_count_rank_in_uc',
'uc_b1_count_in_6', 'uc_b2_count_in_6', 'uc_b3_count_in_6', 'uc_b4_count_in_6', 'uc_b_count_in_6',
'uc_b1_count_in_3', 'uc_b2_count_in_3', 'uc_b3_count_in_3', 'uc_b4_count_in_3', 'uc_b_count_in_3',
'uc_b1_count_in_1', 'uc_b2_count_in_1', 'uc_b3_count_in_1', 'uc_b4_count_in_1', 'uc_b_count_in_1',
'uc_b_count_rank_in_u'])
# feature standardization
scaler_1.partial_fit(train_X_1)
batch += 1
print('chunk %d done.' % batch)
except StopIteration:
print("finish.")
break
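# --- Illustrative sketch (not part of the original pipeline) ---
# Shows on dummy data what the loop above achieves: StandardScaler.partial_fit
# accumulates mean/variance chunk by chunk, after which transform() standardizes
# any chunk with the globally fitted statistics. All names prefixed with _demo_
# are invented for this example only.
import numpy as np
from sklearn import preprocessing
_demo_scaler = preprocessing.StandardScaler()
for _demo_chunk in np.array_split(np.random.rand(1000, 3), 4):  # pretend these are CSV chunks
    _demo_scaler.partial_fit(_demo_chunk)  # accumulate statistics incrementally
_demo_standardized = _demo_scaler.transform(np.random.rand(10, 3))  # apply the fitted scaling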
# initialize the mini-batch k-means clusterer
mbk_1 = MiniBatchKMeans(init='k-means++', n_clusters=1000, batch_size=500, reassignment_ratio=10 ** -4)
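# n_clusters, batch_size and reassignment_ratio are kept as in the original script;
# the clusterer is presumably fitted incrementally (partial_fit) on the standardized
# chunks in the loop below, mirroring the scaler-fitting loop above.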
classes_1 = []
batch = 0
for df_part_1_uic_label_0 in pd.read_csv(open(path_df_part_1_uic_label_0, 'r'), chunksize=15000):
try:
# construct part_1's sub-training set
train_data_df_part_1 = pd.merge(df_part_1_uic_label_0, df_part_1_U, how='left', on=['user_id'])
train_data_df_part_1 = pd.merge(train_data_df_part_1, df_part_1_I, how='left', on=['item_id'])
train_data_df_part_1 = pd.merge(train_data_df_part_1, df_part_1_C, how='left', on=['item_category'])
train_data_df_part_1 = pd.merge(train_data_df_part_1, df_part_1_IC, how='left', on=['item_id', 'item_category'])
train_data_df_part_1 = pd.merge(train_data_df_part_1, df_part_1_UI, how='left',
on=['user_id', 'item_id', 'item_category', 'label'])
train_data_df_part_1 =
|
pd.merge(train_data_df_part_1, df_part_1_UC, how='left', on=['user_id', 'item_category'])
|
pandas.merge
|
#
# Copyright (C) 2021 The Delta Lake Project Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import pytest
from datetime import date
from typing import Optional, Sequence
import pandas as pd
from delta_sharing.protocol import AddFile, Metadata, Table
from delta_sharing.reader import DeltaSharingReader
from delta_sharing.rest_client import ListFilesInTableResponse, DataSharingRestClient
from delta_sharing.tests.conftest import ENABLE_INTEGRATION, SKIP_MESSAGE
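# The test below stubs out DataSharingRestClient so that DeltaSharingReader simply reads
# two locally written parquet files and returns their concatenation as a pandas DataFrame;
# no network access is involved.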
def test_to_pandas_non_partitioned(tmp_path):
pdf1 = pd.DataFrame({"a": [1, 2, 3], "b": ["a", "b", "c"]})
pdf2 = pd.DataFrame({"a": [4, 5, 6], "b": ["d", "e", "f"]})
pdf1.to_parquet(tmp_path / "pdf1.parquet")
pdf2.to_parquet(tmp_path / "pdf2.parquet")
class RestClientMock:
def list_files_in_table(
self,
table: Table,
*,
predicateHints: Optional[Sequence[str]] = None,
limitHint: Optional[int] = None,
) -> ListFilesInTableResponse:
assert table == Table("table_name", "share_name", "schema_name")
metadata = Metadata(
schema_string=(
'{"fields":['
'{"metadata":{},"name":"a","nullable":true,"type":"long"},'
'{"metadata":{},"name":"b","nullable":true,"type":"string"}'
'],"type":"struct"}'
)
)
add_files = [
AddFile(
url=str(tmp_path / "pdf1.parquet"),
id="pdf1",
partition_values={},
size=0,
stats="",
),
AddFile(
url=str(tmp_path / "pdf2.parquet"),
id="pdf2",
partition_values={},
size=0,
stats="",
),
]
return ListFilesInTableResponse(
table=table, protocol=None, metadata=metadata, add_files=add_files
)
reader = DeltaSharingReader(Table("table_name", "share_name", "schema_name"), RestClientMock())
pdf = reader.to_pandas()
expected = pd.concat([pdf1, pdf2]).reset_index(drop=True)
pd.testing.assert_frame_equal(pdf, expected)
def test_to_pandas_partitioned(tmp_path):
pdf1 =
|
pd.DataFrame({"a": [1, 2, 3]})
|
pandas.DataFrame
|
import pandas as pd
import json
import urllib3
from time import sleep
http = urllib3.PoolManager()
col_data = pd.DataFrame()
mat_data = pd.DataFrame()
periods = ["1","2"]
pages = ["1","2","3","4","5","6","7","8","9","10"]
leagueId="8048"
eventId=""
dat_url = pd.read_csv("/spiders/tmp/match_details.csv")
eventId_gp= [str(url).split("/")[6] for url in dat_url["url"]]
len_url= len(eventId_gp)
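# For every match id extracted above, the nested loops below page through the ball-by-ball
# commentary API (per innings/period and per page), tag each page with bowler, batsman,
# innings and run-out information, and append it to mat_data.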
for count in range(len_url):
eventId = eventId_gp[count]
print(count)
for period in periods:
for page in pages:
sleep(15)
col_data = pd.DataFrame()
match_dat= http.request('GET', 'https://hsapi.espncricinfo.com/v1/pages/match/comments?lang=en&leagueId='+leagueId+'&eventId='+eventId+'&period=' +period+ '&page='+page+'&filter=full&liveTest=false')
if(len(match_dat.data)<100):
break
data = json.loads(match_dat.data)
df = pd.json_normalize(data['comments'])
bowler=[]
batsman=[]
for bat,bowl in zip(df["currentBatsmen"],df["currentBowlers"]):
batsman.append(bat[0]["name"])
bowler.append(bowl[0]["name"])
df["bowler"]= bowler
df["batsman"] = batsman
if(period=="1"):
df["innings"]=1
else:
df["innings"]=2
col_data = df.copy()  # copy after setting innings so the column is kept in the appended data
if("matchWicket.text" in col_data.columns):
col_data["matchWicket.text"].fillna("NA",inplace=True)
col_data["run_out"]= ["Yes" if "run out" in wicket_text else "No" for wicket_text in col_data["matchWicket.text"]]
else:
col_data["matchWicket.text"]="NA"
col_data["run_out"]="No"
col_data["match_id"] = eventId
mat_data =
|
pd.concat([mat_data,col_data])
|
pandas.concat
|
import pandas as pd
ratings = pd.read_csv('dataset/ratings.csv')
movies =
|
pd.read_csv('dataset/movies.csv')
|
pandas.read_csv
|
import numpy as np
import pytest
from pandas._libs.tslibs import IncompatibleFrequency
from pandas import (
DatetimeIndex,
Series,
Timestamp,
date_range,
isna,
notna,
offsets,
)
import pandas._testing as tm
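# Series.asof returns the last non-NaN value whose index label is at or before the
# requested key (scalar or array-like of keys); the tests below exercise this for
# nanosecond-resolution DatetimeIndex, PeriodIndex, all-NaN series and error cases.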
class TestSeriesAsof:
def test_asof_nanosecond_index_access(self):
ts = Timestamp("20130101").value
dti = DatetimeIndex([ts + 50 + i for i in range(100)])
ser = Series(np.random.randn(100), index=dti)
first_value = ser.asof(ser.index[0])
# GH#46903 previously incorrectly was "day"
assert dti.resolution == "nanosecond"
# this used to not work bc parsing was done by dateutil that didn't
# handle nanoseconds
assert first_value == ser["2013-01-01 00:00:00.000000050"]
expected_ts = np.datetime64("2013-01-01 00:00:00.000000050", "ns")
assert first_value == ser[Timestamp(expected_ts)]
def test_basic(self):
# array or list or dates
N = 50
rng = date_range("1/1/1990", periods=N, freq="53s")
ts = Series(np.random.randn(N), index=rng)
ts.iloc[15:30] = np.nan
dates = date_range("1/1/1990", periods=N * 3, freq="25s")
result = ts.asof(dates)
assert notna(result).all()
lb = ts.index[14]
ub = ts.index[30]
result = ts.asof(list(dates))
assert notna(result).all()
lb = ts.index[14]
ub = ts.index[30]
mask = (result.index >= lb) & (result.index < ub)
rs = result[mask]
assert (rs == ts[lb]).all()
val = result[result.index[result.index >= ub][0]]
assert ts[ub] == val
def test_scalar(self):
N = 30
rng = date_range("1/1/1990", periods=N, freq="53s")
ts = Series(np.arange(N), index=rng)
ts.iloc[5:10] = np.NaN
ts.iloc[15:20] = np.NaN
val1 = ts.asof(ts.index[7])
val2 = ts.asof(ts.index[19])
assert val1 == ts[4]
assert val2 == ts[14]
# accepts strings
val1 = ts.asof(str(ts.index[7]))
assert val1 == ts[4]
# in there
result = ts.asof(ts.index[3])
assert result == ts[3]
# no as of value
d = ts.index[0] - offsets.BDay()
assert np.isnan(ts.asof(d))
def test_with_nan(self):
# basic asof test
rng = date_range("1/1/2000", "1/2/2000", freq="4h")
s = Series(np.arange(len(rng)), index=rng)
r = s.resample("2h").mean()
result = r.asof(r.index)
expected = Series(
[0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6.0],
index=date_range("1/1/2000", "1/2/2000", freq="2h"),
)
tm.assert_series_equal(result, expected)
r.iloc[3:5] = np.nan
result = r.asof(r.index)
expected = Series(
[0, 0, 1, 1, 1, 1, 3, 3, 4, 4, 5, 5, 6.0],
index=date_range("1/1/2000", "1/2/2000", freq="2h"),
)
tm.assert_series_equal(result, expected)
r.iloc[-3:] = np.nan
result = r.asof(r.index)
expected = Series(
[0, 0, 1, 1, 1, 1, 3, 3, 4, 4, 4, 4, 4.0],
index=date_range("1/1/2000", "1/2/2000", freq="2h"),
)
tm.assert_series_equal(result, expected)
def test_periodindex(self):
from pandas import (
PeriodIndex,
period_range,
)
# array or list or dates
N = 50
rng = period_range("1/1/1990", periods=N, freq="H")
ts = Series(np.random.randn(N), index=rng)
ts.iloc[15:30] = np.nan
dates = date_range("1/1/1990", periods=N * 3, freq="37min")
result = ts.asof(dates)
assert notna(result).all()
lb = ts.index[14]
ub = ts.index[30]
result = ts.asof(list(dates))
assert notna(result).all()
lb = ts.index[14]
ub = ts.index[30]
pix = PeriodIndex(result.index.values, freq="H")
mask = (pix >= lb) & (pix < ub)
rs = result[mask]
assert (rs == ts[lb]).all()
ts.iloc[5:10] = np.nan
ts.iloc[15:20] = np.nan
val1 = ts.asof(ts.index[7])
val2 = ts.asof(ts.index[19])
assert val1 == ts[4]
assert val2 == ts[14]
# accepts strings
val1 = ts.asof(str(ts.index[7]))
assert val1 == ts[4]
# in there
assert ts.asof(ts.index[3]) == ts[3]
# no as of value
d = ts.index[0].to_timestamp() - offsets.BDay()
assert isna(ts.asof(d))
# Mismatched freq
msg = "Input has different freq"
with pytest.raises(IncompatibleFrequency, match=msg):
ts.asof(rng.asfreq("D"))
def test_errors(self):
s = Series(
[1, 2, 3],
index=[Timestamp("20130101"), Timestamp("20130103"), Timestamp("20130102")],
)
# non-monotonic
assert not s.index.is_monotonic_increasing
with pytest.raises(ValueError, match="requires a sorted index"):
s.asof(s.index[0])
# subset with Series
N = 10
rng = date_range("1/1/1990", periods=N, freq="53s")
s = Series(np.random.randn(N), index=rng)
with pytest.raises(ValueError, match="not valid for Series"):
s.asof(s.index[0], subset="foo")
def test_all_nans(self):
# GH 15713
# series is all nans
# testing non-default indexes
N = 50
rng = date_range("1/1/1990", periods=N, freq="53s")
dates = date_range("1/1/1990", periods=N * 3, freq="25s")
result = Series(np.nan, index=rng).asof(dates)
expected = Series(np.nan, index=dates)
tm.assert_series_equal(result, expected)
# testing scalar input
date = date_range("1/1/1990", periods=N * 3, freq="25s")[0]
result =
|
Series(np.nan, index=rng)
|
pandas.Series
|
import pytest
from typing import List, Tuple
import pandas as pd
from nerblackbox.modules.datasets.formatter.auto_formatter import AutoFormatter
from nerblackbox.modules.datasets.formatter.base_formatter import (
BaseFormatter,
SENTENCES_ROWS_PRETOKENIZED,
)
from nerblackbox.modules.datasets.formatter.conll2003_formatter import (
CoNLL2003Formatter,
)
from nerblackbox.modules.datasets.formatter.swe_nerc_formatter import SweNercFormatter
from nerblackbox.modules.datasets.formatter.swedish_ner_corpus_formatter import (
SwedishNerCorpusFormatter,
)
from nerblackbox.modules.datasets.formatter.sic_formatter import SICFormatter
from nerblackbox.modules.datasets.formatter.suc_formatter import SUCFormatter
from nerblackbox.modules.datasets.formatter.sucx_formatter import SUCXFormatter
from nerblackbox.modules.datasets.formatter.huggingface_datasets_formatter import (
HuggingfaceDatasetsFormatter,
)
from pkg_resources import resource_filename
import os
from os.path import abspath, dirname, join
BASE_DIR = abspath(dirname(dirname(dirname(__file__))))
DATA_DIR = join(BASE_DIR, "data")
os.environ["DATA_DIR"] = DATA_DIR
class TestAutoFormatter:
@pytest.mark.parametrize(
"ner_dataset, ner_dataset_subset, error",
[
("swedish_ner_corpus", "", False),
("conll2003", "", False),
("sic", "", False),
("suc", "", False),
("suc", "xyz", False), # ner_dataset_subset is not used
("sucx", "original_cased", False),
("sucx", "", True),
("swe_nerc", "", False),
("xyz", "", True),
("ehealth_kd", "", False),
("sent_comp", "", True),
],
)
def test_for_dataset(self, ner_dataset: str, ner_dataset_subset: str, error: bool):
if error:
with pytest.raises(Exception):
_ = AutoFormatter.for_dataset(
ner_dataset=ner_dataset, ner_dataset_subset=ner_dataset_subset
)
else:
auto_formatter = AutoFormatter.for_dataset(
ner_dataset=ner_dataset, ner_dataset_subset=ner_dataset_subset
)
assert isinstance(
auto_formatter, BaseFormatter
), f"ERROR! type(auto_formatter) = {type(auto_formatter)} != BaseFormatter"
assert (
auto_formatter.ner_dataset == ner_dataset
), f"ERROR! auto_formatter.ner_dataset = {auto_formatter.ner_dataset} != {ner_dataset}"
class TestBaseFormatter:
base_formatter = SwedishNerCorpusFormatter()
base_formatter.dataset_path = resource_filename(
"nerblackbox", f"tests/test_data/formatted_data"
)
@pytest.mark.parametrize(
"phases, df_formatted",
[
(
["train"],
pd.DataFrame(
data=[
["O O", "Mening 1"],
["PER O", "Mening 2"],
["O PER", "Mening 3"],
["O PER", "Mening 4"],
]
),
),
(["val"], pd.DataFrame(data=[["O O", "Mening 5"], ["PER O", "Mening 6"]])),
(["test"], pd.DataFrame(data=[["O O", "Mening 7"], ["PER O", "Mening 8"]])),
(
["val", "test"],
pd.DataFrame(
data=[
["O O", "Mening 5"],
["PER O", "Mening 6"],
["O O", "Mening 7"],
["PER O", "Mening 8"],
]
),
),
(
["test", "val"],
pd.DataFrame(
data=[
["O O", "Mening 7"],
["PER O", "Mening 8"],
["O O", "Mening 5"],
["PER O", "Mening 6"],
]
),
),
],
)
def test_read_formatted_csvs(self, phases: List[str], df_formatted: pd.DataFrame):
test_df_formatted = self.base_formatter._read_formatted_csvs(phases)
pd.testing.assert_frame_equal(
test_df_formatted, df_formatted
), f"ERROR! test_read_formatted did not pass test for phases = {phases}"
@pytest.mark.parametrize(
"df_original, val_fraction, df_new, df_val",
[
(
pd.DataFrame(data=[[1, 2], [3, 4]], columns=["A", "B"], index=[0, 1]),
0.5,
pd.DataFrame(data=[[1, 2]], columns=["A", "B"], index=[0]),
|
pd.DataFrame(data=[[3, 4]], columns=["A", "B"], index=[1])
|
pandas.DataFrame
|
from collections import (
abc,
deque,
)
from decimal import Decimal
from warnings import catch_warnings
import numpy as np
import pytest
import pandas as pd
from pandas import (
DataFrame,
Index,
MultiIndex,
PeriodIndex,
Series,
concat,
date_range,
)
import pandas._testing as tm
from pandas.core.arrays import SparseArray
from pandas.core.construction import create_series_with_explicit_dtype
from pandas.tests.extension.decimal import to_decimal
class TestConcatenate:
def test_append_concat(self):
# GH#1815
d1 = date_range("12/31/1990", "12/31/1999", freq="A-DEC")
d2 = date_range("12/31/2000", "12/31/2009", freq="A-DEC")
s1 = Series(np.random.randn(10), d1)
s2 = Series(np.random.randn(10), d2)
s1 = s1.to_period()
s2 = s2.to_period()
# drops index
result = concat([s1, s2])
assert isinstance(result.index, PeriodIndex)
assert result.index[0] == s1.index[0]
def test_concat_copy(self, using_array_manager):
df = DataFrame(np.random.randn(4, 3))
df2 = DataFrame(np.random.randint(0, 10, size=4).reshape(4, 1))
df3 = DataFrame({5: "foo"}, index=range(4))
# These are actual copies.
result = concat([df, df2, df3], axis=1, copy=True)
for arr in result._mgr.arrays:
assert arr.base is None
# These are the same.
result = concat([df, df2, df3], axis=1, copy=False)
for arr in result._mgr.arrays:
if arr.dtype.kind == "f":
assert arr.base is df._mgr.arrays[0].base
elif arr.dtype.kind in ["i", "u"]:
assert arr.base is df2._mgr.arrays[0].base
elif arr.dtype == object:
if using_array_manager:
# we get the same array object, which has no base
assert arr is df3._mgr.arrays[0]
else:
assert arr.base is not None
# Float block was consolidated.
df4 = DataFrame(np.random.randn(4, 1))
result = concat([df, df2, df3, df4], axis=1, copy=False)
for arr in result._mgr.arrays:
if arr.dtype.kind == "f":
if using_array_manager:
# this is a view on some array in either df or df4
assert any(
np.shares_memory(arr, other)
for other in df._mgr.arrays + df4._mgr.arrays
)
else:
# the block was consolidated, so we got a copy anyway
assert arr.base is None
elif arr.dtype.kind in ["i", "u"]:
assert arr.base is df2._mgr.arrays[0].base
elif arr.dtype == object:
# this is a view on df3
assert any(np.shares_memory(arr, other) for other in df3._mgr.arrays)
def test_concat_with_group_keys(self):
# axis=0
df = DataFrame(np.random.randn(3, 4))
df2 = DataFrame(np.random.randn(4, 4))
result = concat([df, df2], keys=[0, 1])
exp_index = MultiIndex.from_arrays(
[[0, 0, 0, 1, 1, 1, 1], [0, 1, 2, 0, 1, 2, 3]]
)
expected = DataFrame(np.r_[df.values, df2.values], index=exp_index)
tm.assert_frame_equal(result, expected)
result = concat([df, df], keys=[0, 1])
exp_index2 = MultiIndex.from_arrays([[0, 0, 0, 1, 1, 1], [0, 1, 2, 0, 1, 2]])
expected = DataFrame(np.r_[df.values, df.values], index=exp_index2)
tm.assert_frame_equal(result, expected)
# axis=1
df = DataFrame(np.random.randn(4, 3))
df2 = DataFrame(np.random.randn(4, 4))
result = concat([df, df2], keys=[0, 1], axis=1)
expected = DataFrame(np.c_[df.values, df2.values], columns=exp_index)
tm.assert_frame_equal(result, expected)
result = concat([df, df], keys=[0, 1], axis=1)
expected = DataFrame(np.c_[df.values, df.values], columns=exp_index2)
tm.assert_frame_equal(result, expected)
def test_concat_keys_specific_levels(self):
df = DataFrame(np.random.randn(10, 4))
pieces = [df.iloc[:, [0, 1]], df.iloc[:, [2]], df.iloc[:, [3]]]
level = ["three", "two", "one", "zero"]
result = concat(
pieces,
axis=1,
keys=["one", "two", "three"],
levels=[level],
names=["group_key"],
)
tm.assert_index_equal(result.columns.levels[0], Index(level, name="group_key"))
tm.assert_index_equal(result.columns.levels[1], Index([0, 1, 2, 3]))
assert result.columns.names == ["group_key", None]
@pytest.mark.parametrize("mapping", ["mapping", "dict"])
def test_concat_mapping(self, mapping, non_dict_mapping_subclass):
constructor = dict if mapping == "dict" else non_dict_mapping_subclass
frames = constructor(
{
"foo": DataFrame(np.random.randn(4, 3)),
"bar": DataFrame(np.random.randn(4, 3)),
"baz": DataFrame(np.random.randn(4, 3)),
"qux": DataFrame(np.random.randn(4, 3)),
}
)
sorted_keys = list(frames.keys())
result = concat(frames)
expected = concat([frames[k] for k in sorted_keys], keys=sorted_keys)
tm.assert_frame_equal(result, expected)
result = concat(frames, axis=1)
expected = concat([frames[k] for k in sorted_keys], keys=sorted_keys, axis=1)
tm.assert_frame_equal(result, expected)
keys = ["baz", "foo", "bar"]
result = concat(frames, keys=keys)
expected = concat([frames[k] for k in keys], keys=keys)
tm.assert_frame_equal(result, expected)
def test_concat_keys_and_levels(self):
df = DataFrame(np.random.randn(1, 3))
df2 = DataFrame(np.random.randn(1, 4))
levels = [["foo", "baz"], ["one", "two"]]
names = ["first", "second"]
result = concat(
[df, df2, df, df2],
keys=[("foo", "one"), ("foo", "two"), ("baz", "one"), ("baz", "two")],
levels=levels,
names=names,
)
expected = concat([df, df2, df, df2])
exp_index = MultiIndex(
levels=levels + [[0]],
codes=[[0, 0, 1, 1], [0, 1, 0, 1], [0, 0, 0, 0]],
names=names + [None],
)
expected.index = exp_index
tm.assert_frame_equal(result, expected)
# no names
result = concat(
[df, df2, df, df2],
keys=[("foo", "one"), ("foo", "two"), ("baz", "one"), ("baz", "two")],
levels=levels,
)
assert result.index.names == (None,) * 3
# no levels
result = concat(
[df, df2, df, df2],
keys=[("foo", "one"), ("foo", "two"), ("baz", "one"), ("baz", "two")],
names=["first", "second"],
)
assert result.index.names == ("first", "second", None)
tm.assert_index_equal(
result.index.levels[0], Index(["baz", "foo"], name="first")
)
def test_concat_keys_levels_no_overlap(self):
# GH #1406
df = DataFrame(np.random.randn(1, 3), index=["a"])
df2 = DataFrame(np.random.randn(1, 4), index=["b"])
msg = "Values not found in passed level"
with pytest.raises(ValueError, match=msg):
concat([df, df], keys=["one", "two"], levels=[["foo", "bar", "baz"]])
msg = "Key one not in level"
with pytest.raises(ValueError, match=msg):
concat([df, df2], keys=["one", "two"], levels=[["foo", "bar", "baz"]])
def test_crossed_dtypes_weird_corner(self):
columns = ["A", "B", "C", "D"]
df1 = DataFrame(
{
"A": np.array([1, 2, 3, 4], dtype="f8"),
"B": np.array([1, 2, 3, 4], dtype="i8"),
"C": np.array([1, 2, 3, 4], dtype="f8"),
"D": np.array([1, 2, 3, 4], dtype="i8"),
},
columns=columns,
)
df2 = DataFrame(
{
"A": np.array([1, 2, 3, 4], dtype="i8"),
"B": np.array([1, 2, 3, 4], dtype="f8"),
"C": np.array([1, 2, 3, 4], dtype="i8"),
"D": np.array([1, 2, 3, 4], dtype="f8"),
},
columns=columns,
)
appended = df1.append(df2, ignore_index=True)
expected = DataFrame(
np.concatenate([df1.values, df2.values], axis=0), columns=columns
)
tm.assert_frame_equal(appended, expected)
df = DataFrame(np.random.randn(1, 3), index=["a"])
df2 = DataFrame(np.random.randn(1, 4), index=["b"])
result = concat([df, df2], keys=["one", "two"], names=["first", "second"])
assert result.index.names == ("first", "second")
def test_with_mixed_tuples(self, sort):
# 10697
# columns have mixed tuples, so handle properly
df1 = DataFrame({"A": "foo", ("B", 1): "bar"}, index=range(2))
df2 = DataFrame({"B": "foo", ("B", 1): "bar"}, index=range(2))
# it works
concat([df1, df2], sort=sort)
def test_concat_mixed_objs(self):
# concat mixed series/frames
# G2385
# axis 1
index = date_range("01-Jan-2013", periods=10, freq="H")
arr = np.arange(10, dtype="int64")
s1 = Series(arr, index=index)
s2 = Series(arr, index=index)
df = DataFrame(arr.reshape(-1, 1), index=index)
expected = DataFrame(
np.repeat(arr, 2).reshape(-1, 2), index=index, columns=[0, 0]
)
result = concat([df, df], axis=1)
tm.assert_frame_equal(result, expected)
expected = DataFrame(
np.repeat(arr, 2).reshape(-1, 2), index=index, columns=[0, 1]
)
result = concat([s1, s2], axis=1)
tm.assert_frame_equal(result, expected)
expected = DataFrame(
np.repeat(arr, 3).reshape(-1, 3), index=index, columns=[0, 1, 2]
)
result = concat([s1, s2, s1], axis=1)
tm.assert_frame_equal(result, expected)
expected = DataFrame(
np.repeat(arr, 5).reshape(-1, 5), index=index, columns=[0, 0, 1, 2, 3]
)
result = concat([s1, df, s2, s2, s1], axis=1)
tm.assert_frame_equal(result, expected)
# with names
s1.name = "foo"
expected = DataFrame(
np.repeat(arr, 3).reshape(-1, 3), index=index, columns=["foo", 0, 0]
)
result = concat([s1, df, s2], axis=1)
tm.assert_frame_equal(result, expected)
s2.name = "bar"
expected = DataFrame(
np.repeat(arr, 3).reshape(-1, 3), index=index, columns=["foo", 0, "bar"]
)
result = concat([s1, df, s2], axis=1)
tm.assert_frame_equal(result, expected)
# ignore index
expected = DataFrame(
np.repeat(arr, 3).reshape(-1, 3), index=index, columns=[0, 1, 2]
)
result = concat([s1, df, s2], axis=1, ignore_index=True)
tm.assert_frame_equal(result, expected)
# axis 0
expected = DataFrame(
np.tile(arr, 3).reshape(-1, 1), index=index.tolist() * 3, columns=[0]
)
result = concat([s1, df, s2])
tm.assert_frame_equal(result, expected)
expected = DataFrame(np.tile(arr, 3).reshape(-1, 1), columns=[0])
result = concat([s1, df, s2], ignore_index=True)
|
tm.assert_frame_equal(result, expected)
|
pandas._testing.assert_frame_equal
|
"""
Module parse to/from Excel
"""
# ---------------------------------------------------------------------
# ExcelFile class
import abc
from datetime import date, datetime, time, timedelta
from distutils.version import LooseVersion
from io import UnsupportedOperation
import os
from textwrap import fill
import warnings
import numpy as np
import pandas._libs.json as json
import pandas.compat as compat
from pandas.compat import (
OrderedDict, add_metaclass, lrange, map, range, string_types, u, zip)
from pandas.errors import EmptyDataError
from pandas.util._decorators import Appender, deprecate_kwarg
from pandas.core.dtypes.common import (
is_bool, is_float, is_integer, is_list_like)
from pandas.core import config
from pandas.core.frame import DataFrame
from pandas.io.common import (
_NA_VALUES, _is_url, _stringify_path, _urlopen, _validate_header_arg,
get_filepath_or_buffer)
from pandas.io.formats.printing import pprint_thing
from pandas.io.parsers import TextParser
__all__ = ["read_excel", "ExcelWriter", "ExcelFile"]
_writer_extensions = ["xlsx", "xls", "xlsm"]
_writers = {}
_read_excel_doc = """
Read an Excel table into a pandas DataFrame
Parameters
----------
io : string, path object (pathlib.Path or py._path.local.LocalPath),
file-like object, pandas ExcelFile, or xlrd workbook.
The string could be a URL. Valid URL schemes include http, ftp, s3,
gcs, and file. For file URLs, a host is expected. For instance, a local
file could be file://localhost/path/to/workbook.xlsx
sheet_name : string, int, mixed list of strings/ints, or None, default 0
Strings are used for sheet names, Integers are used in zero-indexed
sheet positions.
Lists of strings/integers are used to request multiple sheets.
Specify None to get all sheets.
str|int -> DataFrame is returned.
list|None -> Dict of DataFrames is returned, with keys representing
sheets.
Available Cases
* Defaults to 0 -> 1st sheet as a DataFrame
* 1 -> 2nd sheet as a DataFrame
* "Sheet1" -> 1st sheet as a DataFrame
* [0,1,"Sheet5"] -> 1st, 2nd & 5th sheet as a dictionary of DataFrames
* None -> All sheets as a dictionary of DataFrames
sheetname : string, int, mixed list of strings/ints, or None, default 0
.. deprecated:: 0.21.0
Use `sheet_name` instead
header : int, list of ints, default 0
Row (0-indexed) to use for the column labels of the parsed
DataFrame. If a list of integers is passed those row positions will
be combined into a ``MultiIndex``. Use None if there is no header.
names : array-like, default None
List of column names to use. If file contains no header row,
then you should explicitly pass header=None
index_col : int, list of ints, default None
Column (0-indexed) to use as the row labels of the DataFrame.
Pass None if there is no such column. If a list is passed,
those columns will be combined into a ``MultiIndex``. If a
subset of data is selected with ``usecols``, index_col
is based on the subset.
parse_cols : int or list, default None
.. deprecated:: 0.21.0
Pass in `usecols` instead.
usecols : int, str, list-like, or callable default None
* If None, then parse all columns,
* If int, then indicates last column to be parsed
.. deprecated:: 0.24.0
Pass in a list of ints instead from 0 to `usecols` inclusive.
* If string, then indicates comma separated list of Excel column letters
and column ranges (e.g. "A:E" or "A,C,E:F"). Ranges are inclusive of
both sides.
* If list of ints, then indicates list of column numbers to be parsed.
* If list of strings, then indicates list of column names to be parsed.
.. versionadded:: 0.24.0
* If callable, then evaluate each column name against it and parse the
column if the callable returns ``True``.
.. versionadded:: 0.24.0
squeeze : boolean, default False
If the parsed data only contains one column then return a Series
dtype : Type name or dict of column -> type, default None
Data type for data or columns. E.g. {'a': np.float64, 'b': np.int32}
Use `object` to preserve data as stored in Excel and not interpret dtype.
If converters are specified, they will be applied INSTEAD
of dtype conversion.
.. versionadded:: 0.20.0
engine : string, default None
If io is not a buffer or path, this must be set to identify io.
Acceptable values are None or xlrd
converters : dict, default None
Dict of functions for converting values in certain columns. Keys can
either be integers or column labels, values are functions that take one
input argument, the Excel cell content, and return the transformed
content.
true_values : list, default None
Values to consider as True
.. versionadded:: 0.19.0
false_values : list, default None
Values to consider as False
.. versionadded:: 0.19.0
skiprows : list-like
Rows to skip at the beginning (0-indexed)
nrows : int, default None
Number of rows to parse
.. versionadded:: 0.23.0
na_values : scalar, str, list-like, or dict, default None
Additional strings to recognize as NA/NaN. If dict passed, specific
per-column NA values. By default the following values are interpreted
as NaN: '""" + fill("', '".join(sorted(_NA_VALUES)), 70, subsequent_indent=" ") + """'.
keep_default_na : bool, default True
If na_values are specified and keep_default_na is False the default NaN
values are overridden, otherwise they're appended to.
verbose : boolean, default False
Indicate number of NA values placed in non-numeric columns
thousands : str, default None
Thousands separator for parsing string columns to numeric. Note that
this parameter is only necessary for columns stored as TEXT in Excel,
any numeric columns will automatically be parsed, regardless of display
format.
comment : str, default None
Comments out remainder of line. Pass a character or characters to this
argument to indicate comments in the input file. Any data between the
comment string and the end of the current line is ignored.
skip_footer : int, default 0
.. deprecated:: 0.23.0
Pass in `skipfooter` instead.
skipfooter : int, default 0
Rows at the end to skip (0-indexed)
convert_float : boolean, default True
convert integral floats to int (i.e., 1.0 --> 1). If False, all numeric
data will be read in as floats: Excel stores all numbers as floats
internally
mangle_dupe_cols : boolean, default True
Duplicate columns will be specified as 'X', 'X.1', ...'X.N', rather than
'X'...'X'. Passing in False will cause data to be overwritten if there
are duplicate names in the columns.
Returns
-------
parsed : DataFrame or Dict of DataFrames
DataFrame from the passed in Excel file. See notes in sheet_name
argument for more information on when a dict of DataFrames is returned.
Examples
--------
An example DataFrame written to a local file
>>> df_out = pd.DataFrame([('string1', 1),
... ('string2', 2),
... ('string3', 3)],
... columns=['Name', 'Value'])
>>> df_out
Name Value
0 string1 1
1 string2 2
2 string3 3
>>> df_out.to_excel('tmp.xlsx')
The file can be read using the file name as string or an open file object:
>>> pd.read_excel('tmp.xlsx')
Name Value
0 string1 1
1 string2 2
2 string3 3
>>> pd.read_excel(open('tmp.xlsx','rb'))
Name Value
0 string1 1
1 string2 2
2 string3 3
Index and header can be specified via the `index_col` and `header` arguments
>>> pd.read_excel('tmp.xlsx', index_col=None, header=None)
0 1 2
0 NaN Name Value
1 0.0 string1 1
2 1.0 string2 2
3 2.0 string3 3
Column types are inferred but can be explicitly specified
>>> pd.read_excel('tmp.xlsx', dtype={'Name':str, 'Value':float})
Name Value
0 string1 1.0
1 string2 2.0
2 string3 3.0
True, False, and NA values, and thousands separators have defaults,
but can be explicitly specified, too. Supply the values you would like
as strings or lists of strings!
>>> pd.read_excel('tmp.xlsx',
... na_values=['string1', 'string2'])
Name Value
0 NaN 1
1 NaN 2
2 string3 3
Comment lines in the excel input file can be skipped using the `comment` kwarg
>>> df = pd.DataFrame({'a': ['1', '#2'], 'b': ['2', '3']})
>>> df.to_excel('tmp.xlsx', index=False)
>>> pd.read_excel('tmp.xlsx')
a b
0 1 2
1 #2 3
>>> pd.read_excel('tmp.xlsx', comment='#')
a b
0 1 2
"""
def register_writer(klass):
"""Adds engine to the excel writer registry. You must use this method to
integrate with ``to_excel``. Also adds config options for any new
``supported_extensions`` defined on the writer."""
if not compat.callable(klass):
raise ValueError("Can only register callables as engines")
engine_name = klass.engine
_writers[engine_name] = klass
for ext in klass.supported_extensions:
if ext.startswith('.'):
ext = ext[1:]
if ext not in _writer_extensions:
config.register_option("io.excel.{ext}.writer".format(ext=ext),
engine_name, validator=str)
_writer_extensions.append(ext)
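# Illustrative sketch (not part of pandas): registering a hypothetical engine. The
# ``DemoWriter`` class and the ``.demo`` extension are invented here purely to show
# the call; a real engine must subclass ExcelWriter and implement its abstract API.
#
#     class DemoWriter(ExcelWriter):
#         engine = 'demowriter'
#         supported_extensions = ('.demo',)
#
#         def write_cells(self, cells, sheet_name=None, startrow=0,
#                         startcol=0, freeze_panes=None):
#             pass
#
#         def save(self):
#             pass
#
#     register_writer(DemoWriter)  # df.to_excel('out.demo', engine='demowriter') would now dispatch here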
def _get_default_writer(ext):
_default_writers = {'xlsx': 'openpyxl', 'xlsm': 'openpyxl', 'xls': 'xlwt'}
try:
import xlsxwriter # noqa
_default_writers['xlsx'] = 'xlsxwriter'
except ImportError:
pass
return _default_writers[ext]
def get_writer(engine_name):
try:
return _writers[engine_name]
except KeyError:
raise ValueError("No Excel writer '{engine}'"
.format(engine=engine_name))
@
|
Appender(_read_excel_doc)
|
pandas.util._decorators.Appender
|
import pandas as pd
from app import db
from app.fetcher.fetcher import Fetcher
from app.models import Umrti
class DeathsFetcher(Fetcher):
"""
Class for updating deaths table.
"""
DEATHS_CSV = 'https://onemocneni-aktualne.mzcr.cz/api/v2/covid-19/umrti.csv'
def __init__(self):
super().__init__(Umrti.__tablename__, self.DEATHS_CSV, check_date=False)
def fetch(self, import_id: int) -> None:
df = pd.read_csv(self._url)
vekova_skupina = pd.read_sql_query('select vekova_skupina, min_vek, max_vek from populace_kategorie', db.engine)
vekova_skupina['join'] = 0
vek = pd.Series(range(0, 151), name='vek').to_frame()
vek['join'] = 0
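# 'join' is a constant helper column: merging on it below produces the full cross join of
# every age (0-150) with every age group, presumably so each age can then be matched to its
# group via the min_vek/max_vek bounds.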
merged =
|
pd.merge(vek, vekova_skupina)
|
pandas.merge
|
import pandas as pd
c1 = pd.read_csv('machine/Calling/Sensors_1.csv')
c2 =
|
pd.read_csv('machine/Calling/Sensors_2.csv')
|
pandas.read_csv
|
import base64
import io
import textwrap
import dash
import dash_core_components as dcc
import dash_html_components as html
import gunicorn
import plotly.graph_objs as go
from dash.dependencies import Input, Output, State
import flask
import pandas as pd
import urllib.parse
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from sklearn.decomposition import PCA
import numpy as np
import math
import scipy.stats
import dash_table
from dash_table.Format import Format, Scheme
from colour import Color
import dash_bootstrap_components as dbc
# from waitress import serve
external_stylesheets = [dbc.themes.BOOTSTRAP, 'https://codepen.io/chriddyp/pen/bWLwgP.css',
"https://codepen.io/sutharson/pen/dyYzEGZ.css",
"https://fonts.googleapis.com/css2?family=Raleway&display=swap",
"https://codepen.io/chriddyp/pen/brPBPO.css"]
app = dash.Dash(__name__, external_stylesheets=external_stylesheets)
server = app.server
# "external_url": "https://codepen.io/chriddyp/pen/brPBPO.css"
# https://raw.githubusercontent.com/aaml-analytics/pca-explorer/master/LoadingStatusStyleSheet.css
styles = {
'pre': {
'border': 'thin lightgrey solid',
'overflowX': 'scroll'
}
}
tabs_styles = {'height': '40px', 'font-family': 'Raleway', 'fontSize': 14}
tab_style = {
'borderBottom': '1px solid #d6d6d6',
'padding': '6px',
'fontWeight': 'bold'
}
tab_selected_style = {
'borderTop': '3px solid #333333',
'borderBottom': '1px solid #d6d6d6 ',
'backgroundColor': '#f6f6f6',
'color': '#333333',
# 'fontColor': '#004a4a',
'fontWeight': 'bold',
'padding': '6px'
}
# APP ABOUT DESCRIPTION
MOF_tool_about = textwrap.wrap(' These tools aim to provide a reproducible and consistent data visualisation platform '
'where experimental and computational researchers can use big data and statistical '
'analysis to find the best materials for specific applications. Principal Component '
'Analysis (PCA) is a dimension reduction technique that can be used to reduce a large '
'set of observable variables to a smaller set of latent variables that still contain '
'most of the information in the large set (feature extraction). This is done by '
'transforming a number of (possibly) correlated variables into some number of orthogonal '
'(uncorrelated) variables called principal components to find the directions of maximal '
'variance. PCA can be used to ease data visualisation by having fewer dimensions to plot '
'or be used as a pre-processing step before using another Machine Learning (ML)'
' algorithm for regression '
                               'and classification tasks. PCA can be used to improve an ML algorithm\'s performance, '
'reduce overfitting and reduce noise in data.',
width=50)
Scree_plot_about = textwrap.wrap(' The Principal Component Analysis Visualisation Tools runs PCA for the user and '
'populates a Scree plot. This plot allows the user to determine if PCA is suitable '
'for '
                                 'their dataset and whether they can accept an X% drop in explained variance to '
'have fewer dimensions.', width=50)
Feature_correlation_filter = textwrap.wrap("Feature correlation heatmaps provide users with feature analysis and "
"feature principal component analysis. This tool will allow users to see the"
" correlation between variables and the"
" covariances/correlations between original variables and the "
"principal components (loadings)."
, width=50)
plots_analysis = textwrap.wrap('Users can keep all variables as features or drop certain variables to produce a '
'Biplot, cos2 plot and contribution plot. The score plot is used to look for clusters, '
'trends, and outliers in the first two principal components. The loading plot is used to'
' visually interpret the first two principal components. The biplot overlays the score '
'plot and the loading plot on the same graph. The squared cosine (cos2) plot shows '
'the importance of a component for a given observation i.e. measures '
'how much a variable is represented in a component. The contribution plot contains the '
'contributions (%) of the variables to the principal components', width=50, )
data_table_download = textwrap.wrap("The user's inputs from the 'Plots' tab will provide the output of the data tables."
" The user can download the scores, eigenvalues, explained variance, "
"cumulative explained variance, loadings, "
"cos2 and contributions from the populated data tables. "
"Note: Wait for user inputs to be"
" computed (faded tab app will return to the original colour) before downloading the"
" data tables. ", width=50)
MOF_GH = textwrap.wrap(" to explore AAML's sample data and read more on"
" AAML's Principal Component Analysis Visualisation Tool Manual, FAQ's & Troubleshooting"
" on GitHub... ", width=50)
####################
# APP LAYOUT #
####################
fig = go.Figure()
fig1 = go.Figure()
app.layout = html.Div([
html.Div([
html.Img(
src='https://raw.githubusercontent.com/aaml-analytics/mof-explorer/master/UOC.png',
height='35', width='140', style={'display': 'inline-block', 'padding-left': '1%'}),
html.Img(src='https://raw.githubusercontent.com/aaml-analytics/mof-explorer/master/A2ML-logo.png',
height='50', width='125', style={'float': 'right', 'display': 'inline-block', 'padding-right': '2%'}),
html.H1("Principal Component Analysis Visualisation Tools",
style={'display': 'inline-block', 'padding-left': '11%', 'text-align': 'center', 'fontSize': 36,
'color': 'white', 'font-family': 'Raleway'}),
html.H1("...", style={'fontColor': '#3c3c3c', 'fontSize': 6})
], style={'backgroundColor': '#333333'}),
html.Div([html.A('Refresh', href='/')], style={}),
html.Div([
html.H2("Upload Data", style={'fontSize': 24, 'font-family': 'Raleway', 'color': '#333333'}, ),
html.H3("Upload .txt, .csv or .xls files to starting exploring data...", style={'fontSize': 16,
'font-family': 'Raleway'}),
dcc.Store(id='csv-data', storage_type='session', data=None),
html.Div([dcc.Upload(
id='data-table-upload',
children=html.Div([html.Button('Upload File')],
style={'height': "60px", 'borderWidth': '1px',
'borderRadius': '5px',
'textAlign': 'center',
}),
multiple=False
),
html.Div(id='output-data-upload'),
]), ], style={'display': 'inline-block', 'padding-left': '1%', }),
html.Div([dcc.Tabs([
dcc.Tab(label='About', style=tab_style, selected_style=tab_selected_style,
children=[html.Div([html.H2(" What are AAML's Principal Component Analysis Visualisation Tools?",
style={'fontSize': 18, 'font-family': 'Raleway', 'font-weight': 'bold'
}),
html.Div([' '.join(MOF_tool_about)]
, style={'font-family': 'Raleway'}),
html.H2(["Scree Plot"],
style={'fontSize': 18,
'font-family': 'Raleway', 'font-weight': 'bold'}),
html.Div([' '.join(Scree_plot_about)], style={'font-family': 'Raleway'}),
html.H2(["Feature Correlation"], style={'fontSize': 18,
'font-weight': 'bold',
'font-family': 'Raleway'}),
html.Div([' '.join(Feature_correlation_filter)], style={'font-family': 'Raleway', }),
html.H2(["Plots"],
style={'fontSize': 18, 'font-weight': 'bold',
'font-family': 'Raleway'}),
html.Div([' '.join(plots_analysis)], style={'font-family': 'Raleway'}),
html.H2(["Data tables"],
style={'fontSize': 18, 'font-weight': 'bold',
'font-family': 'Raleway'}),
html.Div([' '.join(data_table_download)], style={'font-family': 'Raleway'}),
# ADD LINK
html.Div([html.Plaintext(
[' Click ', html.A('here ',
href='https://github.com/aaml-analytics/pca-explorer')],
style={'display': 'inline-block',
'fontSize': 14, 'font-family': 'Raleway'}),
html.Div([' '.join(MOF_GH)], style={'display': 'inline-block',
'fontSize': 14,
'font-family': 'Raleway'}),
html.Img(
src='https://raw.githubusercontent.com/aaml-analytics/mof'
'-explorer/master/github.png',
height='40', width='40',
style={'display': 'inline-block', 'float': "right"
})
]
, style={'display': 'inline-block'})
], style={'backgroundColor': '#ffffff', 'padding-left': '1%'}
)]),
dcc.Tab(label='Scree Plot', style=tab_style, selected_style=tab_selected_style,
children=[
html.Div([dcc.Graph(id='PC-Eigen-plot')
],
style={'display': 'inline-block',
'width': '49%'}),
html.Div([dcc.Graph(id='PC-Var-plot')
], style={'display': 'inline-block', 'float': 'right',
'width': '49%'}),
html.Div(
[html.Label(["Remove outliers (if any) in analysis:", dcc.RadioItems(
id='outlier-value',
options=[{'label': 'Yes', 'value': 'Yes'},
{'label': 'No', 'value': 'No'}],
value='No')
])
], style={'display': 'inline-block',
'width': '49%', 'padding-left': '1%'}),
html.Div([html.Label(["Select the type of matrix used to calculate the principal components:",
dcc.RadioItems(
id='matrix-type-scree',
options=[{'label': 'Correlation', 'value': 'Correlation'},
{'label': 'Covariance', 'value': 'Covariance'}],
value='Correlation')
])], style={'display': 'inline-block',
'width': '49%', }),
html.Div([html.P(
"Note: Use a correlation matrix when your variables have different scales and you want to weight "
"all the variables equally. Use a covariance matrix when your variables have different scales and"
" you want to give more emphasis to variables with higher variances. When unsure"
" use a correlation matrix.")],
style={'padding-left': '1%'}),
html.Div([
html.Label(["You should attempt to use at least..."
, html.Div(id='var-output-container-filter')])
], style={'padding-left': '1%'}
),
html.Div([
html.Label(["As a rule of thumb for the Scree Plot"
" Eigenvalues, the point where the slope of the curve "
"is clearly "
"leveling off (the elbow), indicates the number of "
"components that "
"should be retained as significant."])
], style={'padding-left': '1%'}),
]),
dcc.Tab(label='Feature correlation', style=tab_style,
selected_style=tab_selected_style,
children=[html.Div([html.Div([dcc.Graph(id='PC-feature-heatmap')
], style={'width': '47%',
'display': 'inline-block',
'float': 'right'}),
html.Div([dcc.Graph(id='feature-heatmap')
], style={'width': '51%',
'display': 'inline-block',
'float': 'left'}),
html.Div([html.Label(["Loading colour bar range:"
, html.Div(
id='color-range-container')])
], style={
'fontSize': 12,
'float': 'right',
'width': '100%',
'padding-left': '85%'}
),
html.Div(
[html.Label(
["Remove outliers (if any) in analysis:",
dcc.RadioItems(
id='PC-feature-outlier-value',
options=[{'label': 'Yes', 'value': 'Yes'},
{'label': 'No', 'value': 'No'}],
value='No')
])
], style={'display': 'inline-block',
'width': '29%', 'padding-left': '1%'}),
html.Div([html.Label(
["Select the type of matrix used to calculate the principal components:",
dcc.RadioItems(
id='matrix-type-heatmap',
options=[{'label': 'Correlation', 'value': 'Correlation'},
{'label': 'Covariance', 'value': 'Covariance'}],
value='Correlation')
])], style={'display': 'inline-block',
'width': '39%', }),
html.Div([html.Label(["Select color scale:",
dcc.RadioItems(
id='colorscale',
options=[{'label': i, 'value': i}
for i in
['Viridis', 'Plasma']],
value='Plasma'
)]),
], style={'display': 'inline-block',
'width': '29%', 'padding-left': '1%'}),
html.Div([html.P(
"Note: Use a correlation matrix when your variables have different scales and you want to weight "
"all the variables equally. Use a covariance matrix when your variables have different scales and"
" you want to give more emphasis to variables with higher variances. When unsure"
" use a correlation matrix.")],
style={'padding-left': '1%'}),
html.Div([
html.P("There are usually two ways multicollinearity, "
"which is when there are a number of variables "
"that are highly correlated, is dealt with:"),
html.P("1) Use PCA to obtain a set of orthogonal ("
"not correlated) variables to analyse."),
html.P("2) Use correlation of determination (R²) to "
"determine which variables are highly "
"correlated and use only 1 in analysis. "
"Cut off for highly correlated variables "
"is ~0.7."),
html.P(
"In any case, it depends on the machine learning algorithm you may apply later. For correlation robust algorithms,"
" such as Random Forest, correlation of features will not be a concern. For non-correlation robust algorithms such as Linear Discriminant Analysis, "
"all high correlation variables should be removed.")
], style={'padding-left': '1%'}
),
html.Div([
html.Label(["Note: Data has been standardised (scale)"])
], style={'padding-left': '1%'})
])
]),
dcc.Tab(label='Plots', style=tab_style,
selected_style=tab_selected_style,
children=[html.Div([
html.Div([html.P("Selecting Features")], style={'padding-left': '1%',
'font-weight': 'bold'}),
html.Div([
html.P("Input here affects all plots, datatables and downloadable data output"),
html.Label([
"Would you like to analyse all variables or choose custom variables to "
"analyse:",
dcc.RadioItems(
id='all-custom-choice',
options=[{'label': 'All',
'value': 'All'},
{'label': 'Custom',
'value': 'Custom'}],
value='All'
)])
], style={'padding-left': '1%'}),
html.Div([
html.P("For custom variables input variables you would not like as features in your PCA:"),
html.Label(
[
"Note: Only input numerical variables (non-numerical variables have already "
"been removed from your dataframe)",
dcc.Dropdown(id='feature-input',
multi=True,
)])
], style={'padding': 10, 'padding-left': '1%'}),
]), dcc.Tabs(id='sub-tabs1', style=tabs_styles,
children=[
dcc.Tab(label='Biplot (Scores + loadings)', style=tab_style,
selected_style=tab_selected_style,
children=[
html.Div([dcc.Graph(id='biplot', figure=fig)
], style={'height': '100%', 'width': '75%',
'padding-left': '20%'},
),
html.Div(
[html.Label(
["Remove outliers (if any) in analysis:",
dcc.RadioItems(
id='outlier-value-biplot',
options=[
{'label': 'Yes', 'value': 'Yes'},
{'label': 'No', 'value': 'No'}],
value='No')
])
], style={'display': 'inline-block',
'width': '29%', 'padding-left': '1%'}),
html.Div([html.Label([
"Select the type of matrix used to calculate the principal components:",
dcc.RadioItems(
id='matrix-type-biplot',
options=[{'label': 'Correlation',
'value': 'Correlation'},
{'label': 'Covariance',
'value': 'Covariance'}],
value='Correlation')
])], style={'display': 'inline-block',
'width': '39%', }),
html.Div([
html.Label([
"Graph Update to show either loadings (Loading Plot) or "
"scores and loadings (Biplot):",
dcc.RadioItems(
id='customvar-graph-update',
options=[{'label': 'Biplot',
'value': 'Biplot'},
{'label': 'Loadings',
'value': 'Loadings'}],
value='Biplot')
])
], style={'display': 'inline-block',
'width': '29%', 'padding-left': '1%'}),
html.Div([html.P(
"Note: Use a correlation matrix when your variables have different scales and you want to weight "
"all the variables equally. Use a covariance matrix when your variables have different scales and"
" you want to give more emphasis to variables with higher variances. When unsure"
" use a correlation matrix. PCA is an unsupervised machine learning technique - it only "
"looks at the input features and does not take "
"into account the output or the target"
" (response) variable.")],
style={'padding-left': '1%'}),
html.Div([
html.P("For variables you have dropped..."),
html.Label([
"Would you like to introduce a first target variable"
" into your data visualisation?"
" (Graph type must be Biplot): "
"",
dcc.RadioItems(
id='radio-target-item',
options=[{'label': 'Yes',
'value': 'Yes'},
{'label': 'No',
'value': 'No'}],
value='No'
)])
], style={'width': '49%', 'padding-left': '1%',
'display': 'inline-block'}),
html.Div([
html.Label([
"Select first target variable for color scale of scores: ",
dcc.Dropdown(
id='color-scale-scores',
)])
], style={'width': '49%', 'padding-left': '1%',
'display': 'inline-block'}),
html.Div([
html.Label([
"Would you like to introduce a second target variable"
" into your data visualisation??"
" (Graph type must be Biplot):",
dcc.RadioItems(
id='radio-target-item-second',
options=[{'label': 'Yes',
'value': 'Yes'},
{'label': 'No',
'value': 'No'}],
value='No'
)])
], style={'width': '49%', 'padding-left': '1%',
'display': 'inline-block'}),
html.Div([
html.Label([
"Select second target variable for size scale of scores:",
dcc.Dropdown(
id='size-scale-scores',
)])
], style={'width': '49%', 'padding-left': '1%',
'display': 'inline-block'}),
html.Div([html.Label(["Size range:"
, html.Div(
id='size-second-target-container')])
], style={'display': 'inline-block',
'float': 'right',
'padding-right': '5%'}
),
html.Div([
html.Br(),
html.P(
"A loading plot shows how "
"strongly each characteristic (variable)"
" influences a principal component. The angles between the vectors"
" tell us how characteristics correlate with one another: "),
html.P("1) When two vectors are close, forming a small angle, the two "
"variables they represent are positively correlated. "),
html.P(
"2) If they meet each other at 90°, they are not likely to be correlated. "),
html.P(
"3) When they diverge and form a large angle (close to 180°), they are negative correlated."),
html.P(
"The Score Plot involves the projection of the data onto the PCs in two dimensions."
"The plot contains the original data but in the rotated (PC) coordinate system"),
html.P(
"A biplot merges a score plot and loading plot together.")
], style={'padding-left': '1%'}
),
]),
dcc.Tab(label='Cos2', style=tab_style,
selected_style=tab_selected_style,
children=[
html.Div([dcc.Graph(id='cos2-plot', figure=fig)
], style={'width': '65%',
'padding-left': '25%'},
),
html.Div(
[html.Label(["Remove outliers (if any) in analysis:",
dcc.RadioItems(
id='outlier-value-cos2',
options=[
{'label': 'Yes', 'value': 'Yes'},
{'label': 'No', 'value': 'No'}],
value='No')
])
], style={'display': 'inline-block',
'padding-left': '1%',
'width': '49%'}),
html.Div([html.Label([
"Select the type of matrix used to calculate the principal components:",
dcc.RadioItems(
id='matrix-type-cos2',
options=[{'label': 'Correlation',
'value': 'Correlation'},
{'label': 'Covariance',
'value': 'Covariance'}],
value='Correlation')
])], style={'display': 'inline-block',
'width': '49%', }),
html.Div([html.P(
"Note: Use a correlation matrix when your variables have different scales and you want to weight "
"all the variables equally. Use a covariance matrix when your variables have different scales and"
" you want to give more emphasis to variables with higher variances. When unsure"
" use a correlation matrix.")],
style={'padding-left': '1%'}),
html.Div([
html.P("The squared cosine shows the importance of a "
"component for a given observation i.e. "
"measures "
" how much a variable is represented in a "
"component")
], style={'padding-left': '1%'}),
]),
dcc.Tab(label='Contribution', style=tab_style,
selected_style=tab_selected_style,
children=[
html.Div([dcc.Graph(id='contrib-plot', figure=fig)
], style={'width': '65%',
'padding-left': '25%'},
),
html.Div(
[html.Label(["Remove outliers (if any) in analysis:",
dcc.RadioItems(
id='outlier-value-contrib',
options=[
{'label': 'Yes', 'value': 'Yes'},
{'label': 'No', 'value': 'No'}],
value='No')
], style={'padding-left': '1%'})
], style={'display': 'inline-block',
'width': '49%'}),
html.Div([html.Label([
"Select the type of matrix used to calculate the principal components:",
dcc.RadioItems(
id='matrix-type-contrib',
options=[{'label': 'Correlation',
'value': 'Correlation'},
{'label': 'Covariance',
'value': 'Covariance'}],
value='Correlation')
])], style={'display': 'inline-block',
'width': '49%', }),
html.Div([html.P(
"Note: Use a correlation matrix when your variables have different scales and you want to weight "
"all the variables equally. Use a covariance matrix when your variables have different scales and"
" you want to give more emphasis to variables with higher variances. When unsure"
" use a correlation matrix.")],
style={'padding-left': '1%'}),
html.Div([
html.P("The contribution plot contains the "
"contributions (in percentage) of the "
"variables to the principal components")
], style={'padding-left': '1%'}),
])
])
]),
dcc.Tab(label='Data tables', style=tab_style,
selected_style=tab_selected_style,
children=[html.Div([
html.Div([
html.Label(
["Note: Input in 'Plots' tab will provide output of data tables and the"
" downloadable PCA data"])
], style={'font-weight': 'bold', 'padding-left': '1%'}),
html.Div([html.A(
'Download PCA Data (scores for each principal component)',
id='download-link',
href="",
target="_blank"
)], style={'padding-left': '1%'}),
html.Div([html.Label(["Remove outliers (if any) in analysis:",
dcc.RadioItems(id="eigenA-outlier",
options=[{'label': 'Yes',
'value': 'Yes'},
{'label': 'No',
'value': 'No'}],
value='No'
)])], style={'padding-left': '1%',
'display': 'inline-block', 'width': '49%'}),
html.Div([html.Label(["Select the type of matrix used to calculate the principal components:",
dcc.RadioItems(
id='matrix-type-data-table',
options=[{'label': 'Correlation', 'value': 'Correlation'},
{'label': 'Covariance', 'value': 'Covariance'}],
value='Correlation')
])], style={'display': 'inline-block',
'width': '49%', }),
html.Div([html.P(
"Note: Use a correlation matrix when your variables have different scales and you want to weight "
"all the variables equally. Use a covariance matrix when your variables have different scales and"
" you want to give more emphasis to variables with higher variances. When unsure"
" use a correlation matrix.")],
style={'padding-left': '1%'}),
html.Div([
html.Div([
html.Label(["Correlation between Features"])
], style={'font-weight': 'bold'}),
html.Div([
dash_table.DataTable(id='data-table-correlation',
editable=False,
filter_action='native',
sort_action='native',
sort_mode='multi',
selected_columns=[],
selected_rows=[],
page_action='native',
column_selectable='single',
page_current=0,
page_size=20,
style_data={'height': 'auto'},
style_table={'overflowX': 'scroll',
'maxHeight': '300px',
'overflowY': 'scroll'},
style_cell={
'minWidth': '0px', 'maxWidth': '220px',
'whiteSpace': 'normal',
}
),
html.Div(id='data-table-correlation-container'),
]),
html.Div([html.A(
'Download Feature Correlation data',
id='download-link-correlation',
href="",
target="_blank"
)]),
], style={'padding': 20}),
html.Div([
html.Div([
html.Label(["Eigen Analysis of the correlation matrix"]),
], style={'font-weight': 'bold'}),
html.Div([
dash_table.DataTable(id='data-table-eigenA',
editable=False,
filter_action='native',
sort_action='native',
sort_mode='multi',
selected_columns=[],
selected_rows=[],
page_action='native',
column_selectable='single',
page_current=0,
page_size=20,
style_data={'height': 'auto'},
style_table={'overflowX': 'scroll',
'maxHeight': '300px',
'overflowY': 'scroll'},
style_cell={
'minWidth': '0px', 'maxWidth': '220px',
'whiteSpace': 'normal',
}
),
html.Div(id='data-table-eigenA-container'),
]),
html.Div([html.A(
'Download Eigen Analysis data',
id='download-link-eigenA',
href="",
download='Eigen_Analysis_data.csv',
target="_blank"
)]),
], style={'padding': 20}),
html.Div([
html.Div([
html.Label(["Loadings (Feature and PC correlation) from PCA"]),
], style={'font-weight': 'bold'}),
html.Div([
dash_table.DataTable(id='data-table-loadings',
editable=False,
filter_action='native',
sort_action='native',
sort_mode='multi',
selected_columns=[],
selected_rows=[],
page_action='native',
column_selectable='single',
page_current=0,
page_size=20,
style_data={'height': 'auto'},
style_table={'overflowX': 'scroll',
'maxHeight': '300px',
'overflowY': 'scroll'},
style_cell={
'minWidth': '0px', 'maxWidth': '220px',
'whiteSpace': 'normal',
}
),
html.Div(id='data-table-loadings-container'),
]),
html.Div([html.A(
'Download Loadings data',
id='download-link-loadings',
download='Loadings_data.csv',
href="",
target="_blank"
)]),
], style={'padding': 20}),
html.Div([
html.Div([
html.Label(["Cos2 from PCA"])
], style={'font-weight': 'bold'}),
html.Div([
dash_table.DataTable(id='data-table-cos2',
editable=False,
filter_action='native',
sort_action='native',
sort_mode='multi',
selected_columns=[],
selected_rows=[],
page_action='native',
column_selectable='single',
page_current=0,
page_size=20,
style_data={'height': 'auto'},
style_table={'overflowX': 'scroll',
'maxHeight': '300px',
'overflowY': 'scroll'},
style_cell={
'minWidth': '0px', 'maxWidth': '220px',
'whiteSpace': 'normal',
}
),
html.Div(id='data-table-cos2-container'),
]),
html.Div([html.A(
'Download Cos2 data',
id='download-link-cos2',
download='Cos2_data.csv',
href="",
target="_blank"
)]),
], style={'padding': 20}),
html.Div([
html.Div([
html.Label(["Contributions from PCA"])
], style={'font-weight': 'bold'}),
html.Div([
dash_table.DataTable(id='data-table-contrib',
editable=False,
filter_action='native',
sort_action='native',
sort_mode='multi',
selected_columns=[],
selected_rows=[],
page_action='native',
column_selectable='single',
page_current=0,
page_size=20,
style_data={'height': 'auto'},
style_table={'overflowX': 'scroll',
'maxHeight': '300px',
'overflowY': 'scroll'},
style_cell={
'minWidth': '0px', 'maxWidth': '220px',
'whiteSpace': 'normal',
}
),
html.Div(id='data-table-contrib-container'),
]),
html.Div([html.A(
'Download Contributions data',
id='download-link-contrib',
download='Contributions_data.csv',
href="",
target="_blank"
)]),
], style={'padding': 20}),
])])
])
], style={'font-family': 'Raleway'})])
# READ FILE
def parse_contents(contents, filename):
content_type, content_string = contents.split(',')
decoded = base64.b64decode(content_string)
    try:
        if 'csv' in filename:
            # Assume that the user uploaded a CSV file
            df = pd.read_csv(io.StringIO(decoded.decode('utf-8')))
        elif 'xls' in filename:
            # Assume that the user uploaded an excel file
            df = pd.read_excel(io.BytesIO(decoded))
        elif 'txt' in filename or 'tsv' in filename:
            # Assume that the user uploaded a whitespace-delimited text file
            df = pd.read_csv(io.StringIO(decoded.decode('utf-8')), delimiter=r'\s+')
        else:
            return html.Div(['Unsupported file type: please upload a .csv, .xls, .txt or .tsv file.'])
        # fillna returns a new DataFrame, so the result must be reassigned
        df = df.fillna(0)
    except Exception as e:
        print(e)
        return html.Div([
            'There was an error processing this file.'
        ])
    return df
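# The parsed DataFrame is serialised with orient='split' and kept in the dcc.Store
# component 'csv-data'; every callback below re-hydrates it via
# pd.read_json(data, orient='split') and keeps only the numeric columns with
# df.select_dtypes(exclude=['object']).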
@app.callback(Output('csv-data', 'data'),
[Input('data-table-upload', 'contents')],
[State('data-table-upload', 'filename')])
def parse_uploaded_file(contents, filename):
if not filename:
return dash.no_update
    df = parse_contents(contents, filename)
    if not isinstance(df, pd.DataFrame):
        # parse_contents returned an error component instead of a DataFrame
        return dash.no_update
    df = df.fillna(0)
    return df.to_json(date_format='iso', orient='split')
@app.callback(Output('PC-Var-plot', 'figure'),
[Input('outlier-value', 'value'),
Input('matrix-type-scree', 'value'),
Input('csv-data', 'data')],
)
def update_graph_stat(outlier, matrix_type, data):
traces = []
if not data:
return dash.no_update
df = pd.read_json(data, orient='split')
dff = df.select_dtypes(exclude=['object'])
if outlier == 'No' and matrix_type == 'Correlation':
features1 = dff.columns
features = list(features1)
x = dff.loc[:, features].values
# Separating out the target (if any)
# Standardizing the features to {mean, variance} = {0, 1}
x = StandardScaler().fit_transform(x)
pca = PCA(n_components=len(features))
principalComponents = pca.fit_transform(x)
principalDf = pd.DataFrame(data=principalComponents
, columns=['PC' + str(i + 1) for i in range(len(features))])
finalDf = pd.concat([df[[df.columns[0]]], principalDf], axis=1)
loading = pca.components_.T * np.sqrt(pca.explained_variance_)
loading_df = pd.DataFrame(data=loading[0:, 0:], index=features,
columns=['PC' + str(i + 1) for i in range(loading.shape[1])])
Var = pca.explained_variance_ratio_
PC_df = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features))], columns=['Principal Component'])
Var_df = pd.DataFrame(data=Var, columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum = Var_df.cumsum()
Var_dff = pd.concat([PC_df, (Var_cumsum * 100)], axis=1)
data = Var_dff
elif outlier == 'Yes' and matrix_type == 'Correlation':
z_scores = scipy.stats.zscore(dff)
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
outlier_dff = dff[filtered_entries]
features1_outlier = outlier_dff.columns
features_outlier = list(features1_outlier)
outlier_names1 = df[filtered_entries]
outlier_names = outlier_names1.iloc[:, 0]
x_outlier = outlier_dff.loc[:, features_outlier].values
# Standardizing the features
x_outlier = StandardScaler().fit_transform(x_outlier)
pca_outlier = PCA(n_components=len(features_outlier))
principalComponents_outlier = pca_outlier.fit_transform(x_outlier)
principalDf_outlier = pd.DataFrame(data=principalComponents_outlier
, columns=['PC' + str(i + 1) for i in range(len(features_outlier))])
        # combining principal components and target
finalDf_outlier = pd.concat([outlier_names, principalDf_outlier], axis=1)
# calculating loading
loading_outlier = pca_outlier.components_.T * np.sqrt(pca_outlier.explained_variance_)
loading_df_outlier = pd.DataFrame(data=loading_outlier[0:, 0:], index=features_outlier,
columns=['PC' + str(i + 1) for i in range(loading_outlier.shape[1])])
Var_outlier = pca_outlier.explained_variance_ratio_
PC_df_outlier = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features_outlier))],
columns=['Principal Component'])
Var_df_outlier = pd.DataFrame(data=Var_outlier, columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum_outlier = Var_df_outlier.cumsum()
Var_dff_outlier = pd.concat([PC_df_outlier, (Var_cumsum_outlier * 100)], axis=1)
data = Var_dff_outlier
elif outlier == 'No' and matrix_type == 'Covariance':
features1_covar = dff.columns
features_covar = list(features1_covar)
x = dff.loc[:, features_covar].values
pca_covar = PCA(n_components=len(features_covar))
principalComponents_covar = pca_covar.fit_transform(x)
principalDf_covar = pd.DataFrame(data=principalComponents_covar
, columns=['PC' + str(i + 1) for i in range(len(features_covar))])
finalDf_covar = pd.concat([df[[df.columns[0]]], principalDf_covar], axis=1)
loading_covar = pca_covar.components_.T * np.sqrt(pca_covar.explained_variance_)
loading_df_covar = pd.DataFrame(data=loading_covar[0:, 0:], index=features_covar,
columns=['PC' + str(i + 1) for i in range(loading_covar.shape[1])])
Var_covar = pca_covar.explained_variance_ratio_
PC_df_covar = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features_covar))],
columns=['Principal Component'])
Var_df_covar = pd.DataFrame(data=Var_covar, columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum_covar = Var_df_covar.cumsum()
Var_dff_covar = pd.concat([PC_df_covar, (Var_cumsum_covar * 100)], axis=1)
data = Var_dff_covar
elif outlier == 'Yes' and matrix_type == 'Covariance':
z_scores = scipy.stats.zscore(dff)
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
outlier_dff = dff[filtered_entries]
features1_outlier_covar = outlier_dff.columns
features_outlier_covar = list(features1_outlier_covar)
outlier_names1 = df[filtered_entries]
outlier_names = outlier_names1.iloc[:, 0]
x_outlier = outlier_dff.loc[:, features_outlier_covar].values
pca_outlier_covar = PCA(n_components=len(features_outlier_covar))
principalComponents_outlier_covar = pca_outlier_covar.fit_transform(x_outlier)
principalDf_outlier_covar = pd.DataFrame(data=principalComponents_outlier_covar
, columns=['PC' + str(i + 1) for i in
range(len(features_outlier_covar))])
        # combining principal components and target
finalDf_outlier_covar = pd.concat([outlier_names, principalDf_outlier_covar], axis=1)
# calculating loading
loading_outlier_covar = pca_outlier_covar.components_.T * np.sqrt(pca_outlier_covar.explained_variance_)
loading_df_outlier_covar = pd.DataFrame(data=loading_outlier_covar[0:, 0:], index=features_outlier_covar,
columns=['PC' + str(i + 1) for i in
range(loading_outlier_covar.shape[1])])
Var_outlier_covar = pca_outlier_covar.explained_variance_ratio_
PC_df_outlier_covar = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features_outlier_covar))],
columns=['Principal Component'])
Var_df_outlier_covar = pd.DataFrame(data=Var_outlier_covar,
columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum_outlier_covar = Var_df_outlier_covar.cumsum()
Var_dff_outlier_covar = pd.concat([PC_df_outlier_covar, (Var_cumsum_outlier_covar * 100)], axis=1)
data = Var_dff_outlier_covar
traces.append(go.Scatter(x=data['Principal Component'], y=data['Cumulative Proportion of Explained Variance'],
mode='lines', line=dict(color='Red')))
return {'data': traces,
            'layout': go.Layout(title='<b>Scree Plot: Cumulative Proportion of Explained Variance</b>',
titlefont=dict(family='Helvetica', size=16),
xaxis={'title': 'Principal Component',
'mirror': True,
'ticks': 'outside',
'showline': True,
'showspikes': True
}, yaxis={'title': 'Cumulative Explained Variance',
'mirror': True,
'ticks': 'outside',
'showline': True,
'showspikes': True,
'range': [0, 100]},
hovermode='closest', font=dict(family="Helvetica"), template="simple_white")
}
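# Minimal sketch of the recipe shared by the four branches above, shown for the
# correlation-matrix case. The helper name and argument are hypothetical and the
# function is not called anywhere in the app.
def _cumulative_variance_sketch(numeric_df):
    # standardise so that PCA effectively diagonalises the correlation matrix
    x = StandardScaler().fit_transform(numeric_df.values)
    pca = PCA(n_components=numeric_df.shape[1]).fit(x)
    # cumulative explained variance in percent, as plotted on the y-axis above
    return np.cumsum(pca.explained_variance_ratio_) * 100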
@app.callback(
Output('var-output-container-filter', 'children'),
[Input('outlier-value', 'value'),
Input('matrix-type-scree', 'value'),
Input('csv-data', 'data')],
)
def update_output(outlier, matrix_type, data):
if not data:
return dash.no_update
df = pd.read_json(data, orient='split')
dff = df.select_dtypes(exclude=['object'])
if outlier == 'No' and matrix_type == 'Correlation':
features1 = dff.columns
features = list(features1)
x = dff.loc[:, features].values
# Separating out the target (if any)
# Standardizing the features to {mean, variance} = {0, 1}
x = StandardScaler().fit_transform(x)
pca = PCA(n_components=len(features))
principalComponents = pca.fit_transform(x)
principalDf = pd.DataFrame(data=principalComponents
, columns=['PC' + str(i + 1) for i in range(len(features))])
        # combining principal components and target
finalDf = pd.concat([df[[df.columns[0]]], principalDf], axis=1)
dfff = finalDf
loading = pca.components_.T * np.sqrt(pca.explained_variance_)
loading_df = pd.DataFrame(data=loading[0:, 0:], index=features,
columns=['PC' + str(i + 1) for i in range(loading.shape[1])])
loading_dff = loading_df.T
Var = pca.explained_variance_ratio_
PC_df = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features))], columns=['Principal Component'])
PC_num = [float(i + 1) for i in range(len(features))]
Var_df = pd.DataFrame(data=Var, columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum = Var_df.cumsum()
Var_dff = pd.concat([PC_df, (Var_cumsum * 100)], axis=1)
PC_interp = np.interp(70, Var_dff['Cumulative Proportion of Explained Variance'], PC_num)
PC_interp_int = math.ceil(PC_interp)
return "'{}' principal components (≥70% of explained variance) to avoid losing too much of your " \
"data. Note that there is no required threshold in order for PCA to be valid." \
" ".format(PC_interp_int)
elif outlier == 'Yes' and matrix_type == "Correlation":
z_scores = scipy.stats.zscore(dff)
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
outlier_dff = dff[filtered_entries]
features1_outlier = outlier_dff.columns
features_outlier = list(features1_outlier)
outlier_names1 = df[filtered_entries]
outlier_names = outlier_names1.iloc[:, 0]
x_outlier = outlier_dff.loc[:, features_outlier].values
# Separating out the target (if any)
y_outlier = outlier_dff.loc[:, ].values
# Standardizing the features
x_outlier = StandardScaler().fit_transform(x_outlier)
pca_outlier = PCA(n_components=len(features_outlier))
principalComponents_outlier = pca_outlier.fit_transform(x_outlier)
principalDf_outlier = pd.DataFrame(data=principalComponents_outlier
, columns=['PC' + str(i + 1) for i in range(len(features_outlier))])
        # combining principal components and target
finalDf_outlier = pd.concat([outlier_names, principalDf_outlier], axis=1)
dfff_outlier = finalDf_outlier
# calculating loading
loading_outlier = pca_outlier.components_.T * np.sqrt(pca_outlier.explained_variance_)
loading_df_outlier = pd.DataFrame(data=loading_outlier[0:, 0:], index=features_outlier,
columns=['PC' + str(i + 1) for i in range(loading_outlier.shape[1])])
loading_dff_outlier = loading_df_outlier.T
Var_outlier = pca_outlier.explained_variance_ratio_
PC_df_outlier = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features_outlier))],
columns=['Principal Component'])
PC_num_outlier = [float(i + 1) for i in range(len(features_outlier))]
Var_df_outlier = pd.DataFrame(data=Var_outlier, columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum_outlier = Var_df_outlier.cumsum()
Var_dff_outlier = pd.concat([PC_df_outlier, (Var_cumsum_outlier * 100)], axis=1)
PC_interp_outlier = np.interp(70, Var_dff_outlier['Cumulative Proportion of Explained Variance'],
PC_num_outlier)
PC_interp_int_outlier = math.ceil(PC_interp_outlier)
return "'{}' principal components (≥70% of explained variance) to avoid losing too much of your " \
"data. Note that there is no required threshold in order for PCA to be valid." \
" ".format(PC_interp_int_outlier)
elif outlier == 'No' and matrix_type == 'Covariance':
features1_covar = dff.columns
features_covar = list(features1_covar)
x = dff.loc[:, features_covar].values
pca_covar = PCA(n_components=len(features_covar))
principalComponents_covar = pca_covar.fit_transform(x)
principalDf_covar = pd.DataFrame(data=principalComponents_covar
, columns=['PC' + str(i + 1) for i in range(len(features_covar))])
finalDf_covar = pd.concat([df[[df.columns[0]]], principalDf_covar], axis=1)
loading_covar = pca_covar.components_.T * np.sqrt(pca_covar.explained_variance_)
loading_df_covar = pd.DataFrame(data=loading_covar[0:, 0:], index=features_covar,
columns=['PC' + str(i + 1) for i in range(loading_covar.shape[1])])
Var_covar = pca_covar.explained_variance_ratio_
PC_df_covar = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features_covar))],
columns=['Principal Component'])
PC_num_covar = [float(i + 1) for i in range(len(features_covar))]
Var_df_covar = pd.DataFrame(data=Var_covar, columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum_covar = Var_df_covar.cumsum()
Var_dff_covar = pd.concat([PC_df_covar, (Var_cumsum_covar * 100)], axis=1)
PC_interp_covar = np.interp(70, Var_dff_covar['Cumulative Proportion of Explained Variance'], PC_num_covar)
PC_interp_int_covar = math.ceil(PC_interp_covar)
return "'{}' principal components (≥70% of explained variance) to avoid losing too much of your " \
"data. Note that there is no required threshold in order for PCA to be valid." \
" ".format(PC_interp_int_covar)
elif outlier == 'Yes' and matrix_type == 'Covariance':
z_scores = scipy.stats.zscore(dff)
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
outlier_dff = dff[filtered_entries]
features1_outlier_covar = outlier_dff.columns
features_outlier_covar = list(features1_outlier_covar)
outlier_names1 = df[filtered_entries]
outlier_names = outlier_names1.iloc[:, 0]
x_outlier = outlier_dff.loc[:, features_outlier_covar].values
pca_outlier_covar = PCA(n_components=len(features_outlier_covar))
principalComponents_outlier_covar = pca_outlier_covar.fit_transform(x_outlier)
principalDf_outlier_covar = pd.DataFrame(data=principalComponents_outlier_covar
, columns=['PC' + str(i + 1) for i in
range(len(features_outlier_covar))])
        # combining principal components and target
finalDf_outlier_covar = pd.concat([outlier_names, principalDf_outlier_covar], axis=1)
# calculating loading
loading_outlier_covar = pca_outlier_covar.components_.T * np.sqrt(pca_outlier_covar.explained_variance_)
loading_df_outlier_covar = pd.DataFrame(data=loading_outlier_covar[0:, 0:], index=features_outlier_covar,
columns=['PC' + str(i + 1) for i in
range(loading_outlier_covar.shape[1])])
Var_outlier_covar = pca_outlier_covar.explained_variance_ratio_
PC_df_outlier_covar = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features_outlier_covar))],
columns=['Principal Component'])
PC_num_outlier_covar = [float(i + 1) for i in range(len(features_outlier_covar))]
Var_df_outlier_covar = pd.DataFrame(data=Var_outlier_covar,
columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum_outlier_covar = Var_df_outlier_covar.cumsum()
Var_dff_outlier_covar = pd.concat([PC_df_outlier_covar, (Var_cumsum_outlier_covar * 100)], axis=1)
PC_interp_outlier = np.interp(70, Var_dff_outlier_covar['Cumulative Proportion of Explained Variance'],
PC_num_outlier_covar)
PC_interp_int_outlier = math.ceil(PC_interp_outlier)
return "'{}' principal components (≥70% of explained variance) to avoid losing too much of your " \
"data. Note that there is no required threshold in order for PCA to be valid." \
" ".format(PC_interp_int_outlier)
@app.callback(Output('PC-Eigen-plot', 'figure'),
[Input('outlier-value', 'value'),
Input('matrix-type-scree', 'value'),
Input('csv-data', 'data')]
)
def update_graph_stat(outlier, matrix_type, data):
traces = []
if not data:
return dash.no_update
df = pd.read_json(data, orient='split')
dff = df.select_dtypes(exclude=['object'])
if outlier == 'No' and matrix_type == "Correlation":
features1 = dff.columns
features = list(features1)
x = dff.loc[:, features].values
# Separating out the target (if any)
y = dff.loc[:, ].values
# Standardizing the features to {mean, variance} = {0, 1}
x = StandardScaler().fit_transform(x)
pca = PCA(n_components=len(features))
principalComponents = pca.fit_transform(x)
principalDf = pd.DataFrame(data=principalComponents
, columns=['PC' + str(i + 1) for i in range(len(features))])
        # combining principal components and target
finalDf = pd.concat([df[[df.columns[0]]], principalDf], axis=1)
dfff = finalDf
loading = pca.components_.T * np.sqrt(pca.explained_variance_)
loading_df = pd.DataFrame(data=loading[0:, 0:], index=features,
columns=['PC' + str(i + 1) for i in range(loading.shape[1])])
loading_dff = loading_df.T
Var = pca.explained_variance_ratio_
PC_df = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features))], columns=['Principal Component'])
PC_num = [float(i + 1) for i in range(len(features))]
Var_df = pd.DataFrame(data=Var, columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum = Var_df.cumsum()
Var_dff = pd.concat([PC_df, (Var_cumsum * 100)], axis=1)
PC_interp = np.interp(70, Var_dff['Cumulative Proportion of Explained Variance'], PC_num)
PC_interp_int = math.ceil(PC_interp)
eigenvalues = pca.explained_variance_
Eigen_df = pd.DataFrame(data=eigenvalues, columns=['Eigenvalues'])
Eigen_dff = pd.concat([PC_df, Eigen_df], axis=1)
data = Eigen_dff
elif outlier == 'Yes' and matrix_type == "Correlation":
z_scores = scipy.stats.zscore(dff)
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
outlier_dff = dff[filtered_entries]
features1_outlier = outlier_dff.columns
features_outlier = list(features1_outlier)
outlier_names1 = df[filtered_entries]
outlier_names = outlier_names1.iloc[:, 0]
x_outlier = outlier_dff.loc[:, features_outlier].values
# Separating out the target (if any)
y_outlier = outlier_dff.loc[:, ].values
# Standardizing the features
x_outlier = StandardScaler().fit_transform(x_outlier)
pca_outlier = PCA(n_components=len(features_outlier))
principalComponents_outlier = pca_outlier.fit_transform(x_outlier)
principalDf_outlier = pd.DataFrame(data=principalComponents_outlier
, columns=['PC' + str(i + 1) for i in range(len(features_outlier))])
        # combining principal components and target
finalDf_outlier = pd.concat([outlier_names, principalDf_outlier], axis=1)
dfff_outlier = finalDf_outlier
# calculating loading
loading_outlier = pca_outlier.components_.T * np.sqrt(pca_outlier.explained_variance_)
loading_df_outlier = pd.DataFrame(data=loading_outlier[0:, 0:], index=features_outlier,
columns=['PC' + str(i + 1) for i in range(loading_outlier.shape[1])])
loading_dff_outlier = loading_df_outlier.T
Var_outlier = pca_outlier.explained_variance_ratio_
PC_df_outlier = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features_outlier))],
columns=['Principal Component'])
PC_num_outlier = [float(i + 1) for i in range(len(features_outlier))]
Var_df_outlier = pd.DataFrame(data=Var_outlier, columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum_outlier = Var_df_outlier.cumsum()
Var_dff_outlier = pd.concat([PC_df_outlier, (Var_cumsum_outlier * 100)], axis=1)
PC_interp_outlier = np.interp(70, Var_dff_outlier['Cumulative Proportion of Explained Variance'],
PC_num_outlier)
PC_interp_int_outlier = math.ceil(PC_interp_outlier)
eigenvalues_outlier = pca_outlier.explained_variance_
Eigen_df_outlier = pd.DataFrame(data=eigenvalues_outlier, columns=['Eigenvalues'])
Eigen_dff_outlier = pd.concat([PC_df_outlier, Eigen_df_outlier], axis=1)
data = Eigen_dff_outlier
elif outlier == 'No' and matrix_type == "Covariance":
features1_covar = dff.columns
features_covar = list(features1_covar)
x = dff.loc[:, features_covar].values
pca_covar = PCA(n_components=len(features_covar))
principalComponents_covar = pca_covar.fit_transform(x)
principalDf_covar = pd.DataFrame(data=principalComponents_covar
, columns=['PC' + str(i + 1) for i in range(len(features_covar))])
finalDf_covar = pd.concat([df[[df.columns[0]]], principalDf_covar], axis=1)
loading_covar = pca_covar.components_.T * np.sqrt(pca_covar.explained_variance_)
loading_df_covar = pd.DataFrame(data=loading_covar[0:, 0:], index=features_covar,
columns=['PC' + str(i + 1) for i in range(loading_covar.shape[1])])
Var_covar = pca_covar.explained_variance_ratio_
PC_df_covar = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features_covar))],
columns=['Principal Component'])
PC_num_covar = [float(i + 1) for i in range(len(features_covar))]
Var_df_covar = pd.DataFrame(data=Var_covar, columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum_covar = Var_df_covar.cumsum()
Var_dff_covar = pd.concat([PC_df_covar, (Var_cumsum_covar * 100)], axis=1)
PC_interp_covar = np.interp(70, Var_dff_covar['Cumulative Proportion of Explained Variance'], PC_num_covar)
PC_interp_int_covar = math.ceil(PC_interp_covar)
eigenvalues_covar = pca_covar.explained_variance_
Eigen_df_covar = pd.DataFrame(data=eigenvalues_covar, columns=['Eigenvalues'])
Eigen_dff_covar = pd.concat([PC_df_covar, Eigen_df_covar], axis=1)
data = Eigen_dff_covar
elif outlier == 'Yes' and matrix_type == 'Covariance':
z_scores = scipy.stats.zscore(dff)
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
outlier_dff = dff[filtered_entries]
features1_outlier_covar = outlier_dff.columns
features_outlier_covar = list(features1_outlier_covar)
outlier_names1 = df[filtered_entries]
outlier_names = outlier_names1.iloc[:, 0]
x_outlier = outlier_dff.loc[:, features_outlier_covar].values
pca_outlier_covar = PCA(n_components=len(features_outlier_covar))
principalComponents_outlier_covar = pca_outlier_covar.fit_transform(x_outlier)
principalDf_outlier_covar = pd.DataFrame(data=principalComponents_outlier_covar
, columns=['PC' + str(i + 1) for i in
range(len(features_outlier_covar))])
        # combining principal components and target
finalDf_outlier_covar = pd.concat([outlier_names, principalDf_outlier_covar], axis=1)
# calculating loading
loading_outlier_covar = pca_outlier_covar.components_.T * np.sqrt(pca_outlier_covar.explained_variance_)
loading_df_outlier_covar = pd.DataFrame(data=loading_outlier_covar[0:, 0:], index=features_outlier_covar,
columns=['PC' + str(i + 1) for i in
range(loading_outlier_covar.shape[1])])
Var_outlier_covar = pca_outlier_covar.explained_variance_ratio_
PC_df_outlier_covar = pd.DataFrame(data=['PC' + str(i + 1) for i in range(len(features_outlier_covar))],
columns=['Principal Component'])
PC_num_outlier_covar = [float(i + 1) for i in range(len(features_outlier_covar))]
Var_df_outlier_covar = pd.DataFrame(data=Var_outlier_covar,
columns=['Cumulative Proportion of Explained Variance'])
Var_cumsum_outlier_covar = Var_df_outlier_covar.cumsum()
Var_dff_outlier_covar = pd.concat([PC_df_outlier_covar, (Var_cumsum_outlier_covar * 100)], axis=1)
PC_interp_outlier = np.interp(70, Var_dff_outlier_covar['Cumulative Proportion of Explained Variance'],
PC_num_outlier_covar)
PC_interp_int_outlier = math.ceil(PC_interp_outlier)
eigenvalues_outlier_covar = pca_outlier_covar.explained_variance_
Eigen_df_outlier_covar = pd.DataFrame(data=eigenvalues_outlier_covar, columns=['Eigenvalues'])
Eigen_dff_outlier_covar = pd.concat([PC_df_outlier_covar, Eigen_df_outlier_covar], axis=1)
data = Eigen_dff_outlier_covar
traces.append(go.Scatter(x=data['Principal Component'], y=data['Eigenvalues'], mode='lines'))
return {'data': traces,
            'layout': go.Layout(title='<b>Scree Plot: Eigenvalues</b>', xaxis={'title': 'Principal Component',
'mirror': True,
'ticks': 'outside',
'showline': True,
'showspikes': True},
titlefont=dict(family='Helvetica', size=16),
yaxis={'title': 'Eigenvalues', 'mirror': True,
'ticks': 'outside',
'showline': True,
'showspikes': True}, hovermode='closest',
font=dict(family="Helvetica"), template="simple_white", )
}
def round_up(n, decimals=0):
multiplier = 10 ** decimals
return math.ceil(n * multiplier) / multiplier
def round_down(n, decimals=0):
multiplier = 10 ** decimals
return math.floor(n * multiplier) / multiplier
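# Example: round_up(0.123, 2) -> 0.13 and round_down(0.789, 2) -> 0.78. These helpers
# are used below to report the min/max of the loading matrix next to the heatmap.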
@app.callback([Output('PC-feature-heatmap', 'figure'),
Output('color-range-container', 'children')],
[
Input('PC-feature-outlier-value', 'value'),
Input('colorscale', 'value'),
Input("matrix-type-heatmap", "value"),
Input('csv-data', 'data')]
)
def update_graph_stat(outlier, colorscale, matrix_type, data):
if not data:
return dash.no_update
df = pd.read_json(data, orient='split')
dff = df.select_dtypes(exclude=['object'])
traces = []
# INCLUDING OUTLIERS
features1 = dff.columns
features = list(features1)
x = dff.loc[:, features].values
# Separating out the target (if any)
y = dff.loc[:, ].values
# Standardizing the features to {mean, variance} = {0, 1}
x = StandardScaler().fit_transform(x)
pca = PCA(n_components=len(features))
principalComponents = pca.fit_transform(x)
principalDf = pd.DataFrame(data=principalComponents
, columns=['PC' + str(i + 1) for i in range(len(features))])
    # combining principal components and target
finalDf = pd.concat([df[[df.columns[0]]], principalDf], axis=1)
dfff = finalDf
    # explained variance of the principal components
    # print(pca.explained_variance_ratio_)
    # Explained variance tells us how much information (variance) can be attributed to each of the principal components
    # loadings of each feature on the principal components
loading = pca.components_.T * np.sqrt(pca.explained_variance_)
loading_df = pd.DataFrame(data=loading[0:, 0:], index=features,
columns=['PC' + str(i + 1) for i in range(loading.shape[1])])
loading_dff = loading_df.T
# OUTLIERS REMOVED
z_scores_hm = scipy.stats.zscore(dff)
abs_z_scores_hm = np.abs(z_scores_hm)
filtered_entries_hm = (abs_z_scores_hm < 3).all(axis=1)
outlier_dff_hm = dff[filtered_entries_hm]
features1_outlier_hm = outlier_dff_hm.columns
features_outlier2 = list(features1_outlier_hm)
outlier_names1_hm = df[filtered_entries_hm]
outlier_names_hm = outlier_names1_hm.iloc[:, 0]
x_outlier_hm = outlier_dff_hm.loc[:, features_outlier2].values
# Separating out the target (if any)
# Standardizing the features
x_outlier_hm = StandardScaler().fit_transform(x_outlier_hm)
pca_outlier_hm = PCA(n_components=len(features_outlier2))
principalComponents_outlier_hm = pca_outlier_hm.fit_transform(x_outlier_hm)
principalDf_outlier_hm = pd.DataFrame(data=principalComponents_outlier_hm
, columns=['PC' + str(i + 1) for i in range(len(features_outlier2))])
    # combining principal components and target
finalDf_outlier_hm = pd.concat([outlier_names_hm, principalDf_outlier_hm], axis=1)
dfff_outlier_hm = finalDf_outlier_hm
# calculating loading
loading_outlier_hm = pca_outlier_hm.components_.T * np.sqrt(pca_outlier_hm.explained_variance_)
loading_df_outlier_hm = pd.DataFrame(data=loading_outlier_hm[0:, 0:], index=features_outlier2,
columns=['PC' + str(i + 1) for i in range(loading_outlier_hm.shape[1])])
loading_dff_outlier_hm = loading_df_outlier_hm.T
# COVAR MATRIX
features1_covar = dff.columns
features_covar = list(features1_covar)
x = dff.loc[:, features_covar].values
pca_covar = PCA(n_components=len(features_covar))
principalComponents_covar = pca_covar.fit_transform(x)
principalDf_covar = pd.DataFrame(data=principalComponents_covar
, columns=['PC' + str(i + 1) for i in range(len(features_covar))])
finalDf_covar = pd.concat([df[[df.columns[0]]], principalDf_covar], axis=1)
loading_covar = pca_covar.components_.T * np.sqrt(pca_covar.explained_variance_)
loading_df_covar = pd.DataFrame(data=loading_covar[0:, 0:], index=features_covar,
columns=['PC' + str(i + 1) for i in range(loading_covar.shape[1])])
loading_dff_covar = loading_df_covar.T
# COVAR MATRIX OUTLIERS REMOVED
if outlier == 'No' and matrix_type == "Correlation":
data = loading_dff
elif outlier == 'Yes' and matrix_type == "Correlation":
data = loading_dff_outlier_hm
elif outlier == 'No' and matrix_type == "Covariance":
data = loading_dff_covar
elif outlier == "Yes" and matrix_type == "Covariance":
z_scores = scipy.stats.zscore(dff)
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
outlier_dff = dff[filtered_entries]
features1_outlier_covar = outlier_dff.columns
features_outlier_covar = list(features1_outlier_covar)
outlier_names1 = df[filtered_entries]
outlier_names = outlier_names1.iloc[:, 0]
x_outlier = outlier_dff.loc[:, features_outlier_covar].values
pca_outlier_covar = PCA(n_components=len(features_outlier_covar))
principalComponents_outlier_covar = pca_outlier_covar.fit_transform(x_outlier)
principalDf_outlier_covar = pd.DataFrame(data=principalComponents_outlier_covar
, columns=['PC' + str(i + 1) for i in
range(len(features_outlier_covar))])
        # combining principal components and target
finalDf_outlier_covar = pd.concat([outlier_names, principalDf_outlier_covar], axis=1)
# calculating loading
loading_outlier_covar = pca_outlier_covar.components_.T * np.sqrt(pca_outlier_covar.explained_variance_)
loading_df_outlier_covar = pd.DataFrame(data=loading_outlier_covar[0:, 0:], index=features_outlier_covar,
columns=['PC' + str(i + 1) for i in
range(loading_outlier_covar.shape[1])])
loading_dff_outlier_covar = loading_df_outlier_covar.T
data = loading_dff_outlier_covar
    size_range = [round_up(data.values.min(), 2), round_down(data.values.max(), 2)]
    traces.append(go.Heatmap(
        z=data, x=data.columns, y=data.index,
colorscale="Viridis" if colorscale == 'Viridis' else "Plasma",
        # each cell is the loading of a feature on a principal component (feature/PC correlation for standardised data)
colorbar={"title": "Loading",
# 'tickvals': [round_up(data.values.min(), 2),
# round_up((data.values.min() + (data.values.max() + data.values.min())/2)/2, 2),
# round_down((data.values.max() + data.values.min())/2,2),
# round_down((data.values.max() + (data.values.max() + data.values.min())/2)/2, 2),
# round_down(data.values.max(),2), ]
}
))
return {'data': traces,
'layout': go.Layout(title=dict(text='<b>PC and Feature Correlation Analysis</b>'),
xaxis=dict(title_text='Features', title_standoff=50),
titlefont=dict(family='Helvetica', size=16),
hovermode='closest', margin={'b': 110, 't': 50, 'l': 75},
font=dict(family="Helvetica", size=11),
annotations=[
dict(x=-0.16, y=0.5, showarrow=False, text="Principal Components",
xref='paper', yref='paper', textangle=-90,
font=dict(size=12))]
),
}, '{}'.format(size_range)
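# Minimal sketch (hypothetical helper, not called by the app) of the loading matrix
# plotted above: components_.T scaled by the square root of the explained variance,
# which for standardised inputs is the correlation between each feature and each PC.
def _loadings_sketch(fitted_pca, feature_names):
    loadings = fitted_pca.components_.T * np.sqrt(fitted_pca.explained_variance_)
    return pd.DataFrame(loadings, index=feature_names,
                        columns=['PC' + str(i + 1) for i in range(loadings.shape[1])])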
@app.callback(Output('feature-heatmap', 'figure'),
[
Input('PC-feature-outlier-value', 'value'),
Input('colorscale', 'value'),
Input('csv-data', 'data')])
def update_graph_stat(outlier, colorscale, data):
if not data:
return dash.no_update
df = pd.read_json(data, orient='split')
dff = df.select_dtypes(exclude=['object'])
traces = []
if outlier == 'No':
features1 = dff.columns
features = list(features1)
# correlation coefficient and coefficient of determination
correlation_dff = dff.corr(method='pearson', )
r2_dff = correlation_dff * correlation_dff
data = r2_dff
feat = features
elif outlier == 'Yes':
z_scores = scipy.stats.zscore(dff)
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
outlier_dff = dff[filtered_entries]
features1_outlier = outlier_dff.columns
features_outlier = list(features1_outlier)
# correlation coefficient and coefficient of determination
correlation_dff_outlier = outlier_dff.corr(method='pearson', )
r2_dff_outlier = correlation_dff_outlier * correlation_dff_outlier
data = r2_dff_outlier
feat = features_outlier
traces.append(go.Heatmap(
z=data, x=feat, y=feat, colorscale="Viridis" if colorscale == 'Viridis' else "Plasma",
        # each cell is the coefficient of determination (R²) between a pair of features
colorbar={"title": "R²", 'tickvals': [0, 0.2, 0.4, 0.6, 0.8, 1]}))
return {'data': traces,
'layout': go.Layout(title=dict(text='<b>Feature Correlation Analysis</b>', y=0.97, x=0.6),
xaxis={},
titlefont=dict(family='Helvetica', size=16),
yaxis={},
hovermode='closest', margin={'b': 110, 't': 50, 'l': 180, 'r': 50},
font=dict(family="Helvetica", size=11)),
}
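# Minimal sketch (hypothetical helper, not called by the app) of the matrix behind the
# feature heatmap above: the element-wise square of the Pearson correlation matrix.
def _r_squared_matrix_sketch(numeric_df):
    corr = numeric_df.corr(method='pearson')
    return corr * corr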
@app.callback(Output('feature-input', 'options'),
[Input('all-custom-choice', 'value'),
Input('csv-data', 'data')])
def activate_input(all_custom, data):
if not data:
return dash.no_update
df = pd.read_json(data, orient='split')
dff = df.select_dtypes(exclude=['object'])
if all_custom == 'All':
options = []
elif all_custom == 'Custom':
options = [{'label': i, 'value': i} for i in dff.columns]
return options
@app.callback(Output('color-scale-scores', 'options'),
[Input('feature-input', 'value'),
Input('radio-target-item', 'value'),
Input('outlier-value-biplot', 'value'),
Input('customvar-graph-update', 'value'),
Input('matrix-type-biplot', 'value'),
Input('csv-data', 'data')], )
def populate_color_dropdown(input, target, outlier, graph_type, matrix_type, data):
if not data:
return dash.no_update
if input is None:
raise dash.exceptions.PreventUpdate
df = pd.read_json(data, orient='split')
dff = df.select_dtypes(exclude=['object'])
dff_target = dff[input]
z_scores_target = scipy.stats.zscore(dff_target)
abs_z_scores_target = np.abs(z_scores_target)
filtered_entries_target = (abs_z_scores_target < 3).all(axis=1)
dff_target_outlier = dff_target[filtered_entries_target]
if target == 'Yes' and outlier == 'Yes' and matrix_type == "Correlation" and graph_type == 'Biplot':
options = [{'label': i, 'value': i} for i in dff_target_outlier.columns]
elif target == 'Yes' and outlier == 'Yes' and matrix_type == "Covariance" and graph_type == 'Biplot':
options = [{'label': i, 'value': i} for i in dff_target_outlier.columns]
elif target == 'Yes' and outlier == 'No' and matrix_type == "Correlation" and graph_type == 'Biplot':
options = [{'label': i, 'value': i} for i in dff_target.columns]
elif target == 'Yes' and outlier == 'No' and matrix_type == "Covariance" and graph_type == 'Biplot':
options = [{'label': i, 'value': i} for i in dff_target.columns]
elif target == 'No' or graph_type == 'Loadings':
options = []
return options
@app.callback(Output('size-scale-scores', 'options'),
[Input('feature-input', 'value'),
Input('radio-target-item-second', 'value'),
Input('outlier-value-biplot', 'value'),
Input('customvar-graph-update', 'value'),
Input('matrix-type-biplot', 'value'),
Input('csv-data', 'data')])
def populate_color_dropdown(input, target, outlier, graph_type, matrix_type, data):
if not data:
return dash.no_update
if input is None:
raise dash.exceptions.PreventUpdate
df = pd.read_json(data, orient='split')
dff = df.select_dtypes(exclude=['object'])
dff_target = dff[input]
z_scores_target = scipy.stats.zscore(dff_target)
abs_z_scores_target = np.abs(z_scores_target)
filtered_entries_target = (abs_z_scores_target < 3).all(axis=1)
dff_target_outlier = dff_target[filtered_entries_target]
if target == 'Yes' and outlier == 'Yes' and matrix_type == "Correlation" and graph_type == 'Biplot':
options = [{'label': i, 'value': i} for i in dff_target_outlier.columns]
elif target == 'Yes' and outlier == 'Yes' and matrix_type == "Covariance" and graph_type == 'Biplot':
options = [{'label': i, 'value': i} for i in dff_target_outlier.columns]
elif target == 'Yes' and outlier == 'No' and matrix_type == "Correlation" and graph_type == 'Biplot':
options = [{'label': i, 'value': i} for i in dff_target.columns]
elif target == 'Yes' and outlier == 'No' and matrix_type == "Covariance" and graph_type == 'Biplot':
options = [{'label': i, 'value': i} for i in dff_target.columns]
elif target == 'No' or graph_type == 'Loadings':
options = []
return options
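# The colour-scale and size-scale dropdowns above share the same logic: they are
# populated from the user-selected target columns (restricted to outlier-filtered rows
# when outlier removal is on) and left empty when no target is used or the 'Loadings'
# plot is shown.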
# Biplot / loadings-plot callback: handles both matrix types, with and without outlier removal
@app.callback(Output('biplot', 'figure'),
[
Input('outlier-value-biplot', 'value'),
Input('feature-input', 'value'),
Input('customvar-graph-update', 'value'),
Input('color-scale-scores', 'value'),
Input('radio-target-item', 'value'),
Input('size-scale-scores', 'value'),
Input('radio-target-item-second', 'value'),
Input('all-custom-choice', 'value'),
Input('matrix-type-biplot', 'value'),
Input('csv-data', 'data')
]
)
def update_graph_custom(outlier, input, graph_update, color, target, size, target2, all_custom, matrix_type, data):
if not data:
return dash.no_update
df = pd.read_json(data, orient='split')
dff = df.select_dtypes(exclude=['object'])
features1 = dff.columns
features = list(features1)
if all_custom == 'All':
# x_scale = MinMaxScaler(feature_range=(0, 1), copy=True).fit_transform(x_scale)
# OUTLIER DATA
z_scores = scipy.stats.zscore(dff)
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
outlier_dff = dff[filtered_entries]
features1_outlier = outlier_dff.columns
features_outlier = list(features1_outlier)
outlier_names1 = df[filtered_entries]
outlier_names = outlier_names1.iloc[:, 0]
# def rescale(data, new_min=0, new_max=1):
# """Rescale the data to be within the range [new_min, new_max]"""
# return (data - data.min()) / (data.max() - data.min()) * (new_max - new_min) + new_min
# ORIGINAL DATA WITH OUTLIERS
x_scale = dff.loc[:, features].values
y_scale = dff.loc[:, ].values
x_scale = StandardScaler().fit_transform(x_scale)
# x_scale = rescale(x_scale, new_min=0, new_max=1)
pca_scale = PCA(n_components=len(features))
principalComponents_scale = pca_scale.fit_transform(x_scale)
principalDf_scale = pd.DataFrame(data=principalComponents_scale
, columns=['PC' + str(i + 1) for i in range(len(features))])
        # combining principal components and target
finalDf_scale = pd.concat([df[[df.columns[0]]], principalDf_scale], axis=1)
dfff_scale = finalDf_scale.fillna(0)
Var_scale = pca_scale.explained_variance_ratio_
# calculating loading vector plot
loading_scale = pca_scale.components_.T * np.sqrt(pca_scale.explained_variance_)
loading_scale_df = pd.DataFrame(data=loading_scale[:, 0:2],
columns=["PC1", "PC2"])
line_group_scale_df = pd.DataFrame(data=features, columns=['line_group'])
loading_scale_dff = pd.concat([loading_scale_df, line_group_scale_df], axis=1)
a = (len(features), 2)
zero_scale = np.zeros(a)
zero_scale_df = pd.DataFrame(data=zero_scale, columns=["PC1", "PC2"])
zero_scale_dff = pd.concat([zero_scale_df, line_group_scale_df], axis=1)
loading_scale_line_graph = pd.concat([loading_scale_dff, zero_scale_dff], axis=0)
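        # Pairing each loading with the origin (0, 0) and grouping by 'line_group' lets the
        # plot draw every feature as a line segment from the origin to its (PC1, PC2) loading.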
# ORIGINAL DATA WITH REMOVING OUTLIERS
x_outlier_scale = outlier_dff.loc[:, features_outlier].values
y_outlier_scale = outlier_dff.loc[:, ].values
x_outlier_scale = StandardScaler().fit_transform(x_outlier_scale)
# x_outlier_scale = MinMaxScaler().fit_transform(x_outlier_scale)
# def rescale(data, new_min=0, new_max=1):
# """Rescale the data to be within the range [new_min, new_max]"""
# return (data - data.min()) / (data.max() - data.min()) * (new_max - new_min) + new_min
# x_outlier_scale = rescale(x_outlier_scale, new_min=0, new_max=1)
        # PCA on standardised data (equivalent to using the correlation matrix)
pca_outlier_scale = PCA(n_components=len(features_outlier))
principalComponents_outlier_scale = pca_outlier_scale.fit_transform(x_outlier_scale)
principalDf_outlier_scale = pd.DataFrame(data=principalComponents_outlier_scale
, columns=['PC' + str(i + 1) for i in range(len(features_outlier))])
        # combining principal components and target
finalDf_outlier_scale = pd.concat([outlier_names, principalDf_outlier_scale], axis=1)
dfff_outlier_scale = finalDf_outlier_scale.fillna(0)
# calculating loading
Var_outlier_scale = pca_outlier_scale.explained_variance_ratio_
# calculating loading vector plot
loading_outlier_scale = pca_outlier_scale.components_.T * np.sqrt(pca_outlier_scale.explained_variance_)
loading_outlier_scale_df = pd.DataFrame(data=loading_outlier_scale[:, 0:2],
columns=["PC1", "PC2"])
line_group_df = pd.DataFrame(data=features_outlier, columns=['line_group'])
loading_outlier_scale_dff = pd.concat([loading_outlier_scale_df, line_group_df], axis=1)
a = (len(features_outlier), 2)
zero_outlier_scale = np.zeros(a)
zero_outlier_scale_df = pd.DataFrame(data=zero_outlier_scale, columns=["PC1", "PC2"])
zero_outlier_scale_dff = pd.concat([zero_outlier_scale_df, line_group_df], axis=1)
loading_outlier_scale_line_graph = pd.concat([loading_outlier_scale_dff, zero_outlier_scale_dff], axis=0)
# COVARIANCE MATRIX
x_scale_covar = dff.loc[:, features].values
y_scale_covar = dff.loc[:, ].values
pca_scale_covar = PCA(n_components=len(features))
principalComponents_scale_covar = pca_scale_covar.fit_transform(x_scale_covar)
principalDf_scale_covar = pd.DataFrame(data=principalComponents_scale_covar
, columns=['PC' + str(i + 1) for i in range(len(features))])
        # combining principal components and target
finalDf_scale_covar = pd.concat([df[[df.columns[0]]], principalDf_scale_covar], axis=1)
dfff_scale_covar = finalDf_scale_covar.fillna(0)
Var_scale_covar = pca_scale_covar.explained_variance_ratio_
loading_scale_covar = pca_scale_covar.components_.T * np.sqrt(pca_scale_covar.explained_variance_)
loading_scale_df_covar = pd.DataFrame(data=loading_scale_covar[:, 0:2],
columns=["PC1", "PC2"])
line_group_scale_df_covar = pd.DataFrame(data=features, columns=['line_group'])
loading_scale_dff_covar = pd.concat([loading_scale_df_covar, line_group_scale_df_covar], axis=1)
a = (len(features), 2)
zero_scale_covar = np.zeros(a)
zero_scale_df_covar = pd.DataFrame(data=zero_scale_covar, columns=["PC1", "PC2"])
zero_scale_dff_covar = pd.concat([zero_scale_df_covar, line_group_scale_df_covar], axis=1)
loading_scale_line_graph_covar = pd.concat([loading_scale_dff_covar, zero_scale_dff_covar], axis=0)
# COVARIANCE MATRIX OUTLIERS
x_outlier_scale_covar = outlier_dff.loc[:, features_outlier].values
y_outlier_scale_covar = outlier_dff.loc[:, ].values
pca_outlier_scale_covar = PCA(n_components=len(features_outlier))
principalComponents_outlier_scale_covar = pca_outlier_scale_covar.fit_transform(x_outlier_scale_covar)
principalDf_outlier_scale_covar = pd.DataFrame(data=principalComponents_outlier_scale_covar
, columns=['PC' + str(i + 1) for i in
range(len(features_outlier))])
finalDf_outlier_scale_covar = pd.concat([outlier_names, principalDf_outlier_scale_covar], axis=1)
dfff_outlier_scale_covar = finalDf_outlier_scale_covar.fillna(0)
Var_outlier_scale_covar = pca_outlier_scale_covar.explained_variance_ratio_
# calculating loading vector plot
loading_outlier_scale_covar = pca_outlier_scale_covar.components_.T * np.sqrt(
pca_outlier_scale_covar.explained_variance_)
loading_outlier_scale_df_covar = pd.DataFrame(data=loading_outlier_scale_covar[:, 0:2],
columns=["PC1", "PC2"])
line_group_df_covar = pd.DataFrame(data=features_outlier, columns=['line_group'])
loading_outlier_scale_dff_covar = pd.concat([loading_outlier_scale_df_covar, line_group_df_covar], axis=1)
a = (len(features_outlier), 2)
zero_outlier_scale_covar = np.zeros(a)
zero_outlier_scale_df_covar = pd.DataFrame(data=zero_outlier_scale_covar, columns=["PC1", "PC2"])
zero_outlier_scale_dff_covar = pd.concat([zero_outlier_scale_df_covar, line_group_df_covar], axis=1)
loading_outlier_scale_line_graph_covar = pd.concat(
[loading_outlier_scale_dff_covar, zero_outlier_scale_dff_covar], axis=0)
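        # Pick the score and loading tables that match the outlier / matrix-type selections below.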
if outlier == 'No' and matrix_type == "Correlation":
dat = dfff_scale
elif outlier == 'Yes' and matrix_type == "Correlation":
dat = dfff_outlier_scale
elif outlier == "No" and matrix_type == "Covariance":
dat = dfff_scale_covar
elif outlier == "Yes" and matrix_type == "Covariance":
dat = dfff_outlier_scale_covar
trace2_all = go.Scatter(x=dat['PC1'], y=dat['PC2'], mode='markers',
text=dat[dat.columns[0]],
hovertemplate=
'<b>%{text}</b>' +
'<br>PC1: %{x}<br>' +
'PC2: %{y}'
"<extra></extra>",
marker=dict(opacity=0.7, showscale=False, size=12,
line=dict(width=0.5, color='DarkSlateGrey'),
),
)
####################################################################################################
# INCLUDE THIS
if outlier == 'No' and matrix_type == "Correlation":
data = loading_scale_line_graph
variance = Var_scale
elif outlier == 'Yes' and matrix_type == "Correlation":
data = loading_outlier_scale_line_graph
variance = Var_outlier_scale
elif outlier == "No" and matrix_type == "Covariance":
data = loading_scale_line_graph_covar
variance = Var_scale_covar
elif outlier == "Yes" and matrix_type == "Covariance":
data = loading_outlier_scale_line_graph_covar
variance = Var_outlier_scale_covar
counter = 0
lists = [[] for i in range(len(data['line_group'].unique()))]
for i in data['line_group'].unique():
dataf_all = data[data['line_group'] == i]
trace1_all = go.Scatter(x=dataf_all['PC1'], y=dataf_all['PC2'], line=dict(color="#4f4f4f"),
name=i,
# text=i,
meta=i,
hovertemplate=
'<b>%{meta}</b>' +
'<br>PC1: %{x}<br>' +
'PC2: %{y}'
"<extra></extra>",
mode='lines+text',
textposition='bottom right', textfont=dict(size=12)
)
lists[counter] = trace1_all
counter = counter + 1
####################################################################################################
if graph_update == 'Biplot':
lists.insert(0, trace2_all)
return {'data': lists,
'layout': go.Layout(xaxis=dict(title='PC1 ({}%)'.format(round((variance[0] * 100), 2))),
yaxis=dict(title='PC2 ({}%)'.format(round((variance[1] * 100), 2))),
showlegend=False, margin={'r': 0},
# shapes=[dict(type="circle", xref="x", yref="y", x0=-1,
# y0=-1, x1=1, y1=1,
# line_color="DarkSlateGrey")]
),
}
elif graph_update == 'Loadings':
return {'data': lists,
'layout': go.Layout(xaxis=dict(title='PC1 ({}%)'.format(round((variance[0] * 100), 2))),
yaxis=dict(title='PC2 ({}%)'.format(round((variance[1] * 100), 2))),
showlegend=False, margin={'r': 0},
# shapes=[dict(type="circle", xref="x", yref="y", x0=-1,
# y0=-1, x1=1, y1=1,
# line_color="DarkSlateGrey")]
),
}
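    # 'Custom' mode: the selected target columns are dropped from the PCA input and kept
    # only for colouring / sizing the score markers.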
elif all_custom == 'Custom':
# Dropping Data variables
dff_input = dff.drop(columns=dff[input])
features1_input = dff_input.columns
features_input = list(features1_input)
dff_target = dff[input]
# OUTLIER DATA INPUT
z_scores_input = scipy.stats.zscore(dff_input)
abs_z_scores_input = np.abs(z_scores_input)
filtered_entries_input = (abs_z_scores_input < 3).all(axis=1)
dff_input_outlier = dff_input[filtered_entries_input]
features1_input_outlier = dff_input_outlier.columns
features_input_outlier = list(features1_input_outlier)
outlier_names_input1 = df[filtered_entries_input]
outlier_names_input = outlier_names_input1.iloc[:, 0]
# OUTLIER DATA TARGET
z_scores_target = scipy.stats.zscore(dff_target)
abs_z_scores_target = np.abs(z_scores_target)
filtered_entries_target = (abs_z_scores_target < 3).all(axis=1)
dff_target_outlier = dff_target[filtered_entries_target]
# INPUT DATA WITH OUTLIERS
x_scale_input = dff_input.loc[:, features_input].values
y_scale_input = dff_input.loc[:, ].values
x_scale_input = StandardScaler().fit_transform(x_scale_input)
# x_scale_input = MinMaxScaler(feature_range=(0, 1), copy=True).fit_transform(x_scale_input)
# def rescale(data, new_min=0, new_max=1):
# """Rescale the data to be within the range [new_min, new_max]"""
# return (data - data.min()) / (data.max() - data.min()) * (new_max - new_min) + new_min
# x_scale_input = rescale(x_scale_input, new_min=0, new_max=1)
pca_scale_input = PCA(n_components=len(features_input))
principalComponents_scale_input = pca_scale_input.fit_transform(x_scale_input)
principalDf_scale_input = pd.DataFrame(data=principalComponents_scale_input
, columns=['PC' + str(i + 1) for i in range(len(features_input))])
finalDf_scale_input = pd.concat([df[[df.columns[0]]], principalDf_scale_input, dff_target], axis=1)
dfff_scale_input = finalDf_scale_input.fillna(0)
Var_scale_input = pca_scale_input.explained_variance_ratio_
# calculating loading vector plot
loading_scale_input = pca_scale_input.components_.T * np.sqrt(pca_scale_input.explained_variance_)
loading_scale_input_df = pd.DataFrame(data=loading_scale_input[:, 0:2],
columns=["PC1", "PC2"])
line_group_scale_input_df = pd.DataFrame(data=features_input, columns=['line_group'])
loading_scale_input_dff = pd.concat([loading_scale_input_df, line_group_scale_input_df],
axis=1)
a = (len(features_input), 2)
zero_scale_input = np.zeros(a)
zero_scale_input_df = pd.DataFrame(data=zero_scale_input, columns=["PC1", "PC2"])
zero_scale_input_dff = pd.concat([zero_scale_input_df, line_group_scale_input_df], axis=1)
loading_scale_input_line_graph = pd.concat([loading_scale_input_dff, zero_scale_input_dff],
axis=0)
# INPUT DATA WITH REMOVING OUTLIERS
x_scale_input_outlier = dff_input_outlier.loc[:, features_input_outlier].values
y_scale_input_outlier = dff_input_outlier.loc[:, ].values
x_scale_input_outlier = StandardScaler().fit_transform(x_scale_input_outlier)
# x_scale_input_outlier = MinMaxScaler(feature_range=(0, 1), copy=True).fit_transform(x_scale_input_outlier)
# def rescale(data, new_min=0, new_max=1):
# """Rescale the data to be within the range [new_min, new_max]"""
# return (data - data.min()) / (data.max() - data.min()) * (new_max - new_min) + new_min
# x_scale_input_outlier = rescale(x_scale_input_outlier, new_min=0, new_max=1)
pca_scale_input_outlier = PCA(n_components=len(features_input_outlier))
principalComponents_scale_input_outlier = pca_scale_input_outlier.fit_transform(x_scale_input_outlier)
principalDf_scale_input_outlier = pd.DataFrame(data=principalComponents_scale_input_outlier
, columns=['PC' + str(i + 1) for i in
range(len(features_input_outlier))])
finalDf_scale_input_outlier = pd.concat(
[outlier_names_input, principalDf_scale_input_outlier, dff_target_outlier],
axis=1)
dfff_scale_input_outlier = finalDf_scale_input_outlier.fillna(0)
Var_scale_input_outlier = pca_scale_input_outlier.explained_variance_ratio_
# calculating loading vector plot
loading_scale_input_outlier = pca_scale_input_outlier.components_.T * np.sqrt(
pca_scale_input_outlier.explained_variance_)
loading_scale_input_outlier_df = pd.DataFrame(data=loading_scale_input_outlier[:, 0:2],
columns=["PC1", "PC2"])
line_group_scale_input_outlier_df = pd.DataFrame(data=features_input_outlier, columns=['line_group'])
loading_scale_input_outlier_dff = pd.concat([loading_scale_input_outlier_df, line_group_scale_input_outlier_df],
axis=1)
a = (len(features_input_outlier), 2)
zero_scale_input_outlier = np.zeros(a)
zero_scale_input_outlier_df = pd.DataFrame(data=zero_scale_input_outlier, columns=["PC1", "PC2"])
zero_scale_input_outlier_dff = pd.concat([zero_scale_input_outlier_df, line_group_scale_input_outlier_df],
axis=1)
loading_scale_input_outlier_line_graph = pd.concat(
[loading_scale_input_outlier_dff, zero_scale_input_outlier_dff],
axis=0)
# COVARIANCE MATRIX
x_scale_input_covar = dff_input.loc[:, features_input].values
y_scale_input_covar = dff_input.loc[:, ].values
pca_scale_input_covar = PCA(n_components=len(features_input))
principalComponents_scale_input_covar = pca_scale_input_covar.fit_transform(x_scale_input_covar)
principalDf_scale_input_covar = pd.DataFrame(data=principalComponents_scale_input_covar
, columns=['PC' + str(i + 1) for i in range(len(features_input))])
finalDf_scale_input_covar = pd.concat([df[[df.columns[0]]], principalDf_scale_input_covar, dff_target], axis=1)
dfff_scale_input_covar = finalDf_scale_input_covar.fillna(0)
Var_scale_input_covar = pca_scale_input_covar.explained_variance_ratio_
# calculating loading vector plot
loading_scale_input_covar = pca_scale_input_covar.components_.T * np.sqrt(
pca_scale_input_covar.explained_variance_)
loading_scale_input_df_covar = pd.DataFrame(data=loading_scale_input_covar[:, 0:2],
columns=["PC1", "PC2"])
line_group_scale_input_df_covar = pd.DataFrame(data=features_input, columns=['line_group'])
loading_scale_input_dff_covar = pd.concat([loading_scale_input_df_covar, line_group_scale_input_df_covar],
axis=1)
a = (len(features_input), 2)
zero_scale_input_covar = np.zeros(a)
zero_scale_input_df_covar = pd.DataFrame(data=zero_scale_input_covar, columns=["PC1", "PC2"])
zero_scale_input_dff_covar = pd.concat([zero_scale_input_df_covar, line_group_scale_input_df_covar], axis=1)
loading_scale_input_line_graph_covar = pd.concat([loading_scale_input_dff_covar, zero_scale_input_dff_covar],
axis=0)
# COVARIANCE MATRIX OUTLIERS
x_scale_input_outlier_covar = dff_input_outlier.loc[:, features_input_outlier].values
y_scale_input_outlier_covar = dff_input_outlier.loc[:, ].values
pca_scale_input_outlier_covar = PCA(n_components=len(features_input_outlier))
principalComponents_scale_input_outlier_covar = pca_scale_input_outlier_covar.fit_transform(
x_scale_input_outlier_covar)
principalDf_scale_input_outlier_covar = pd.DataFrame(data=principalComponents_scale_input_outlier_covar
, columns=['PC' + str(i + 1) for i in
range(len(features_input_outlier))])
finalDf_scale_input_outlier_covar = pd.concat(
[outlier_names_input, principalDf_scale_input_outlier_covar, dff_target_outlier],
axis=1)
dfff_scale_input_outlier_covar = finalDf_scale_input_outlier_covar.fillna(0)
Var_scale_input_outlier_covar = pca_scale_input_outlier_covar.explained_variance_ratio_
# calculating loading vector plot
loading_scale_input_outlier_covar = pca_scale_input_outlier_covar.components_.T * np.sqrt(
pca_scale_input_outlier_covar.explained_variance_)
loading_scale_input_outlier_df_covar = pd.DataFrame(data=loading_scale_input_outlier_covar[:, 0:2],
columns=["PC1", "PC2"])
line_group_scale_input_outlier_df_covar = pd.DataFrame(data=features_input_outlier, columns=['line_group'])
loading_scale_input_outlier_dff_covar = pd.concat([loading_scale_input_outlier_df_covar,
line_group_scale_input_outlier_df_covar],
axis=1)
a = (len(features_input_outlier), 2)
zero_scale_input_outlier_covar = np.zeros(a)
zero_scale_input_outlier_df_covar = pd.DataFrame(data=zero_scale_input_outlier_covar, columns=["PC1", "PC2"])
zero_scale_input_outlier_dff_covar = pd.concat([zero_scale_input_outlier_df_covar,
line_group_scale_input_outlier_df_covar],
axis=1)
loading_scale_input_outlier_line_graph_covar = pd.concat(
[loading_scale_input_outlier_dff_covar, zero_scale_input_outlier_dff_covar],
axis=0)
if outlier == 'No' and matrix_type == "Correlation":
dat = dfff_scale_input
variance = Var_scale_input
elif outlier == 'Yes' and matrix_type == "Correlation":
dat = dfff_scale_input_outlier
variance = Var_scale_input_outlier
elif outlier == "No" and matrix_type == "Covariance":
dat = dfff_scale_input_covar
variance = Var_scale_input_covar
elif outlier == "Yes" and matrix_type == "Covariance":
dat = dfff_scale_input_outlier_covar
variance = Var_scale_input_outlier_covar
trace2 = go.Scatter(x=dat['PC1'], y=dat['PC2'], mode='markers',
marker_color=dat[color] if target == 'Yes' else None,
marker_size=dat[size] if target2 == 'Yes' else 12,
text=dat[dat.columns[0]],
hovertemplate=
'<b>%{text}</b>' +
'<br>PC1: %{x}<br>' +
'PC2: %{y}'
"<extra></extra>",
marker=dict(opacity=0.7, colorscale='Plasma',
sizeref=max(dat[size]) / (15 ** 2) if target2 == 'Yes' else None,
sizemode='area',
showscale=True if target == 'Yes' else False,
line=dict(width=0.5, color='DarkSlateGrey'),
colorbar=dict(title=dict(text=color if target == 'Yes' else None,
font=dict(family='Helvetica'),
side='right'), ypad=0),
),
)
####################################################################################################
if outlier == 'No' and matrix_type == "Correlation":
data = loading_scale_input_line_graph
elif outlier == 'Yes' and matrix_type == "Correlation":
data = loading_scale_input_outlier_line_graph
elif outlier == "No" and matrix_type == "Covariance":
data = loading_scale_input_line_graph_covar
elif outlier == "Yes" and matrix_type == "Covariance":
data = loading_scale_input_outlier_line_graph_covar
counter = 0
lists = [[] for i in range(len(data['line_group'].unique()))]
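# one Scatter trace per variable: each 'line_group' pairs a loading point with an
# origin point, so each trace draws a single loading vector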
for i in data['line_group'].unique():
dataf = data[data['line_group'] == i]
trace1 = go.Scatter(x=dataf['PC1'], y=dataf['PC2'],
line=dict(color="#666666" if target == 'Yes' else '#4f4f4f'), name=i,
# text=i,
meta=i,
hovertemplate=
'<b>%{meta}</b>' +
'<br>PC1: %{x}<br>' +
'PC2: %{y}'
"<extra></extra>",
mode='lines+text', textposition='bottom right', textfont=dict(size=12),
)
lists[counter] = trace1
counter = counter + 1
####################################################################################################
if graph_update == 'Biplot':
lists.insert(0, trace2)
return {'data': lists,
'layout': go.Layout(xaxis=dict(title='PC1 ({}%)'.format(round((variance[0] * 100), 2))),
yaxis=dict(title='PC2 ({}%)'.format(round((variance[1] * 100), 2))),
showlegend=False, margin={'r': 0},
# shapes=[dict(type="circle", xref="x", yref="y", x0=-1,
# y0=-1, x1=1, y1=1,
# line_color="DarkSlateGrey")]
),
}
elif graph_update == 'Loadings':
return {'data': lists,
'layout': go.Layout(xaxis=dict(title='PC1 ({}%)'.format(round((variance[0] * 100), 2))),
yaxis=dict(title='PC2 ({}%)'.format(round((variance[1] * 100), 2))),
showlegend=False, margin={'r': 0},
# shapes=[dict(type="circle", xref="x", yref="y", x0=-1,
# y0=-1, x1=1, y1=1,
# line_color="DarkSlateGrey")]
),
}
@app.callback(
Output('size-second-target-container', 'children'),
[Input('size-scale-scores', 'value'),
Input('outlier-value-biplot', 'value'),
Input('csv-data', 'data')
]
)
def update_output(size, outlier, data):
if not data:
return dash.no_update
if size is None:
raise dash.exceptions.PreventUpdate
df = pd.read_json(data, orient='split')
dff = df.select_dtypes(exclude=['object'])
z_scores_dff_size = scipy.stats.zscore(dff)
abs_z_scores_dff_size = np.abs(z_scores_dff_size)
filtered_entries_dff_size = (abs_z_scores_dff_size < 3).all(axis=1)
dff_target_outlier_size = dff[filtered_entries_dff_size]
if outlier == 'Yes':
size_range = [round(dff_target_outlier_size[size].min(), 2), round(dff_target_outlier_size[size].max(), 2)]
elif outlier == 'No':
size_range = [round(dff[size].min(), 2), round(dff[size].max(), 2)]
return '{}'.format(size_range)
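# --- Illustrative sketch (not wired into any callback) ----------------------
# The outlier handling above keeps only rows whose absolute z-score is below 3
# in every numeric column. A minimal standalone version of that filter, assuming
# a purely numeric DataFrame such as `dff`; the name and threshold default are
# placeholders for illustration only:
def _zscore_outlier_filter_sketch(numeric_df, threshold=3.0):
    """Return the rows of `numeric_df` whose |z-score| is below `threshold` in every column."""
    import numpy as np
    import scipy.stats
    z = scipy.stats.zscore(numeric_df)
    keep = (np.abs(z) < threshold).all(axis=1)
    return numeric_df[keep]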
@app.callback(Output('cos2-plot', 'figure'),
[
Input('outlier-value-cos2', 'value'),
Input('feature-input', 'value'),
Input('all-custom-choice', 'value'),
Input("matrix-type-cos2", "value"),
Input('csv-data', 'data')
])
def update_cos2_plot(outlier, input, all_custom, matrix_type, data):
if not data:
return dash.no_update
df = pd.read_json(data, orient='split')
dff = df.select_dtypes(exclude=['object'])
if all_custom == 'All':
# x_scale = MinMaxScaler(feature_range=(0, 1), copy=True).fit_transform(x_scale)
features1 = dff.columns
features = list(features1)
# OUTLIER DATA
z_scores = scipy.stats.zscore(dff)
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
outlier_dff = dff[filtered_entries]
features1_outlier = outlier_dff.columns
features_outlier = list(features1_outlier)
outlier_names1 = df[filtered_entries]
outlier_names = outlier_names1.iloc[:, 0]
# def rescale(data, new_min=0, new_max=1):
# """Rescale the data to be within the range [new_min, new_max]"""
# return (data - data.min()) / (data.max() - data.min()) * (new_max - new_min) + new_min
# ORIGINAL DATA WITH OUTLIERS
x_scale = dff.loc[:, features].values
y_scale = dff.loc[:, ].values
x_scale = StandardScaler().fit_transform(x_scale)
pca_scale = PCA(n_components=len(features))
principalComponents_scale = pca_scale.fit_transform(x_scale)
principalDf_scale = pd.DataFrame(data=principalComponents_scale
, columns=['PC' + str(i + 1) for i in range(len(features))])
# combining principal components and target
finalDf_scale = pd.concat([df[[df.columns[0]]], principalDf_scale], axis=1)
Var_scale = pca_scale.explained_variance_ratio_
# calculating loading vector plot
loading_scale = pca_scale.components_.T * np.sqrt(pca_scale.explained_variance_)
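# loadings = eigenvectors scaled by the square roots of their eigenvalues; for
# standardized data these are the correlations between each variable and each PC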
loading_scale_df = pd.DataFrame(data=loading_scale[:, 0:2],
columns=["PC1", "PC2"])
loading_scale_df['cos2'] = (loading_scale_df["PC1"] ** 2) + (loading_scale_df["PC2"] ** 2)
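# cos2 = squared loading coordinates on PC1 and PC2, i.e. how well each variable
# is represented in the plane of the first two components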
line_group_scale_df = pd.DataFrame(data=features, columns=['line_group'])
loading_scale_dff = pd.concat([loading_scale_df, line_group_scale_df], axis=1)
a = (len(features), 2)
zero_scale = np.zeros(a)
zero_scale_df = pd.DataFrame(data=zero_scale, columns=["PC1", "PC2"])
zero_scale_df_color = pd.DataFrame(data=loading_scale_df.iloc[:, 2], columns=['cos2'])
zero_scale_dff = pd.concat([zero_scale_df, zero_scale_df_color, line_group_scale_df], axis=1)
loading_scale_line_graph = pd.concat([loading_scale_dff, zero_scale_dff], axis=0)
# ORIGINAL DATA WITH REMOVING OUTLIERS
x_outlier_scale = outlier_dff.loc[:, features_outlier].values
y_outlier_scale = outlier_dff.loc[:, ].values
x_outlier_scale = StandardScaler().fit_transform(x_outlier_scale)
# x_outlier_scale = MinMaxScaler().fit_transform(x_outlier_scale)
# def rescale(data, new_min=0, new_max=1):
# """Rescale the data to be within the range [new_min, new_max]"""
# return (data - data.min()) / (data.max() - data.min()) * (new_max - new_min) + new_min
#
# x_outlier_scale = rescale(x_outlier_scale, new_min=0, new_max=1)
# PCA on the standardized data (i.e. using the correlation matrix)
pca_outlier_scale = PCA(n_components=len(features_outlier))
principalComponents_outlier_scale = pca_outlier_scale.fit_transform(x_outlier_scale)
principalDf_outlier_scale = pd.DataFrame(data=principalComponents_outlier_scale
, columns=['PC' + str(i + 1) for i in range(len(features_outlier))])
# combining principal components and target
finalDf_outlier_scale = pd.concat([outlier_names, principalDf_outlier_scale], axis=1)
Var_outlier_scale = pca_outlier_scale.explained_variance_ratio_
# calculating loading vector plot
loading_outlier_scale = pca_outlier_scale.components_.T * np.sqrt(pca_outlier_scale.explained_variance_)
loading_outlier_scale_df = pd.DataFrame(data=loading_outlier_scale[:, 0:2],
columns=["PC1", "PC2"])
loading_outlier_scale_df["cos2"] = (loading_outlier_scale_df["PC1"] ** 2) + (
loading_outlier_scale_df["PC2"] ** 2)
line_group_df = pd.DataFrame(data=features_outlier, columns=['line_group'])
loading_outlier_scale_dff = pd.concat([loading_outlier_scale_df, line_group_df], axis=1)
a = (len(features_outlier), 2)
zero_outlier_scale = np.zeros(a)
zero_outlier_scale_df = pd.DataFrame(data=zero_outlier_scale, columns=["PC1", "PC2"])
zero_outlier_scale_df_color = pd.DataFrame(data=loading_outlier_scale_df.iloc[:, 2], columns=['cos2'])
zero_outlier_scale_dff = pd.concat([zero_outlier_scale_df, zero_outlier_scale_df_color, line_group_df], axis=1)
loading_outlier_scale_line_graph = pd.concat([loading_outlier_scale_dff, zero_outlier_scale_dff], axis=0)
loading_scale_line_graph_sort = loading_scale_line_graph.sort_values(by='cos2')
loading_outlier_scale_line_graph_sort = loading_outlier_scale_line_graph.sort_values(by='cos2')
# COVARIANCE MATRIX
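# for the covariance-matrix option no StandardScaler is applied, so the PCA is
# computed on the raw (unstandardized) features; the correlation-matrix blocks
# above standardize the data first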
x_scale_covar = dff.loc[:, features].values
y_scale_covar = dff.loc[:, ].values
pca_scale_covar = PCA(n_components=len(features))
principalComponents_scale_covar = pca_scale_covar.fit_transform(x_scale_covar)
principalDf_scale_covar = pd.DataFrame(data=principalComponents_scale_covar
, columns=['PC' + str(i + 1) for i in range(len(features))])
# combining principal components and target
finalDf_scale_covar = pd.concat([df[[df.columns[0]]], principalDf_scale_covar], axis=1)
Var_scale_covar = pca_scale_covar.explained_variance_ratio_
# calculating loading vector plot
loading_scale_covar = pca_scale_covar.components_.T * np.sqrt(pca_scale_covar.explained_variance_)
loading_scale_df_covar = pd.DataFrame(data=loading_scale_covar[:, 0:2],
columns=["PC1", "PC2"])
loading_scale_df_covar['cos2'] = (loading_scale_df_covar["PC1"] ** 2) + (loading_scale_df_covar["PC2"] ** 2)
line_group_scale_df_covar = pd.DataFrame(data=features, columns=['line_group'])
loading_scale_dff_covar = pd.concat([loading_scale_df_covar, line_group_scale_df_covar], axis=1)
a = (len(features), 2)
zero_scale_covar = np.zeros(a)
zero_scale_df_covar = pd.DataFrame(data=zero_scale_covar, columns=["PC1", "PC2"])
zero_scale_df_color_covar = pd.DataFrame(data=loading_scale_df_covar.iloc[:, 2], columns=['cos2'])
zero_scale_dff_covar = pd.concat([zero_scale_df_covar, zero_scale_df_color_covar, line_group_scale_df_covar],
axis=1)
loading_scale_line_graph_covar = pd.concat([loading_scale_dff_covar, zero_scale_dff_covar], axis=0)
loading_scale_line_graph_sort_covar = loading_scale_line_graph_covar.sort_values(by='cos2')
# COVARIANCE MATRIX WITH OUTLIERS REMOVED
x_outlier_scale_covar = outlier_dff.loc[:, features_outlier].values
y_outlier_scale_covar = outlier_dff.loc[:, ].values
pca_outlier_scale_covar = PCA(n_components=len(features_outlier))
principalComponents_outlier_scale_covar = pca_outlier_scale_covar.fit_transform(x_outlier_scale_covar)
principalDf_outlier_scale_covar = pd.DataFrame(data=principalComponents_outlier_scale_covar
, columns=['PC' + str(i + 1) for i in
range(len(features_outlier))])
# combining principal components and target
finalDf_outlier_scale_covar = pd.concat([outlier_names, principalDf_outlier_scale_covar], axis=1)
Var_outlier_scale_covar = pca_outlier_scale_covar.explained_variance_ratio_
# calculating loading vector plot
loading_outlier_scale_covar = pca_outlier_scale_covar.components_.T * np.sqrt(
pca_outlier_scale_covar.explained_variance_)
loading_outlier_scale_df_covar = pd.DataFrame(data=loading_outlier_scale_covar[:, 0:2],
columns=["PC1", "PC2"])
loading_outlier_scale_df_covar["cos2"] = (loading_outlier_scale_df_covar["PC1"] ** 2) + (
loading_outlier_scale_df_covar["PC2"] ** 2)
line_group_df_covar = pd.DataFrame(data=features_outlier, columns=['line_group'])
loading_outlier_scale_dff_covar = pd.concat([loading_outlier_scale_df_covar, line_group_df_covar], axis=1)
a = (len(features_outlier), 2)
zero_outlier_scale_covar = np.zeros(a)
zero_outlier_scale_df_covar = pd.DataFrame(data=zero_outlier_scale_covar, columns=["PC1", "PC2"])
zero_outlier_scale_df_color_covar = pd.DataFrame(data=loading_outlier_scale_df_covar.iloc[:, 2],
columns=['cos2'])
zero_outlier_scale_dff_covar = pd.concat([zero_outlier_scale_df_covar, zero_outlier_scale_df_color_covar,
line_group_df_covar], axis=1)
loading_outlier_scale_line_graph_covar = pd.concat(
[loading_outlier_scale_dff_covar, zero_outlier_scale_dff_covar], axis=0)
loading_outlier_scale_line_graph_sort_covar = loading_outlier_scale_line_graph_covar.sort_values(by='cos2')
# select the dataset and explained variance matching the chosen outlier and matrix-type options
if outlier == 'No' and matrix_type == "Correlation":
data = loading_scale_line_graph_sort
variance = Var_scale
elif outlier == 'Yes' and matrix_type == "Correlation":
data = loading_outlier_scale_line_graph_sort
variance = Var_outlier_scale
elif outlier == "No" and matrix_type == "Covariance":
data = loading_scale_line_graph_sort_covar
variance = Var_scale_covar
elif outlier == "Yes" and matrix_type == "Covariance":
data = loading_outlier_scale_line_graph_sort_covar
variance = Var_outlier_scale_covar
N = len(data['cos2'].unique())
end_color = "#00264c" # dark blue
start_color = "#c6def5" # light blue
colorscale = [x.hex for x in list(Color(start_color).range_to(Color(end_color), N))]
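# build an N-step light-to-dark blue gradient (one hex colour per unique cos2
# value) using Color.range_to interpolation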
counter = 0
counter_color = 0
lists = [[] for i in range(len(data['line_group'].unique()))]
for i in data['line_group'].unique():
dataf_all = data[data['line_group'] == i]
trace1_all = go.Scatter(x=dataf_all['PC1'], y=dataf_all['PC2'], mode='lines+text',
name=i, line=dict(color=colorscale[counter_color]),
# text=i,
meta=i,
hovertemplate=
'<b>%{meta}</b>' +
'<br>PC1: %{x}<br>' +
'PC2: %{y}'
"<extra></extra>",
textposition='bottom right', textfont=dict(size=12)
)
trace2_all = go.Scatter(x=[1, -1], y=[1, -1], mode='markers',
hoverinfo='skip',
marker=dict(showscale=True, opacity=0,
color=[data["cos2"].min(), data["cos2"].max()],
colorscale=colorscale,
colorbar=dict(title=dict(text="Cos2",
side='right'), ypad=0)
), )
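# trace2_all is an invisible scatter (opacity=0, hoverinfo='skip') whose only job
# is to display a colorbar for the cos2 colorscale used by the line traces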
lists[counter] = trace1_all
counter = counter + 1
counter_color = counter_color + 1
lists.append(trace2_all)
####################################################################################################
return {'data': lists,
'layout': go.Layout(xaxis=dict(title='PC1 ({}%)'.format(round((variance[0] * 100), 2)), mirror=True,
ticks='outside', showline=True),
yaxis=dict(title='PC2 ({}%)'.format(round((variance[1] * 100), 2)), mirror=True,
ticks='outside', showline=True),
showlegend=False, margin={'r': 0},
# shapes=[dict(type="circle", xref="x", yref="y", x0=-1,
# y0=-1, x1=1, y1=1,
# line_color="DarkSlateGrey")]
),
}
elif all_custom == "Custom":
# Dropping Data variables
dff_input = dff.drop(columns=dff[input])
features1_input = dff_input.columns
features_input = list(features1_input)
dff_target = dff[input]
# OUTLIER DATA INPUT
z_scores_input = scipy.stats.zscore(dff_input)
abs_z_scores_input = np.abs(z_scores_input)
filtered_entries_input = (abs_z_scores_input < 3).all(axis=1)
dff_input_outlier = dff_input[filtered_entries_input]
features1_input_outlier = dff_input_outlier.columns
features_input_outlier = list(features1_input_outlier)
outlier_names_input1 = df[filtered_entries_input]
outlier_names_input = outlier_names_input1.iloc[:, 0]
# OUTLIER DATA TARGET
z_scores_target = scipy.stats.zscore(dff_target)
abs_z_scores_target = np.abs(z_scores_target)
filtered_entries_target = (abs_z_scores_target < 3).all(axis=1)
dff_target_outlier = dff_target[filtered_entries_target]
# INPUT DATA WITH OUTLIERS
x_scale_input = dff_input.loc[:, features_input].values
y_scale_input = dff_input.loc[:, ].values
x_scale_input = StandardScaler().fit_transform(x_scale_input)
# # x_scale_input = MinMaxScaler(feature_range=(0, 1), copy=True).fit_transform(x_scale_input)
# def rescale(data, new_min=0, new_max=1):
# """Rescale the data to be within the range [new_min, new_max]"""
# return (data - data.min()) / (data.max() - data.min()) * (new_max - new_min) + new_min
#
# x_scale_input = rescale(x_scale_input, new_min=0, new_max=1)
pca_scale_input = PCA(n_components=len(features_input))
principalComponents_scale_input = pca_scale_input.fit_transform(x_scale_input)
principalDf_scale_input = pd.DataFrame(data=principalComponents_scale_input
, columns=['PC' + str(i + 1) for i in range(len(features_input))])
finalDf_scale_input = pd.concat([df[[df.columns[0]]], principalDf_scale_input, dff_target], axis=1)
dfff_scale_input = finalDf_scale_input.fillna(0)
Var_scale_input = pca_scale_input.explained_variance_ratio_
# calculating loading vector plot
loading_scale_input = pca_scale_input.components_.T * np.sqrt(pca_scale_input.explained_variance_)
loading_scale_input_df = pd.DataFrame(data=loading_scale_input[:, 0:2],
columns=["PC1", "PC2"])
loading_scale_input_df["cos2"] = (loading_scale_input_df["PC1"] ** 2) + (loading_scale_input_df["PC2"] ** 2)
line_group_scale_input_df = pd.DataFrame(data=features_input, columns=['line_group'])
loading_scale_input_dff = pd.concat([loading_scale_input_df, line_group_scale_input_df],
axis=1)
a = (len(features_input), 2)
zero_scale_input = np.zeros(a)
zero_scale_input_df = pd.DataFrame(data=zero_scale_input, columns=["PC1", "PC2"])
zero_scale_input_df_color = pd.DataFrame(data=loading_scale_input_df.iloc[:, 2], columns=['cos2'])
zero_scale_input_dff = pd.concat([zero_scale_input_df, zero_scale_input_df_color, line_group_scale_input_df],
axis=1)
loading_scale_input_line_graph = pd.concat([loading_scale_input_dff, zero_scale_input_dff],
axis=0)
# INPUT DATA WITH REMOVING OUTLIERS
x_scale_input_outlier = dff_input_outlier.loc[:, features_input_outlier].values
y_scale_input_outlier = dff_input_outlier.loc[:, ].values
x_scale_input_outlier = StandardScaler().fit_transform(x_scale_input_outlier)
# # x_scale_input_outlier = MinMaxScaler(feature_range=(0, 1), copy=True).fit_transform(x_scale_input_outlier)
# def rescale(data, new_min=0, new_max=1):
# """Rescale the data to be within the range [new_min, new_max]"""
# return (data - data.min()) / (data.max() - data.min()) * (new_max - new_min) + new_min
# x_scale_input_outlier = rescale(x_scale_input_outlier, new_min=0, new_max=1)
pca_scale_input_outlier = PCA(n_components=len(features_input_outlier))
principalComponents_scale_input_outlier = pca_scale_input_outlier.fit_transform(x_scale_input_outlier)
principalDf_scale_input_outlier = pd.DataFrame(data=principalComponents_scale_input_outlier
, columns=['PC' + str(i + 1) for i in
range(len(features_input_outlier))])
finalDf_scale_input_outlier = pd.concat(
[outlier_names_input, principalDf_scale_input_outlier, dff_target_outlier],
axis=1)
dfff_scale_input_outlier = finalDf_scale_input_outlier.fillna(0)
Var_scale_input_outlier = pca_scale_input_outlier.explained_variance_ratio_
# calculating loading vector plot
loading_scale_input_outlier = pca_scale_input_outlier.components_.T * np.sqrt(
pca_scale_input_outlier.explained_variance_)
loading_scale_input_outlier_df = pd.DataFrame(data=loading_scale_input_outlier[:, 0:2],
columns=["PC1", "PC2"])
loading_scale_input_outlier_df["cos2"] = (loading_scale_input_outlier_df["PC1"] ** 2) + \
(loading_scale_input_outlier_df["PC2"] ** 2)
line_group_scale_input_outlier_df = pd.DataFrame(data=features_input_outlier, columns=['line_group'])
loading_scale_input_outlier_dff = pd.concat([loading_scale_input_outlier_df, line_group_scale_input_outlier_df],
axis=1)
a = (len(features_input_outlier), 2)
zero_scale_input_outlier = np.zeros(a)
zero_scale_input_outlier_df = pd.DataFrame(data=zero_scale_input_outlier, columns=["PC1", "PC2"])
zero_scale_input_outlier_df_color = pd.DataFrame(data=loading_scale_input_outlier_df.iloc[:, 2],
columns=['cos2'])
zero_scale_input_outlier_dff = pd.concat([zero_scale_input_outlier_df, zero_scale_input_outlier_df_color,
line_group_scale_input_outlier_df],
axis=1)
loading_scale_input_outlier_line_graph = pd.concat(
[loading_scale_input_outlier_dff, zero_scale_input_outlier_dff],
axis=0)
loading_scale_input_line_graph_sort = loading_scale_input_line_graph.sort_values(by='cos2')
loading_scale_input_outlier_line_graph_sort = loading_scale_input_outlier_line_graph.sort_values(by='cos2')
# COVARIANCE MATRIX
x_scale_input_covar = dff_input.loc[:, features_input].values
y_scale_input_covar = dff_input.loc[:, ].values
pca_scale_input_covar = PCA(n_components=len(features_input))
principalComponents_scale_input_covar = pca_scale_input_covar.fit_transform(x_scale_input_covar)
principalDf_scale_input_covar = pd.DataFrame(data=principalComponents_scale_input_covar
, columns=['PC' + str(i + 1) for i in range(len(features_input))])
finalDf_scale_input_covar = pd.concat([df[[df.columns[0]]], principalDf_scale_input_covar, dff_target], axis=1)
dfff_scale_input_covar = finalDf_scale_input_covar.fillna(0)
Var_scale_input_covar = pca_scale_input_covar.explained_variance_ratio_
# calculating loading vector plot
loading_scale_input_covar = pca_scale_input_covar.components_.T * np.sqrt(
pca_scale_input_covar.explained_variance_)
loading_scale_input_df_covar = pd.DataFrame(data=loading_scale_input_covar[:, 0:2],
columns=["PC1", "PC2"])
loading_scale_input_df_covar["cos2"] = (loading_scale_input_df_covar["PC1"] ** 2) + (
loading_scale_input_df_covar["PC2"] ** 2)
line_group_scale_input_df_covar = pd.DataFrame(data=features_input, columns=['line_group'])
loading_scale_input_dff_covar = pd.concat([loading_scale_input_df_covar, line_group_scale_input_df_covar],
axis=1)
a = (len(features_input), 2)
zero_scale_input_covar = np.zeros(a)
zero_scale_input_df_covar = pd.DataFrame(data=zero_scale_input_covar, columns=["PC1", "PC2"])
zero_scale_input_df_color_covar = pd.DataFrame(data=loading_scale_input_df_covar.iloc[:, 2], columns=['cos2'])
zero_scale_input_dff_covar = pd.concat([zero_scale_input_df_covar, zero_scale_input_df_color_covar,
line_group_scale_input_df_covar],
axis=1)
loading_scale_input_line_graph_covar = pd.concat([loading_scale_input_dff_covar, zero_scale_input_dff_covar],
axis=0)
loading_scale_input_line_graph_sort_covar = loading_scale_input_line_graph_covar.sort_values(by='cos2')
# COVARIANCE MATRIX WITH OUTLIERS REMOVED
x_scale_input_outlier_covar = dff_input_outlier.loc[:, features_input_outlier].values
y_scale_input_outlier_covar = dff_input_outlier.loc[:, ].values
pca_scale_input_outlier_covar = PCA(n_components=len(features_input_outlier))
principalComponents_scale_input_outlier_covar = pca_scale_input_outlier_covar.fit_transform(
x_scale_input_outlier_covar)
principalDf_scale_input_outlier_covar = pd.DataFrame(data=principalComponents_scale_input_outlier_covar
, columns=['PC' + str(i + 1) for i in
range(len(features_input_outlier))])
finalDf_scale_input_outlier_covar = pd.concat(
[outlier_names_input, principalDf_scale_input_outlier_covar, dff_target_outlier],
axis=1)
dfff_scale_input_outlier_covar = finalDf_scale_input_outlier_covar.fillna(0)
Var_scale_input_outlier_covar = pca_scale_input_outlier_covar.explained_variance_ratio_
# calculating loading vector plot
loading_scale_input_outlier_covar = pca_scale_input_outlier_covar.components_.T * np.sqrt(
pca_scale_input_outlier_covar.explained_variance_)
loading_scale_input_outlier_df_covar = pd.DataFrame(data=loading_scale_input_outlier_covar[:, 0:2],
columns=["PC1", "PC2"])
loading_scale_input_outlier_df_covar["cos2"] = (loading_scale_input_outlier_df_covar["PC1"] ** 2) + \
(loading_scale_input_outlier_df_covar["PC2"] ** 2)
line_group_scale_input_outlier_df_covar = pd.DataFrame(data=features_input_outlier, columns=['line_group'])
loading_scale_input_outlier_dff_covar = pd.concat([loading_scale_input_outlier_df_covar,
line_group_scale_input_outlier_df_covar],
axis=1)
a = (len(features_input_outlier), 2)
zero_scale_input_outlier_covar = np.zeros(a)
zero_scale_input_outlier_df_covar = pd.DataFrame(data=zero_scale_input_outlier_covar, columns=["PC1", "PC2"])
zero_scale_input_outlier_df_color_covar = pd.DataFrame(data=loading_scale_input_outlier_df_covar.iloc[:, 2],
columns=['cos2'])
zero_scale_input_outlier_dff_covar = pd.concat([zero_scale_input_outlier_df_covar,
zero_scale_input_outlier_df_color_covar,
line_group_scale_input_outlier_df_covar],
axis=1)
loading_scale_input_outlier_line_graph_covar = pd.concat(
[loading_scale_input_outlier_dff_covar, zero_scale_input_outlier_dff_covar],
axis=0)
loading_scale_input_outlier_line_graph_sort_covar = loading_scale_input_outlier_line_graph_covar.sort_values(
by='cos2')
####################################################################################################
if outlier == 'No' and matrix_type == "Correlation":
data = loading_scale_input_line_graph_sort
variance = Var_scale_input
elif outlier == 'Yes' and matrix_type == "Correlation":
variance = Var_scale_input_outlier
data = loading_scale_input_outlier_line_graph_sort
elif outlier == "No" and matrix_type == "Covariance":
data = loading_scale_input_line_graph_sort_covar
variance = Var_scale_input_covar
elif outlier == "Yes" and matrix_type == "Covariance":
variance = Var_scale_input_outlier_covar
data = loading_scale_input_outlier_line_graph_sort_covar
N = len(data['cos2'].unique())
end_color = "#00264c" # dark blue
start_color = "#c6def5" # light blue
colorscale = [x.hex for x in list(Color(start_color).range_to(Color(end_color), N))]
counter_color = 0
counter = 0
lists = [[] for i in range(len(data['line_group'].unique()))]
for i in data['line_group'].unique():
dataf = data[data['line_group'] == i]
trace1 = go.Scatter(x=dataf['PC1'], y=dataf['PC2'], name=i, line=dict(color=colorscale[counter_color]),
mode='lines+text', textposition='bottom right', textfont=dict(size=12),
meta=i,
hovertemplate=
'<b>%{meta}</b>' +
'<br>PC1: %{x}<br>' +
'PC2: %{y}'
"<extra></extra>",
)
trace2_all = go.Scatter(x=[1, -1], y=[1, -1], mode='markers', hoverinfo='skip',
marker=dict(showscale=True, color=[data["cos2"].min(), data["cos2"].max()],
colorscale=colorscale, opacity=0,
colorbar=dict(title=dict(text="Cos2",
side='right'), ypad=0)
), )
lists[counter] = trace1
counter_color = counter_color + 1
counter = counter + 1
lists.append(trace2_all)
####################################################################################################
return {'data': lists,
'layout': go.Layout(xaxis=dict(title='PC1 ({}%)'.format(round((variance[0] * 100), 2)),
mirror=True, ticks='outside', showline=True),
yaxis=dict(title='PC2 ({}%)'.format(round((variance[1] * 100), 2)),
mirror=True, ticks='outside', showline=True),
showlegend=False, margin={'r': 0},
# shapes=[dict(type="circle", xref="x", yref="y", x0=-1,
# y0=-1, x1=1, y1=1,
# line_color="DarkSlateGrey")]
),
}
@app.callback(Output('contrib-plot', 'figure'),
[
Input('outlier-value-contrib', 'value'),
Input('feature-input', 'value'),
Input('all-custom-choice', 'value'),
Input("matrix-type-contrib", "value"),
Input('csv-data', 'data')
])
def update_contrib_plot(outlier, input, all_custom, matrix_type, data):
if not data:
return dash.no_update
df = pd.read_json(data, orient='split')
dff = df.select_dtypes(exclude=['object'])
if all_custom == 'All':
features1 = dff.columns
features = list(features1)
# OUTLIER DATA
z_scores = scipy.stats.zscore(dff)
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
outlier_dff = dff[filtered_entries]
features1_outlier = outlier_dff.columns
features_outlier = list(features1_outlier)
outlier_names1 = df[filtered_entries]
outlier_names = outlier_names1.iloc[:, 0]
x_scale = dff.loc[:, features].values
y_scale = dff.loc[:, ].values
x_scale = StandardScaler().fit_transform(x_scale)
pca_scale = PCA(n_components=len(features))
principalComponents_scale = pca_scale.fit_transform(x_scale)
principalDf_scale = pd.DataFrame(data=principalComponents_scale
, columns=['PC' + str(i + 1) for i in range(len(features))])
# combining principal components and target
finalDf_scale = pd.concat([df[[df.columns[0]]], principalDf_scale], axis=1)
Var_scale = pca_scale.explained_variance_ratio_
# calculating loading vector plot
loading_scale = pca_scale.components_.T * np.sqrt(pca_scale.explained_variance_)
loading_scale_df = pd.DataFrame(data=loading_scale[:, 0:2],
columns=["PC1", "PC2"])
loading_scale_df["PC1_cos2"] = loading_scale_df["PC1"] ** 2
loading_scale_df["PC2_cos2"] = loading_scale_df["PC2"] ** 2
loading_scale_df["PC1_contrib"] = \
(loading_scale_df["PC1_cos2"] * 100) / (loading_scale_df["PC1_cos2"].sum(axis=0))
loading_scale_df["PC2_contrib"] = \
(loading_scale_df["PC2_cos2"] * 100) / (loading_scale_df["PC2_cos2"].sum(axis=0))
loading_scale_df["contrib"] = loading_scale_df["PC1_contrib"] + loading_scale_df["PC2_contrib"]
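# contribution of a variable to a component = 100 * cos2 / sum(cos2) for that
# component; 'contrib' sums the PC1 and PC2 contributions for each variable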
# after computing the total contribution (used for the colorscale), keep it together with PC1 and PC2 in a separate dataframe
loading_scale_dataf = pd.concat([loading_scale_df.iloc[:, 0:2], loading_scale_df.iloc[:, 6]], axis=1)
line_group_scale_df = pd.DataFrame(data=features, columns=['line_group'])
loading_scale_dff = pd.concat([loading_scale_dataf, line_group_scale_df], axis=1)
a = (len(features), 2)
zero_scale = np.zeros(a)
zero_scale_df = pd.DataFrame(data=zero_scale, columns=["PC1", "PC2"])
zero_scale_df_color = pd.DataFrame(data=loading_scale_dataf.iloc[:, 2], columns=['contrib'])
zero_scale_dff = pd.concat([zero_scale_df, zero_scale_df_color, line_group_scale_df], axis=1)
loading_scale_line_graph = pd.concat([loading_scale_dff, zero_scale_dff], axis=0)
# ORIGINAL DATA WITH REMOVING OUTLIERS
x_outlier_scale = outlier_dff.loc[:, features_outlier].values
y_outlier_scale = outlier_dff.loc[:, ].values
x_outlier_scale = StandardScaler().fit_transform(x_outlier_scale)
pca_outlier_scale = PCA(n_components=len(features_outlier))
principalComponents_outlier_scale = pca_outlier_scale.fit_transform(x_outlier_scale)
principalDf_outlier_scale = pd.DataFrame(data=principalComponents_outlier_scale
, columns=['PC' + str(i + 1) for i in range(len(features_outlier))])
finalDf_outlier_scale = pd.concat([outlier_names, principalDf_outlier_scale], axis=1)
Var_outlier_scale = pca_outlier_scale.explained_variance_ratio_
loading_outlier_scale = pca_outlier_scale.components_.T * np.sqrt(pca_outlier_scale.explained_variance_)
loading_outlier_scale_df = pd.DataFrame(data=loading_outlier_scale[:, 0:2],
columns=["PC1", "PC2"])
loading_outlier_scale_df["PC1_cos2"] = loading_outlier_scale_df["PC1"] ** 2
loading_outlier_scale_df["PC2_cos2"] = loading_outlier_scale_df["PC2"] ** 2
loading_outlier_scale_df["PC1_contrib"] = \
(loading_outlier_scale_df["PC1_cos2"] * 100) / (loading_outlier_scale_df["PC1_cos2"].sum(axis=0))
loading_outlier_scale_df["PC2_contrib"] = \
(loading_outlier_scale_df["PC2_cos2"] * 100) / (loading_outlier_scale_df["PC2_cos2"].sum(axis=0))
loading_outlier_scale_df["contrib"] = loading_outlier_scale_df["PC1_contrib"] + loading_outlier_scale_df[
"PC2_contrib"]
# after computing the total contribution (used for the colorscale), keep it together with PC1 and PC2 in a separate dataframe
loading_outlier_scale_dataf = pd.concat(
[loading_outlier_scale_df.iloc[:, 0:2], loading_outlier_scale_df.iloc[:, 6]], axis=1)
line_group_df = pd.DataFrame(data=features_outlier, columns=['line_group'])
loading_outlier_scale_dff = pd.concat([loading_outlier_scale_dataf, line_group_df], axis=1)
a = (len(features_outlier), 2)
zero_outlier_scale = np.zeros(a)
zero_outlier_scale_df = pd.DataFrame(data=zero_outlier_scale, columns=["PC1", "PC2"])
zero_outlier_scale_df_color = pd.DataFrame(data=loading_outlier_scale_dataf.iloc[:, 2], columns=['contrib'])
zero_outlier_scale_dff = pd.concat([zero_outlier_scale_df, zero_outlier_scale_df_color, line_group_df], axis=1)
loading_outlier_scale_line_graph = pd.concat([loading_outlier_scale_dff, zero_outlier_scale_dff], axis=0)
loading_scale_line_graph_sort = loading_scale_line_graph.sort_values(by='contrib')
loading_outlier_scale_line_graph_sort = loading_outlier_scale_line_graph.sort_values(by='contrib')
# COVARIANCE MATRIX
x_scale_covar = dff.loc[:, features].values
y_scale_covar = dff.loc[:, ].values
pca_scale_covar = PCA(n_components=len(features))
principalComponents_scale_covar = pca_scale_covar.fit_transform(x_scale_covar)
principalDf_scale_covar = pd.DataFrame(data=principalComponents_scale_covar
, columns=['PC' + str(i + 1) for i in range(len(features))])
# combining principal components and target
finalDf_scale_covar = pd.concat([df[[df.columns[0]]], principalDf_scale_covar], axis=1)
Var_scale_covar = pca_scale_covar.explained_variance_ratio_
# calculating loading vector plot
loading_scale_covar = pca_scale_covar.components_.T * np.sqrt(pca_scale_covar.explained_variance_)
loading_scale_df_covar = pd.DataFrame(data=loading_scale_covar[:, 0:2],
columns=["PC1", "PC2"])
loading_scale_df_covar["PC1_cos2"] = loading_scale_df_covar["PC1"] ** 2
loading_scale_df_covar["PC2_cos2"] = loading_scale_df_covar["PC2"] ** 2
loading_scale_df_covar["PC1_contrib"] = \
(loading_scale_df_covar["PC1_cos2"] * 100) / (loading_scale_df_covar["PC1_cos2"].sum(axis=0))
loading_scale_df_covar["PC2_contrib"] = \
(loading_scale_df_covar["PC2_cos2"] * 100) / (loading_scale_df_covar["PC2_cos2"].sum(axis=0))
loading_scale_df_covar["contrib"] = loading_scale_df_covar["PC1_contrib"] + loading_scale_df_covar[
"PC2_contrib"]
loading_scale_dataf_covar = pd.concat([loading_scale_df_covar.iloc[:, 0:2], loading_scale_df_covar.iloc[:, 6]],
axis=1)
line_group_scale_df_covar = pd.DataFrame(data=features, columns=['line_group'])
loading_scale_dff_covar = pd.concat([loading_scale_dataf_covar, line_group_scale_df_covar], axis=1)
a = (len(features), 2)
zero_scale_covar = np.zeros(a)
zero_scale_df_covar = pd.DataFrame(data=zero_scale_covar, columns=["PC1", "PC2"])
zero_scale_df_color_covar = pd.DataFrame(data=loading_scale_dataf_covar.iloc[:, 2], columns=['contrib'])
zero_scale_dff_covar = pd.concat([zero_scale_df_covar, zero_scale_df_color_covar, line_group_scale_df_covar],
axis=1)
loading_scale_line_graph_covar = pd.concat([loading_scale_dff_covar, zero_scale_dff_covar], axis=0)
loading_scale_line_graph_sort_covar = loading_scale_line_graph_covar.sort_values(by='contrib')
# COVARIANCE MATRIX OUTLIERS REMOVED
x_outlier_scale_covar = outlier_dff.loc[:, features_outlier].values
y_outlier_scale_covar = outlier_dff.loc[:, ].values
pca_outlier_scale_covar = PCA(n_components=len(features_outlier))
principalComponents_outlier_scale_covar = pca_outlier_scale_covar.fit_transform(x_outlier_scale_covar)
principalDf_outlier_scale_covar = pd.DataFrame(data=principalComponents_outlier_scale_covar
, columns=['PC' + str(i + 1) for i in
range(len(features_outlier))])
finalDf_outlier_scale_covar = pd.concat([outlier_names, principalDf_outlier_scale_covar], axis=1)
Var_outlier_scale_covar = pca_outlier_scale_covar.explained_variance_ratio_
loading_outlier_scale_covar = pca_outlier_scale_covar.components_.T * np.sqrt(
pca_outlier_scale_covar.explained_variance_)
loading_outlier_scale_df_covar = pd.DataFrame(data=loading_outlier_scale_covar[:, 0:2],
columns=["PC1", "PC2"])
loading_outlier_scale_df_covar["PC1_cos2"] = loading_outlier_scale_df_covar["PC1"] ** 2
loading_outlier_scale_df_covar["PC2_cos2"] = loading_outlier_scale_df_covar["PC2"] ** 2
loading_outlier_scale_df_covar["PC1_contrib"] = \
(loading_outlier_scale_df_covar["PC1_cos2"] * 100) / (
loading_outlier_scale_df_covar["PC1_cos2"].sum(axis=0))
loading_outlier_scale_df_covar["PC2_contrib"] = \
(loading_outlier_scale_df_covar["PC2_cos2"] * 100) / (
loading_outlier_scale_df_covar["PC2_cos2"].sum(axis=0))
loading_outlier_scale_df_covar["contrib"] = loading_outlier_scale_df_covar["PC1_contrib"] + \
loading_outlier_scale_df_covar[
"PC2_contrib"]
# after computing the total contribution (used for the colorscale), keep it together with PC1 and PC2 in a separate dataframe
loading_outlier_scale_dataf_covar = pd.concat(
[loading_outlier_scale_df_covar.iloc[:, 0:2], loading_outlier_scale_df_covar.iloc[:, 6]], axis=1)
line_group_df_covar = pd.DataFrame(data=features_outlier, columns=['line_group'])
loading_outlier_scale_dff_covar = pd.concat([loading_outlier_scale_dataf_covar, line_group_df_covar], axis=1)
a = (len(features_outlier), 2)
zero_outlier_scale_covar = np.zeros(a)
zero_outlier_scale_df_covar = pd.DataFrame(data=zero_outlier_scale_covar, columns=["PC1", "PC2"])
zero_outlier_scale_df_color_covar = pd.DataFrame(data=loading_outlier_scale_dataf_covar.iloc[:, 2],
columns=['contrib'])
zero_outlier_scale_dff_covar = pd.concat(
[zero_outlier_scale_df_covar, zero_outlier_scale_df_color_covar, line_group_df_covar], axis=1)
loading_outlier_scale_line_graph_covar = pd.concat(
[loading_outlier_scale_dff_covar, zero_outlier_scale_dff_covar], axis=0)
loading_outlier_scale_line_graph_sort_covar = loading_outlier_scale_line_graph_covar.sort_values(by='contrib')
# select the dataset and explained variance matching the chosen outlier and matrix-type options
if outlier == 'No' and matrix_type == "Correlation":
data = loading_scale_line_graph_sort
variance = Var_scale
elif outlier == 'Yes' and matrix_type == "Correlation":
data = loading_outlier_scale_line_graph_sort
variance = Var_outlier_scale
elif outlier == "No" and matrix_type == "Covariance":
data = loading_scale_line_graph_sort_covar
variance = Var_scale_covar
elif outlier == "Yes" and matrix_type == "Covariance":
data = loading_outlier_scale_line_graph_sort_covar
variance = Var_outlier_scale_covar
N = len(data['contrib'].unique())
end_color = "#00264c" # dark blue
start_color = "#c6def5" # light blue
colorscale = [x.hex for x in list(Color(start_color).range_to(Color(end_color), N))]
counter = 0
counter_color = 0
lists = [[] for i in range(len(data['line_group'].unique()))]
for i in data['line_group'].unique():
dataf_all = data[data['line_group'] == i]
trace1_all = go.Scatter(x=dataf_all['PC1'], y=dataf_all['PC2'], mode='lines+text',
name=i, line=dict(color=colorscale[counter_color]),
textposition='bottom right', textfont=dict(size=12),
meta=i,
hovertemplate=
'<b>%{meta}</b>' +
'<br>PC1: %{x}<br>' +
'PC2: %{y}'
"<extra></extra>",
)
trace2_all = go.Scatter(x=[1, -1], y=[1, -1], mode='markers', hoverinfo='skip',
marker=dict(showscale=True, opacity=0,
color=[data["contrib"].min(), data["contrib"].max()],
colorscale=colorscale,
colorbar=dict(title=dict(text="Contribution",
side='right'), ypad=0),
), )
lists[counter] = trace1_all
counter = counter + 1
counter_color = counter_color + 1
lists.append(trace2_all)
####################################################################################################
return {'data': lists,
'layout': go.Layout(xaxis=dict(title='PC1 ({}%)'.format(round((variance[0] * 100), 2)),
mirror=True, ticks='outside', showline=True),
yaxis=dict(title='PC2 ({}%)'.format(round((variance[1] * 100), 2)),
mirror=True, ticks='outside', showline=True),
showlegend=False, margin={'r': 0},
# shapes=[dict(type="circle", xref="x", yref="y", x0=-1,
# y0=-1, x1=1, y1=1,
# line_color="DarkSlateGrey")]
),
}
elif all_custom == "Custom":
# Dropping Data variables
dff_input = dff.drop(columns=dff[input])
features1_input = dff_input.columns
features_input = list(features1_input)
dff_target = dff[input]
# OUTLIER DATA INPUT
z_scores_input = scipy.stats.zscore(dff_input)
abs_z_scores_input = np.abs(z_scores_input)
filtered_entries_input = (abs_z_scores_input < 3).all(axis=1)
dff_input_outlier = dff_input[filtered_entries_input]
features1_input_outlier = dff_input_outlier.columns
features_input_outlier = list(features1_input_outlier)
outlier_names_input1 = df[filtered_entries_input]
outlier_names_input = outlier_names_input1.iloc[:, 0]
# OUTLIER DATA TARGET
z_scores_target = scipy.stats.zscore(dff_target)
abs_z_scores_target = np.abs(z_scores_target)
filtered_entries_target = (abs_z_scores_target < 3).all(axis=1)
dff_target_outlier = dff_target[filtered_entries_target]
# INPUT DATA WITH OUTLIERS
x_scale_input = dff_input.loc[:, features_input].values
y_scale_input = dff_input.loc[:, ].values
x_scale_input = StandardScaler().fit_transform(x_scale_input)
pca_scale_input = PCA(n_components=len(features_input))
principalComponents_scale_input = pca_scale_input.fit_transform(x_scale_input)
principalDf_scale_input = pd.DataFrame(data=principalComponents_scale_input
, columns=['PC' + str(i + 1) for i in range(len(features_input))])
finalDf_scale_input = pd.concat([df[[df.columns[0]]], principalDf_scale_input, dff_target], axis=1)
dfff_scale_input = finalDf_scale_input.fillna(0)
Var_scale_input = pca_scale_input.explained_variance_ratio_
# calculating loading vector plot
loading_scale_input = pca_scale_input.components_.T * np.sqrt(pca_scale_input.explained_variance_)
loading_scale_input_df = pd.DataFrame(data=loading_scale_input[:, 0:2],
columns=["PC1", "PC2"])
loading_scale_input_df["PC1_cos2"] = loading_scale_input_df["PC1"] ** 2
loading_scale_input_df["PC2_cos2"] = loading_scale_input_df["PC2"] ** 2
loading_scale_input_df["PC1_contrib"] = \
(loading_scale_input_df["PC1_cos2"] * 100) / (loading_scale_input_df["PC1_cos2"].sum(axis=0))
loading_scale_input_df["PC2_contrib"] = \
(loading_scale_input_df["PC2_cos2"] * 100) / (loading_scale_input_df["PC2_cos2"].sum(axis=0))
loading_scale_input_df["contrib"] = loading_scale_input_df["PC1_contrib"] + loading_scale_input_df[
"PC2_contrib"]
loading_scale_input_dataf = pd.concat(
[loading_scale_input_df.iloc[:, 0:2], loading_scale_input_df.iloc[:, 6]], axis=1)
line_group_scale_input_df = pd.DataFrame(data=features_input, columns=['line_group'])
loading_scale_input_dff = pd.concat([loading_scale_input_dataf, line_group_scale_input_df],
axis=1)
a = (len(features_input), 2)
zero_scale_input = np.zeros(a)
zero_scale_input_df = pd.DataFrame(data=zero_scale_input, columns=["PC1", "PC2"])
zero_scale_input_df_color = pd.DataFrame(data=loading_scale_input_dataf.iloc[:, 2], columns=['contrib'])
zero_scale_input_dff = pd.concat([zero_scale_input_df, zero_scale_input_df_color, line_group_scale_input_df],
axis=1)
loading_scale_input_line_graph = pd.concat([loading_scale_input_dff, zero_scale_input_dff],
axis=0)
# INPUT DATA WITH REMOVING OUTLIERS
x_scale_input_outlier = dff_input_outlier.loc[:, features_input_outlier].values
y_scale_input_outlier = dff_input_outlier.loc[:, ].values
x_scale_input_outlier = StandardScaler().fit_transform(x_scale_input_outlier)
pca_scale_input_outlier = PCA(n_components=len(features_input_outlier))
principalComponents_scale_input_outlier = pca_scale_input_outlier.fit_transform(x_scale_input_outlier)
principalDf_scale_input_outlier = pd.DataFrame(data=principalComponents_scale_input_outlier
, columns=['PC' + str(i + 1) for i in
range(len(features_input_outlier))])
finalDf_scale_input_outlier = pd.concat(
[outlier_names_input, principalDf_scale_input_outlier, dff_target_outlier],
axis=1)
dfff_scale_input_outlier = finalDf_scale_input_outlier.fillna(0)
Var_scale_input_outlier = pca_scale_input_outlier.explained_variance_ratio_
# calculating loading vector plot
loading_scale_input_outlier = pca_scale_input_outlier.components_.T * np.sqrt(
pca_scale_input_outlier.explained_variance_)
loading_scale_input_outlier_df = pd.DataFrame(data=loading_scale_input_outlier[:, 0:2],
columns=["PC1", "PC2"])
loading_scale_input_outlier_df["PC1_cos2"] = loading_scale_input_outlier_df["PC1"] ** 2
loading_scale_input_outlier_df["PC2_cos2"] = loading_scale_input_outlier_df["PC2"] ** 2
loading_scale_input_outlier_df["PC1_contrib"] = \
(loading_scale_input_outlier_df["PC1_cos2"] * 100) / (
loading_scale_input_outlier_df["PC1_cos2"].sum(axis=0))
loading_scale_input_outlier_df["PC2_contrib"] = \
(loading_scale_input_outlier_df["PC2_cos2"] * 100) / (
loading_scale_input_outlier_df["PC2_cos2"].sum(axis=0))
loading_scale_input_outlier_df["contrib"] = loading_scale_input_outlier_df["PC1_contrib"] + \
loading_scale_input_outlier_df[
"PC2_contrib"]
loading_scale_input_outlier_dataf = pd.concat(
[loading_scale_input_outlier_df.iloc[:, 0:2], loading_scale_input_outlier_df.iloc[:, 6]], axis=1)
line_group_scale_input_outlier_df = pd.DataFrame(data=features_input_outlier, columns=['line_group'])
loading_scale_input_outlier_dff = pd.concat(
[loading_scale_input_outlier_dataf, line_group_scale_input_outlier_df],
axis=1)
a = (len(features_input_outlier), 2)
zero_scale_input_outlier = np.zeros(a)
zero_scale_input_outlier_df = pd.DataFrame(data=zero_scale_input_outlier, columns=["PC1", "PC2"])
zero_scale_input_outlier_df_color = pd.DataFrame(data=loading_scale_input_outlier_dataf.iloc[:, 2],
columns=['contrib'])
zero_scale_input_outlier_dff = pd.concat([zero_scale_input_outlier_df, zero_scale_input_outlier_df_color,
line_group_scale_input_outlier_df],
axis=1)
loading_scale_input_outlier_line_graph = pd.concat(
[loading_scale_input_outlier_dff, zero_scale_input_outlier_dff],
axis=0)
loading_scale_input_line_graph_sort = loading_scale_input_line_graph.sort_values(by='contrib')
loading_scale_input_outlier_line_graph_sort = loading_scale_input_outlier_line_graph.sort_values(by='contrib')
# COVARIANCE MATRIX
x_scale_input_covar = dff_input.loc[:, features_input].values
y_scale_input_covar = dff_input.loc[:, ].values
pca_scale_input_covar = PCA(n_components=len(features_input))
principalComponents_scale_input_covar = pca_scale_input_covar.fit_transform(x_scale_input_covar)
principalDf_scale_input_covar = pd.DataFrame(data=principalComponents_scale_input_covar
, columns=['PC' + str(i + 1) for i in range(len(features_input))])
finalDf_scale_input_covar = pd.concat([df[[df.columns[0]]], principalDf_scale_input_covar, dff_target], axis=1)
dfff_scale_input_covar = finalDf_scale_input_covar.fillna(0)
Var_scale_input_covar = pca_scale_input_covar.explained_variance_ratio_
# calculating loading vector plot
loading_scale_input_covar = pca_scale_input_covar.components_.T * np.sqrt(
pca_scale_input_covar.explained_variance_)
loading_scale_input_df_covar = pd.DataFrame(data=loading_scale_input_covar[:, 0:2],
columns=["PC1", "PC2"])
loading_scale_input_df_covar["PC1_cos2"] = loading_scale_input_df_covar["PC1"] ** 2
loading_scale_input_df_covar["PC2_cos2"] = loading_scale_input_df_covar["PC2"] ** 2
loading_scale_input_df_covar["PC1_contrib"] = \
(loading_scale_input_df_covar["PC1_cos2"] * 100) / (loading_scale_input_df_covar["PC1_cos2"].sum(axis=0))
loading_scale_input_df_covar["PC2_contrib"] = \
(loading_scale_input_df_covar["PC2_cos2"] * 100) / (loading_scale_input_df_covar["PC2_cos2"].sum(axis=0))
loading_scale_input_df_covar["contrib"] = loading_scale_input_df_covar["PC1_contrib"] + \
loading_scale_input_df_covar[
"PC2_contrib"]
loading_scale_input_dataf_covar = pd.concat(
[loading_scale_input_df_covar.iloc[:, 0:2], loading_scale_input_df_covar.iloc[:, 6]], axis=1)
line_group_scale_input_df_covar = pd.DataFrame(data=features_input, columns=['line_group'])
loading_scale_input_dff_covar = pd.concat([loading_scale_input_dataf_covar, line_group_scale_input_df_covar],
axis=1)
a = (len(features_input), 2)
zero_scale_input_covar = np.zeros(a)
zero_scale_input_df_covar = pd.DataFrame(data=zero_scale_input_covar, columns=["PC1", "PC2"])
zero_scale_input_df_color_covar = pd.DataFrame(data=loading_scale_input_dataf_covar.iloc[:, 2],
columns=['contrib'])
zero_scale_input_dff_covar = pd.concat([zero_scale_input_df_covar, zero_scale_input_df_color_covar,
line_group_scale_input_df_covar],
axis=1)
loading_scale_input_line_graph_covar = pd.concat([loading_scale_input_dff_covar, zero_scale_input_dff_covar],
axis=0)
loading_scale_input_line_graph_sort_covar = loading_scale_input_line_graph_covar.sort_values(by='contrib')
# COVARIANCE MATRIX WITH OUTLIERS REMOVED
x_scale_input_outlier_covar = dff_input_outlier.loc[:, features_input_outlier].values
y_scale_input_outlier_covar = dff_input_outlier.loc[:, ].values
pca_scale_input_outlier_covar = PCA(n_components=len(features_input_outlier))
principalComponents_scale_input_outlier_covar = pca_scale_input_outlier_covar.fit_transform(
x_scale_input_outlier_covar)
principalDf_scale_input_outlier_covar = pd.DataFrame(data=principalComponents_scale_input_outlier_covar
, columns=['PC' + str(i + 1) for i in
range(len(features_input_outlier))])
finalDf_scale_input_outlier_covar = pd.concat(
[outlier_names_input, principalDf_scale_input_outlier_covar, dff_target_outlier],
axis=1)
dfff_scale_input_outlier_covar = finalDf_scale_input_outlier_covar.fillna(0)
Var_scale_input_outlier_covar = pca_scale_input_outlier_covar.explained_variance_ratio_
# calculating loading vector plot
loading_scale_input_outlier_covar = pca_scale_input_outlier_covar.components_.T * np.sqrt(
pca_scale_input_outlier_covar.explained_variance_)
loading_scale_input_outlier_df_covar = pd.DataFrame(data=loading_scale_input_outlier_covar[:, 0:2],
columns=["PC1", "PC2"])
loading_scale_input_outlier_df_covar["PC1_cos2"] = loading_scale_input_outlier_df_covar["PC1"] ** 2
loading_scale_input_outlier_df_covar["PC2_cos2"] = loading_scale_input_outlier_df_covar["PC2"] ** 2
loading_scale_input_outlier_df_covar["PC1_contrib"] = \
(loading_scale_input_outlier_df_covar["PC1_cos2"] * 100) / (
loading_scale_input_outlier_df_covar["PC1_cos2"].sum(axis=0))
loading_scale_input_outlier_df_covar["PC2_contrib"] = \
(loading_scale_input_outlier_df_covar["PC2_cos2"] * 100) / (
loading_scale_input_outlier_df_covar["PC2_cos2"].sum(axis=0))
loading_scale_input_outlier_df_covar["contrib"] = loading_scale_input_outlier_df_covar["PC1_contrib"] + \
loading_scale_input_outlier_df_covar[
"PC2_contrib"]
loading_scale_input_outlier_dataf_covar = pd.concat(
[loading_scale_input_outlier_df_covar.iloc[:, 0:2], loading_scale_input_outlier_df_covar.iloc[:, 6]],
axis=1)
line_group_scale_input_outlier_df_covar = pd.DataFrame(data=features_input_outlier, columns=['line_group'])
loading_scale_input_outlier_dff_covar = pd.concat(
[loading_scale_input_outlier_dataf_covar, line_group_scale_input_outlier_df_covar],
axis=1)
a = (len(features_input_outlier), 2)
zero_scale_input_outlier_covar = np.zeros(a)
zero_scale_input_outlier_df_covar = pd.DataFrame(data=zero_scale_input_outlier_covar, columns=["PC1", "PC2"])
zero_scale_input_outlier_df_color_covar = pd.DataFrame(data=loading_scale_input_outlier_dataf_covar.iloc[:, 2],
columns=['contrib'])
zero_scale_input_outlier_dff_covar = pd.concat(
[zero_scale_input_outlier_df_covar, zero_scale_input_outlier_df_color_covar,
line_group_scale_input_outlier_df_covar],
axis=1)
loading_scale_input_outlier_line_graph_covar = pd.concat(
[loading_scale_input_outlier_dff_covar, zero_scale_input_outlier_dff_covar],
axis=0)
loading_scale_input_outlier_line_graph_sort_covar = loading_scale_input_outlier_line_graph_covar.sort_values(
by='contrib')
####################################################################################################
if outlier == 'No' and matrix_type == "Correlation":
data = loading_scale_input_line_graph_sort
variance = Var_scale_input
elif outlier == 'Yes' and matrix_type == "Correlation":
variance = Var_scale_input_outlier
data = loading_scale_input_outlier_line_graph_sort
elif outlier == "No" and matrix_type == "Covariance":
data = loading_scale_input_line_graph_sort_covar
variance = Var_scale_input_covar
elif outlier == "Yes" and matrix_type == "Covariance":
data = loading_scale_input_outlier_line_graph_sort_covar
variance = Var_scale_input_outlier_covar
N = len(data['contrib'].unique())
end_color = "#00264c" # dark blue
start_color = "#c6def5" # light blue
colorscale = [x.hex for x in list(Color(start_color).range_to(Color(end_color), N))]
counter_color = 0
counter = 0
lists = [[] for i in range(len(data['line_group'].unique()))]
for i in data['line_group'].unique():
dataf = data[data['line_group'] == i]
trace1 = go.Scatter(x=dataf['PC1'], y=dataf['PC2'], name=i, line=dict(color=colorscale[counter_color]),
mode='lines+text', textposition='bottom right', textfont=dict(size=12),
meta=i,
hovertemplate=
'<b>%{meta}</b>' +
'<br>PC1: %{x}<br>' +
'PC2: %{y}'
"<extra></extra>",
)
trace2_all = go.Scatter(x=[1, -1], y=[1, -1], mode='markers', hoverinfo='skip',
marker=dict(showscale=True, color=[data["contrib"].min(), data["contrib"].max()],
colorscale=colorscale, opacity=0,
colorbar=dict(title=dict(text="Contribution",
side='right'), ypad=0)
))
lists[counter] = trace1
counter_color = counter_color + 1
counter = counter + 1
lists.append(trace2_all)
####################################################################################################
return {'data': lists,
'layout': go.Layout(xaxis=dict(title='PC1 ({}%)'.format(round((variance[0] * 100), 2)),
mirror=True, ticks='outside', showline=True),
yaxis=dict(title='PC2 ({}%)'.format(round((variance[1] * 100), 2)),
mirror=True, ticks='outside', showline=True),
showlegend=False, margin={'r': 0},
# shapes=[dict(type="circle", xref="x", yref="y", x0=-1,
# y0=-1, x1=1, y1=1,
# line_color="DarkSlateGrey")]
),
}
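# --- Illustrative sketch (not wired into any callback) ----------------------
# The cos2 and contribution plots above derive their quantities from the PCA
# loadings. A condensed, standalone version of that arithmetic on toy data,
# assuming standardized inputs (i.e. a correlation-matrix PCA); the column names
# and random data are placeholders:
def _loading_cos2_contrib_sketch():
    import numpy as np
    import pandas as pd
    from sklearn.decomposition import PCA
    from sklearn.preprocessing import StandardScaler

    rng = np.random.default_rng(0)
    X = pd.DataFrame(rng.normal(size=(50, 4)), columns=list("ABCD"))
    pca = PCA(n_components=4).fit(StandardScaler().fit_transform(X))
    # loadings: eigenvectors scaled by the square roots of their eigenvalues
    loadings = pca.components_.T * np.sqrt(pca.explained_variance_)
    out = pd.DataFrame(loadings[:, 0:2], columns=["PC1", "PC2"], index=X.columns)
    # cos2: squared coordinates = quality of representation on PC1/PC2
    out["cos2"] = out["PC1"] ** 2 + out["PC2"] ** 2
    # contribution: each variable's percentage share of a component's cos2,
    # summed here over the first two components
    out["contrib"] = (out["PC1"] ** 2 * 100 / (out["PC1"] ** 2).sum()
                      + out["PC2"] ** 2 * 100 / (out["PC2"] ** 2).sum())
    return out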
@app.callback(Output('download-link', 'download'),
[Input('all-custom-choice', 'value'),
Input('eigenA-outlier', 'value'),
Input("matrix-type-data-table", 'value')])
def update_filename(all_custom, outlier, matrix_type):
if all_custom == 'All' and outlier == 'Yes' and matrix_type == "Correlation":
download = 'all_variables_correlation_matrix_outliers_removed_data.csv'
elif all_custom == 'All' and outlier == 'Yes' and matrix_type == "Covariance":
download = 'all_variables_covariance_matrix_outliers_removed_data.csv'
elif all_custom == 'All' and outlier == 'No' and matrix_type == "Correlation":
download = 'all_variables_correlation_matrix_data.csv'
elif all_custom == 'All' and outlier == 'No' and matrix_type == "Covariance":
download = 'all_variables_covariance_matrix_data.csv'
elif all_custom == 'Custom' and outlier == 'Yes' and matrix_type == "Correlation":
download = 'custom_variables_correlation_matrix_outliers_removed_data.csv'
elif all_custom == 'Custom' and outlier == 'Yes' and matrix_type == "Covariance":
download = 'custom_variables_covariance_matrix_outliers_removed_data.csv'
elif all_custom == 'Custom' and outlier == 'No' and matrix_type == "Correlation":
download = 'custom_variables_correlation_matrix_data.csv'
elif all_custom == 'Custom' and outlier == 'No' and matrix_type == "Covariance":
download = 'custom_variables_covariance_matrix_data.csv'
return download
@app.callback(Output('download-link', 'href'),
[Input('all-custom-choice', 'value'),
Input('feature-input', 'value'),
Input('eigenA-outlier', 'value'),
Input("matrix-type-data-table", 'value'),
Input('csv-data', 'data')])
def update_link(all_custom, input, outlier, matrix_type, data):
if not data:
return dash.no_update
df = pd.read_json(data, orient='split')
dff = df.select_dtypes(exclude=['object'])
features1 = dff.columns
features = list(features1)
if all_custom == 'All':
# OUTLIER DATA
z_scores = scipy.stats.zscore(dff)
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
outlier_dff = dff[filtered_entries]
features1_outlier = outlier_dff.columns
features_outlier = list(features1_outlier)
outlier_names1 = df[filtered_entries]
outlier_names = outlier_names1.iloc[:, 0]
# ORIGINAL DATA WITH OUTLIERS
x_scale = dff.loc[:, features].values
y_scale = dff.loc[:, ].values
x_scale = StandardScaler().fit_transform(x_scale)
# x_scale = rescale(x_scale, new_min=0, new_max=1)
pca_scale = PCA(n_components=len(features))
principalComponents_scale = pca_scale.fit_transform(x_scale)
principalDf_scale = pd.DataFrame(data=principalComponents_scale
, columns=['PC' + str(i + 1) for i in range(len(features))])
# combining principal components and target
finalDf_scale = pd.concat([df[[df.columns[0]]], principalDf_scale], axis=1)
dfff_scale = finalDf_scale.fillna(0)
# ORIGINAL DATA WITH REMOVING OUTLIERS
x_outlier_scale = outlier_dff.loc[:, features_outlier].values
y_outlier_scale = outlier_dff.loc[:, ].values
x_outlier_scale = StandardScaler().fit_transform(x_outlier_scale)
pca_outlier_scale = PCA(n_components=len(features_outlier))
principalComponents_outlier_scale = pca_outlier_scale.fit_transform(x_outlier_scale)
principalDf_outlier_scale = pd.DataFrame(data=principalComponents_outlier_scale
, columns=['PC' + str(i + 1) for i in range(len(features_outlier))])
# combining principal components and target
finalDf_outlier_scale = pd.concat([outlier_names, principalDf_outlier_scale], axis=1)
dfff_outlier_scale = finalDf_outlier_scale.fillna(0)
# COVARIANCE MATRIX
x_scale_covar = dff.loc[:, features].values
y_scale_covar = dff.loc[:, ].values
pca_scale_covar = PCA(n_components=len(features))
principalComponents_scale_covar = pca_scale_covar.fit_transform(x_scale_covar)
principalDf_scale_covar = pd.DataFrame(data=principalComponents_scale_covar
, columns=['PC' + str(i + 1) for i in range(len(features))])
# combining principal components and target
finalDf_scale_covar = pd.concat([df[[df.columns[0]]], principalDf_scale_covar], axis=1)
dfff_scale_covar = finalDf_scale_covar.fillna(0)
# COVARIANCE MATRIX REMOVING OUTLIERS
x_outlier_scale_covar = outlier_dff.loc[:, features_outlier].values
y_outlier_scale_covar = outlier_dff.loc[:, ].values
pca_outlier_scale_covar = PCA(n_components=len(features_outlier))
principalComponents_outlier_scale_covar = pca_outlier_scale_covar.fit_transform(x_outlier_scale_covar)
principalDf_outlier_scale_covar = pd.DataFrame(data=principalComponents_outlier_scale_covar
, columns=['PC' + str(i + 1) for i in
range(len(features_outlier))])
finalDf_outlier_scale_covar = pd.concat([outlier_names, principalDf_outlier_scale_covar], axis=1)
dfff_outlier_scale_covar = finalDf_outlier_scale_covar.fillna(0)
if outlier == 'No' and matrix_type == "Correlation":
dat = dfff_scale
elif outlier == 'Yes' and matrix_type == "Correlation":
dat = dfff_outlier_scale
elif outlier == "No" and matrix_type == "Covariance":
dat = dfff_scale_covar
elif outlier == "Yes" and matrix_type == "Covariance":
dat = dfff_outlier_scale_covar
elif all_custom == 'Custom':
# Dropping Data variables
dff_input = dff.drop(columns=dff[input])
features1_input = dff_input.columns
features_input = list(features1_input)
dff_target = dff[input]
# OUTLIER DATA INPUT
z_scores_input = scipy.stats.zscore(dff_input)
abs_z_scores_input = np.abs(z_scores_input)
filtered_entries_input = (abs_z_scores_input < 3).all(axis=1)
dff_input_outlier = dff_input[filtered_entries_input]
features1_input_outlier = dff_input_outlier.columns
features_input_outlier = list(features1_input_outlier)
outlier_names_input1 = df[filtered_entries_input]
outlier_names_input = outlier_names_input1.iloc[:, 0]
# OUTLIER DATA TARGET
z_scores_target = scipy.stats.zscore(dff_target)
abs_z_scores_target = np.abs(z_scores_target)
filtered_entries_target = (abs_z_scores_target < 3).all(axis=1)
dff_target_outlier = dff_target[filtered_entries_target]
# INPUT DATA WITH OUTLIERS
x_scale_input = dff_input.loc[:, features_input].values
y_scale_input = dff_input.loc[:, ].values
x_scale_input = StandardScaler().fit_transform(x_scale_input)
pca_scale_input = PCA(n_components=len(features_input))
principalComponents_scale_input = pca_scale_input.fit_transform(x_scale_input)
principalDf_scale_input = pd.DataFrame(data=principalComponents_scale_input
, columns=['PC' + str(i + 1) for i in range(len(features_input))])
finalDf_scale_input = pd.concat([df[[df.columns[0]]], principalDf_scale_input, dff_target], axis=1)
dfff_scale_input = finalDf_scale_input.fillna(0)
# INPUT DATA WITH REMOVING OUTLIERS
x_scale_input_outlier = dff_input_outlier.loc[:, features_input_outlier].values
y_scale_input_outlier = dff_input_outlier.loc[:, ].values
x_scale_input_outlier = StandardScaler().fit_transform(x_scale_input_outlier)
pca_scale_input_outlier = PCA(n_components=len(features_input_outlier))
principalComponents_scale_input_outlier = pca_scale_input_outlier.fit_transform(x_scale_input_outlier)
principalDf_scale_input_outlier = pd.DataFrame(data=principalComponents_scale_input_outlier
, columns=['PC' + str(i + 1) for i in
range(len(features_input_outlier))])
finalDf_scale_input_outlier = pd.concat(
[outlier_names_input, principalDf_scale_input_outlier, dff_target_outlier],
axis=1)
dfff_scale_input_outlier = finalDf_scale_input_outlier.fillna(0)
# COVARIANCE MATRIX
x_scale_input_covar = dff_input.loc[:, features_input].values
y_scale_input_covar = dff_input.loc[:, ].values
pca_scale_input_covar = PCA(n_components=len(features_input))
principalComponents_scale_input_covar = pca_scale_input_covar.fit_transform(x_scale_input_covar)
principalDf_scale_input_covar = pd.DataFrame(data=principalComponents_scale_input_covar
, columns=['PC' + str(i + 1) for i in range(len(features_input))])
finalDf_scale_input_covar = pd.concat([df[[df.columns[0]]], principalDf_scale_input_covar, dff_target], axis=1)
dfff_scale_input_covar = finalDf_scale_input_covar.fillna(0)
# COVARIANCE MATRIX OUTLIERS REMOVED
x_scale_input_outlier_covar = dff_input_outlier.loc[:, features_input_outlier].values
y_scale_input_outlier_covar = dff_input_outlier.loc[:, ].values
pca_scale_input_outlier_covar = PCA(n_components=len(features_input_outlier))
principalComponents_scale_input_outlier_covar = pca_scale_input_outlier_covar.fit_transform(
x_scale_input_outlier_covar)
principalDf_scale_input_outlier_covar = pd.DataFrame(data=principalComponents_scale_input_outlier_covar
, columns=['PC' + str(i + 1) for i in
range(len(features_input_outlier))])
finalDf_scale_input_outlier_covar = pd.concat(
[outlier_names_input, principalDf_scale_input_outlier_covar, dff_target_outlier],
axis=1)
dfff_scale_input_outlier_covar = finalDf_scale_input_outlier_covar.fillna(0)
if outlier == 'No' and matrix_type == "Correlation":
dat = dfff_scale_input
elif outlier == 'Yes' and matrix_type == "Correlation":
dat = dfff_scale_input_outlier
elif outlier == "No" and matrix_type == "Covariance":
dat = dfff_scale_input_covar
elif outlier == "Yes" and matrix_type == "Covariance":
dat = dfff_scale_input_outlier_covar
csv_string = dat.to_csv(index=False, encoding='utf-8')
csv_string = "data:text/csv;charset=utf-8,%EF%BB%BF" + urllib.parse.quote(csv_string)
return csv_string
@app.callback(Output('download-link-correlation', 'download'),
[Input('eigenA-outlier', 'value'),
])
def update_filename(outlier):
if outlier == 'Yes':
download = 'feature_correlation_removed_outliers_data.csv'
elif outlier == 'No':
download = 'feature_correlation_data.csv'
return download
@app.callback([Output('data-table-correlation', 'data'),
Output('data-table-correlation', 'columns'),
Output('download-link-correlation', 'href')],
[Input("eigenA-outlier", 'value'),
Input('csv-data', 'data')], )
def update_output(outlier, data):
if not data:
return dash.no_update, dash.no_update, dash.no_update
df = pd.read_json(data, orient='split')
#------------------------------------------------------------------------------------------
# QR Factorization of Matrix A In Python 3.8.1 Numpy 1.19.2
#
# The following sample demonstrates the QR factorization of
# a randomly generated matrix A of real or complex numbers using:
#
# * Gram-Schmidt Orthogonalization;
# * Householder Reflections;
#
# and surveys the complexity and (single-threaded) performance of both methods
#
# GNU Public License (C) 2021 <NAME>
#------------------------------------------------------------------------------------------
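# For orientation, here is a minimal, self-contained sketch of classical Gram-Schmidt QR.
# It only illustrates the idea benchmarked below and is NOT the implementation in
# qr_gschmidt / qr_gs_schwrt / qr_householder, which are assumed to be the real routines.
import numpy as np

def qr_gs_sketch(A):
    """Classical Gram-Schmidt: orthonormalize the columns of A, collecting coefficients in R."""
    A = np.asarray(A, dtype=complex)
    m, n = A.shape
    Q = np.zeros((m, n), dtype=complex)
    R = np.zeros((n, n), dtype=complex)
    for j in range(n):
        v = A[:, j].copy()
        for i in range(j):
            R[i, j] = np.vdot(Q[:, i], A[:, j])   # projection coefficient onto earlier column
            v -= R[i, j] * Q[:, i]
        R[j, j] = np.linalg.norm(v)
        Q[:, j] = v / R[j, j]
    return Q, R

# Sanity check: Q has orthonormal columns and Q @ R reconstructs A, e.g.
# _A = np.random.rand(5, 3); _Q, _R = qr_gs_sketch(_A); assert np.allclose(_Q @ _R, _A)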
import time
import math
import random
import pandas as pd
import numpy as np
import numpy.linalg as lin
from qr_gschmidt import *
from qr_gs_schwrt import *
from qr_householder import *
mat_shape = { 'min': 3, 'max': 15 }
mat_shape_perf = { 'min': 750, 'max': 950 }
qr_alg = [ { 'alg': qr_gs, 'name': 'Gram-Schmidt ' },
{ 'alg': qr_gs_modsr, 'name': 'Schwarz-Rutishauser' },
{ 'alg': qr_hh, 'name': 'Householder ' } ]
mat_types = [ 'real ', 'complex' ]
checkup_status = [ 'failed', 'passed' ]
checkup_banner = "\n[ Verification %s... ]"
stats_banner = "%s Matrix A Statistics:\n"
qr_test_banner = "\nQR Factorization Of A `%s` Matrix Using %s Algorithm:"
survey_banner = "Matrix: %s WINS: [ %s : %d secs ] LOOSES: [ %s : %d secs ]"
perf_stats = "%s : [ type: `%s` exec_time: %d secs verification: %s ]"
app_banner = "QR Factorization v.0.0.1 CPOL License (C) 2021 by <NAME>"
# Function: perf(A, qr, type=complex) evaluates the qr factorization method's execution wall-time in seconds,
# returns the tuple of the resultant matrices Q,R and the execution time
def perf(A, qr, type=complex):
t_d = time.time(); Q,R = qr(A, type); \
return Q, R, (time.time() - t_d)
def check(M1, M2):
v1 = np.reshape(M1,-1)
v2 = np.reshape(M2,-1)
if len(v1) != len(v2):
return False
else: return 0 == len(np.where(np.array(\
[ format(c1, '.4g') == format(c2, '.4g') \
for c1,c2 in zip(v1, v2) ]) == False)[0])
def rand_matrix(rows, cols, type=complex):
np.set_printoptions(precision=8)
if type == complex:
return np.reshape(\
np.random.uniform(1, 10, rows * cols) + \
np.random.uniform(-10, 10, rows * cols) * 1j, (rows, cols))
else: return np.reshape(10 * np.random.uniform(\
0.01, 0.99, rows * cols), (rows, cols))
def print_matrix(M, alias):
np.set_printoptions(\
precision=2, suppress=True, \
formatter={'complexfloat': lambda z: format(z, '.2f')})
if np.iscomplexobj(M):
eps = np.finfo(float).eps; tol = 100
M = [np.real(m) if np.all(np.abs(np.imag(m)) < tol * eps) else m for m in M]
M = [np.real_if_close(m) for m in M]
print("\nMatrix %s (%dx%d):" % \
(alias, len(M), len(M[0])),"\n")
pd.set_option('precision', 2); \
df = pd.DataFrame(M)
"""Module for common preprocessing tasks."""
import time
import pandas as pd
from sklearn.preprocessing import LabelEncoder, MinMaxScaler
# TODO: fix docstrings
# TODO: drop_by
# TODO: apply_custom_item_level (choose axis)
# TODO: add progress tracking
class Prep(object):
"""Preprocessing / preparing data.
Attributes:
data (pandas DataFrame): dataframe with all transformations
"""
def __init__(self, df: pd.DataFrame):
"""Create new object.
Args:
- df (DataFrame): a pandas dataframe on which to perform preprocessing tasks.
All tasks are performed on a copy of this DataFrame
"""
self._data = df.copy()
self._le = {}
self._scaler = None
@property
def df(self):
"""Get the actual version of modified df."""
return self._data.copy()
@df.setter
def df(self, df):
"""Set a new dataframe to be modified."""
self._data = df.copy()
return self
def apply_custom(self, fn, args={}):
"""Apply a custom function to the dataframe.
Args:
- fn: custom function to apply. Should receive the dataframe and return the modified dataframe
Returns:
self
"""
self._data = fn(self._data, **args)
return self
def drop_nulls(self, cols: list = None):
"""Drop all rows with nulls.
Args:
- cols (list): list of columns or None to all dataframe
Returns:
self
"""
if cols is None:
self._data.dropna(inplace=True)
else:
cols = [c for c in cols if c in self._data.columns]
self._data.dropna(subset=cols, inplace=True)
return self
def drop_not_nulls(self, cols: list):
"""Drop all rows with not null values for each column in cols.
Args:
- cols (list): list of columns
Returns:
self
"""
cols = [c for c in cols if c in self._data.columns]
for col in cols:
self._data = self._data[self._data[col].isnull()]
return self
def drop_null_cols(self):
"""Drop colls with all null values.
Returns:
self
"""
self._data.dropna(index=1, how='all')
return self
def drop_cols(self, cols: list):
"""Drop all listed columns.
Args:
- cols (list): list of cols to drop
Returns:
self
"""
cols = [c for c in cols if c in self._data.columns]
for col in cols:
self._data.drop(col, axis=1, inplace=True)
return self
def bool_to_int(self, cols: list = None):
"""Transform bool into 1 and 0.
Args:
- cols (list): list of cols to transform
Returns:
Self
"""
if cols is None:
self._data = self._data.applymap(lambda x: 1 if x else 0)
else:
cols = [c for c in cols if c in self._data.columns]
for col in cols:
self._data[col] = self._data[col].apply(lambda x: 1 if x else 0)
return self
# TODO: Save the label encoder to a pickle file
def encode(self, cols: list):
"""Encode categorical vars into numeric ones.
Args:
- cols (list): list of columns to encode
Returns:
Self
"""
cols = [c for c in cols if c in self._data.columns]
for col in cols:
self._data[col].fillna('N/A-ENC', inplace=True)
self._le[col] = LabelEncoder()
self._data[col] = self._le[col].fit_transform(self._data[col])
return self
def inverse_encode(self, cols: list):
"""Encode categorical vars into numeric ones.
Args:
- cols (list): list of columns to encode
Returns:
Self
"""
cols = [c for c in cols if c in self._data.columns]
for col in cols:
self._data[col] = self._le[col].inverse_transform(self._data[col])
return self
def fill_null_with(self, val, cols=None):
"""Fill all null with a same value.
Args:
- val: can be `mean` to replace null with the mean of the columns
or any value to put in place of nulls.
- cols (list): list of columns or None to all dataframe
Returns:
self
"""
if cols is None:
self._data.fillna(val, inplace=True)
else:
cols = [c for c in cols if c in self._data.columns]
if isinstance(val, str):
if val == 'mean':
for col in cols:
self._data[col].fillna((self._data[col].mean()),
inplace=True)
else:
for col in cols:
self._data[col].fillna(val, inplace=True)
else:
for col in cols:
self._data[col].fillna(val, inplace=True)
return self
def dummify(self, columns: list, drop_first: bool = True):
"""Create dummies for selected columns
Args:
columns (list): list of columns to dummify
drop_first (bool, optional): select if the first class will be dropped. Defaults to True
Returns:
pd.DataFrame
"""
for col in columns:
dummy = pd.get_dummies(self._data[col], drop_first=drop_first)
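# Hypothetical usage sketch of the Prep class above (the toy column names 'age', 'city'
# and 'active' are made up for illustration and are not part of this module):
# toy = pd.DataFrame({'age': [21, None, 35], 'city': ['A', 'B', None], 'active': [True, False, True]})
# clean = (Prep(toy)
#          .bool_to_int(['active'])
#          .encode(['city'])                     # label-encode; NaN becomes the 'N/A-ENC' class
#          .fill_null_with('mean', cols=['age'])
#          .df)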
# -*- coding: utf-8 -*-
import pytest
from unittest.mock import MagicMock
from copy import deepcopy
import pandas
from .utils import load_data
from tests.utils.df_handler import transform_df
def set_list_tables_mock(client):
list_tables_response = load_data("redshift-data-list-tables-response.json")
list_tables_mock = MagicMock(return_value=list_tables_response)
client.set_mock("ListTables", list_tables_mock)
return list_tables_mock
def set_execute_statement_mock(client, check_kwargs=None):
# to pass params to describe_statement_mock
info_for_statements = {}
execute_statement_response_base = load_data(
"redshift-data-execute-statement-response-base.json"
)
execute_statement_mock = MagicMock()
def execute_statement_side_effect(*args, **kwargs):
cluster_identifier = kwargs["ClusterIdentifier"]
database = kwargs["Database"]
sql = kwargs["Sql"]
if check_kwargs:
check_kwargs(kwargs)
response = deepcopy(execute_statement_response_base)
response["ClusterIdentifier"] = cluster_identifier
response["Database"] = database
response["Id"] = "{}{:0=2}".format(
response["Id"], execute_statement_mock.call_count
)
info_for_statement = info_for_statements.setdefault(response["Id"], {})
info_for_statement["ClusterIdentifier"] = cluster_identifier
info_for_statement["Database"] = database
info_for_statement["Sql"] = sql
return response
execute_statement_mock.side_effect = execute_statement_side_effect
client.set_mock("ExecuteStatement", execute_statement_mock)
return info_for_statements, execute_statement_mock
def set_describe_statement_mock(client, info_for_statements, **response_diff):
describe_statement_response_base = load_data(
"redshift-data-describe-statement-response-base.json"
)
describe_statement_mock = MagicMock()
def describe_statement_side_effect(*args, **kwargs):
statement_id = kwargs["Id"]
info_for_statement = info_for_statements[statement_id]
sql = info_for_statement["Sql"]
cluster_identifier = info_for_statement["ClusterIdentifier"]
response = deepcopy(describe_statement_response_base)
response["Id"] = statement_id
response["ClusterIdentifier"] = cluster_identifier
response["QueryString"] = sql
response.update(response_diff)
return response
describe_statement_mock.side_effect = describe_statement_side_effect
client.set_mock("DescribeStatement", describe_statement_mock)
return describe_statement_mock
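# Sketch of how these helpers are wired together in a test (the `client` fixture and the
# "FINISHED" status value are assumptions for illustration, not part of this module):
# set_list_tables_mock(client)
# info_for_statements, execute_mock = set_execute_statement_mock(client)
# set_describe_statement_mock(client, info_for_statements, Status="FINISHED")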
def test_to_redshift_w_no_secret_arn_and_no_db_user_should_fail(
writer_under_test,
):
from pandas_amazon_redshift.errors import InvalidAuthentication
with pytest.raises(InvalidAuthentication):
writer_under_test(pandas.DataFrame([[1]], columns=["col"]))
#!/usr/bin/python
"""
CoinMarketCap USD Price History
Print the CoinMarketCap USD price history for a particular cryptocurrency in CSV format.
"""
import sys
import re
import urllib2
import argparse
import datetime
parser = argparse.ArgumentParser()
parser.add_argument("currency", help="This is the name of the crypto, as is shown on coinmarketcap. For BTC, "
"for example, type: bitcoin.", type=str)
parser.add_argument("start_date", help="Start date from which you wish to retrieve the historical data. For example, "
"'2017-10-01'.", type=str)
parser.add_argument("end_date", help="End date for the historical data retrieval. If you wish to retrieve all the "
"data then you can give a date in the future. Same format as in start_date "
"'yyyy-mm-dd'.", type=str)
parser.add_argument("--dataframe", help="If present, returns a pandas DataFrame.",action='store_true')
def parse_options(args):
"""
Extract parameters from command line.
"""
currency = args.currency.lower()
start_date = args.start_date
end_date = args.end_date
start_date_split = start_date.split('-')
end_date_split = end_date.split('-')
start_year = int(start_date_split[0])
end_year = int(end_date_split[0])
# String validation
pattern = re.compile('[2][0][1][0-9]-[0-1][0-9]-[0-3][0-9]')
if not re.match(pattern, start_date):
raise ValueError('Invalid format for the start_date: ' + start_date + ". Should be of the form: yyyy-mm-dd.")
if not re.match(pattern, end_date):
raise ValueError('Invalid format for the end_date: ' + end_date + ". Should be of the form: yyyy-mm-dd.")
# Datetime validation for the correctness of the date. Will throw a ValueError if not valid
datetime.datetime(start_year,int(start_date_split[1]),int(start_date_split[2]))
datetime.datetime(end_year, int(end_date_split[1]), int(end_date_split[2]))
# CoinMarketCap's price data (at least for Bitcoin, presumably for all others) only goes back to 2013
invalid_args = start_year < 2013
invalid_args = invalid_args or end_year < 2013
invalid_args = invalid_args or end_year < start_year
if invalid_args:
print('Usage: ' + __file__ + ' <currency> <start_date> <end_date> --dataframe')
sys.exit(1)
start_date = start_date_split[0]+ start_date_split[1] + start_date_split[2]
end_date = end_date_split[0] + end_date_split[1] + end_date_split[2]
return currency, start_date, end_date
def download_data(currency, start_date, end_date):
"""
Download HTML price history for the specified cryptocurrency and time range from CoinMarketCap.
"""
url = 'https://coinmarketcap.com/currencies/' + currency + '/historical-data/' + '?start=' \
+ start_date + '&end=' + end_date
try:
page = urllib2.urlopen(url,timeout=10)
if page.getcode() != 200:
raise Exception('Failed to load page')
html = page.read()
page.close()
except Exception as e:
print('Error fetching price data from ' + url)
print('Did you use a valid CoinMarketCap currency?\nIt should be entered exactly as displayed on CoinMarketCap.com (case-insensitive), with dashes in place of spaces.')
if hasattr(e, 'message'):
print("Error message: " + e.message)
else:
print(e)
sys.exit(1)
return html
def extract_data(html):
"""
Extract the price history from the HTML.
The CoinMarketCap historical data page has just one HTML table. This table contains the data we want.
It's got one header row with the column names.
We need to derive the "average" price for the provided data.
"""
head = re.search(r'<thead>(.*)</thead>', html, re.DOTALL).group(1)
header = re.findall(r'<th .*>([\w ]+)</th>', head)
header.append('Average (High + Low / 2)')
body = re.search(r'<tbody>(.*)</tbody>', html, re.DOTALL).group(1)
raw_rows = re.findall(r'<tr[^>]*>' + r'\s*<td[^>]*>([^<]+)</td>'*7 + r'\s*</tr>', body)
# strip commas
rows = []
for row in raw_rows:
row = [ field.translate(None, ',') for field in row ]
rows.append(row)
# calculate averages
def append_average(row):
high = float(row[header.index('High')])
low = float(row[header.index('Low')])
average = (high + low) / 2
row.append( '{:.2f}'.format(average) )
return row
rows = [ append_average(row) for row in rows ]
return header, rows
def render_csv_data(header, rows):
"""
Render the data in CSV format.
"""
print(','.join(header))
for row in rows:
print(','.join(row))
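# A minimal driver sketch (not necessarily the author's original main block, which is not
# shown here) tying the helpers above together when the file is run as a script:
if __name__ == '__main__':
    args = parser.parse_args()
    currency, start_date, end_date = parse_options(args)
    html = download_data(currency, start_date, end_date)
    header, rows = extract_data(html)
    render_csv_data(header, rows)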
# --------------------------------------------- Util Methods -----------------------------------------------------------
def processDataFrame(df):
import pandas as pd
assert isinstance(df, pd.DataFrame), "df is not a pandas DataFrame."
cols = list(df.columns.values)
cols.remove('Date')
df.loc[:,'Date'] = pd.to_datetime(df.Date)
import eurostat
#from eurostatapiclient import EurostatAPIClient
import statsmodels.api as sm
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import wbdata
countries=("AT","BE","BG","CH","CY","CZ","DE", "DK","ES","FI","FR","HR","HU","IE","IS","IT","LI","LT","LU","LV","MT","NL","NO","PL","PT","RO","SE","SI","SK","TR","UK")
countries=("AT","BE","BG","CY","CZ","DE", "DK","ES","FI","FR","HU","IE","IS","IT","LT","LU","LV","MT","NL","NO","PL","PT","SE","SI","SK","UK")
countries=("AT","BE","DE","DK","ES","FI","FR","IE","IS","IT","NL","NO","PL","PT","SE","TR","GB")
code = "sdg_17_50" #Share of environmental taxes in total tax revenues
#code = "urb_cenv"
#code = "env_ac_epneec"
code = "env_air_gge" # GHG Emmisions
#code = "ei_bssi_m_r2" # Sentiment
toc_df = eurostat.get_toc_df()
f = eurostat.subset_toc_df(toc_df, 'environment')
def fetch_ghg_pc():
# Eurostat Code
# Codes can be found at Eurostat data browser
# https://ec.europa.eu/eurostat/data/database
code = "t2020_rd300" #SGreenhouse gas emissions per capita [T2020_RD300]
df = eurostat.get_data_df(code, flags=False)
df.rename(columns={ df.columns[0]: "country" }, inplace = True)
#Only individual countries
df2 = df[df['country'].isin(list(countries))]
# Reshape to put Years from column headers (1st row) to a column
df2=df2.melt(id_vars=["country"],
var_name="year",
value_name="ghg_e_pc")
# Sort by geo and year
df2.sort_values(by=['country','year'],inplace = True)
return df2
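# Toy illustration (numbers made up) of the reshape above: melt turns the per-year columns
# returned by eurostat into a single 'year' column with one row per (country, year) pair.
# wide = pd.DataFrame({'country': ['AT', 'BE'], 2017: [8.0, 9.1], 2018: [7.8, 8.9]})
# long = wide.melt(id_vars=['country'], var_name='year', value_name='ghg_e_pc')
# # -> rows like ('AT', 2017, 8.0), ('AT', 2018, 7.8), ('BE', 2017, 9.1), ...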
def fetch_gdp():
# Eurostat Code - always lower case
# Codes can be found at Eurostat data browser
# https://ec.europa.eu/eurostat/data/database
code = "sdg_08_10" #
df = eurostat.get_data_df(code, flags=False)
df.rename(columns={ df.columns[2]: "country" }, inplace = True)
#Only individual countries
df2 = df[df['country'].isin(list(countries))]
# Drop the 2019 column before melt
del df2[2019]
# Also with melting :
# since this dataset contains two variables
# I'll split it into two data sets
df3=df2.melt(id_vars=["country", "unit"],
var_name="year",
value_name="temp_value")
df3.sort_values(by=['country','year'],inplace = True)
return (df3[df3['unit']=='CLV10_EUR_HAB'], df3[df3['unit']=='CLV_PCH_PRE_HAB'])
# rename that temp columns
# Reshape to put Years from column headers (1st row) to a column
def fetch_tax():
code = "sdg_17_50" #Share of environmental taxes in total tax revenues
df = eurostat.get_data_df(code, flags=False)
df.rename(columns={ df.columns[0]: "country" }, inplace = True)
df = df[df['country'].isin(list(countries))]
df=df.melt(id_vars=["country"],
var_name="year",
value_name="env_tax")
df.sort_values(by=['country','year'],inplace = True)
return df.query('year > 1999').query('year<2018')
ghg_pc_df = fetch_ghg_pc()
CLV_PCH_HAB_df,CLV_PCH_PRE_HAB_df = fetch_gdp()
tax_env_share_df = fetch_tax()
gdp_df = pd.merge(CLV_PCH_HAB_df, tax_env_share_df, how='left', left_on=['year','country'], right_on = ['year','country'])
# Copyright 1999-2021 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import pandas as pd
try:
import scipy.sparse as sps
except ImportError: # pragma: no cover
sps = None
from ... import opcodes
from ...core import ENTITY_TYPE, recursive_tile
from ...core.operand import OperandStage
from ...serialization.serializables import (
KeyField,
AnyField,
StringField,
Int64Field,
BoolField,
)
from ...tensor import tensor as astensor
from ...utils import lazy_import, pd_release_version
from ..core import Index as DataFrameIndexType, INDEX_TYPE
from ..initializer import Index as asindex
from ..operands import DataFrameOperand, DataFrameOperandMixin
from ..utils import validate_axis_style_args, parse_index
from .index_lib import DataFrameReindexHandler
cudf = lazy_import("cudf", globals=globals())
# under pandas<1.1, SparseArray ignores zeros on creation
_pd_sparse_miss_zero = pd_release_version[:2] < (1, 1)
class DataFrameReindex(DataFrameOperand, DataFrameOperandMixin):
_op_type_ = opcodes.REINDEX
_input = KeyField("input")
_index = AnyField("index")
_index_freq = AnyField("index_freq")
_columns = AnyField("columns")
_method = StringField("method")
_level = AnyField("level")
_fill_value = AnyField("fill_value")
_limit = Int64Field("limit")
_enable_sparse = BoolField("enable_sparse")
def __init__(
self,
index=None,
index_freq=None,
columns=None,
method=None,
level=None,
fill_value=None,
limit=None,
enable_sparse=None,
**kw,
):
super().__init__(
_index=index,
_index_freq=index_freq,
_columns=columns,
_method=method,
_level=level,
_fill_value=fill_value,
_limit=limit,
_enable_sparse=enable_sparse,
**kw,
)
@property
def input(self):
return self._input
@property
def index(self):
return self._index
@property
def index_freq(self):
return self._index_freq
@property
def columns(self):
return self._columns
@property
def method(self):
return self._method
@property
def level(self):
return self._level
@property
def fill_value(self):
return self._fill_value
@property
def limit(self):
return self._limit
@property
def enable_sparse(self):
return self._enable_sparse
@property
def _indexes(self):
# used for index_lib
indexes = []
names = ("index", "columns")
for ax in range(self.input.ndim):
index = names[ax]
val = getattr(self, index)
if val is not None:
indexes.append(val)
else:
indexes.append(slice(None))
return indexes
@_indexes.setter
def _indexes(self, new_indexes):
for index_field, new_index in zip(["_index", "_columns"], new_indexes):
setattr(self, index_field, new_index)
@property
def indexes(self):
return self._indexes
@property
def can_index_miss(self):
return True
def _new_chunks(self, inputs, kws=None, **kw):
if self.stage == OperandStage.map and len(inputs) < len(self._inputs):
assert len(inputs) == len(self._inputs) - 1
inputs.append(self._fill_value.chunks[0])
if self.stage == OperandStage.agg and self._fill_value is not None:
# fill_value is not required
self._fill_value = None
return super()._new_chunks(inputs, kws=kws, **kw)
def _set_inputs(self, inputs):
super()._set_inputs(inputs)
inputs_iter = iter(self._inputs)
self._input = next(inputs_iter)
if self._index is not None and isinstance(self._index, ENTITY_TYPE):
self._index = next(inputs_iter)
if self._fill_value is not None and isinstance(self._fill_value, ENTITY_TYPE):
self._fill_value = next(inputs_iter)
def __call__(self, df_or_series):
inputs = [df_or_series]
shape = list(df_or_series.shape)
index_value = df_or_series.index_value
columns_value = dtypes = None
if df_or_series.ndim == 2:
columns_value = df_or_series.columns_value
dtypes = df_or_series.dtypes
if self._index is not None:
shape[0] = self._index.shape[0]
index_value = asindex(self._index).index_value
self._index = astensor(self._index)
if isinstance(self._index, ENTITY_TYPE):
inputs.append(self._index)
if self._columns is not None:
shape[1] = self._columns.shape[0]
dtypes = df_or_series.dtypes.reindex(index=self._columns).fillna(
np.dtype(np.float64)
)
columns_value = parse_index(dtypes.index, store_data=True)
if self._fill_value is not None and isinstance(self._fill_value, ENTITY_TYPE):
inputs.append(self._fill_value)
if df_or_series.ndim == 1:
return self.new_series(
inputs,
shape=tuple(shape),
dtype=df_or_series.dtype,
index_value=index_value,
name=df_or_series.name,
)
else:
return self.new_dataframe(
inputs,
shape=tuple(shape),
dtypes=dtypes,
index_value=index_value,
columns_value=columns_value,
)
@classmethod
def tile(cls, op):
if all(len(inp.chunks) == 1 for inp in op.inputs):
# tile one chunk
out = op.outputs[0]
chunk_op = op.copy().reset_key()
chunk_params = out.params.copy()
chunk_params["index"] = (0,) * out.ndim
out_chunk = chunk_op.new_chunk(
[inp.chunks[0] for inp in op.inputs], kws=[chunk_params]
)
params = out.params.copy()
params["nsplits"] = ((s,) for s in out.shape)
params["chunks"] = [out_chunk]
new_op = op.copy()
return new_op.new_tileables(op.inputs, kws=[params])
handler = DataFrameReindexHandler()
result = yield from handler.handle(op)
if op.method is None and op.fill_value is None:
return [result]
else:
axis = 1 if op.columns is not None and op.index is None else 0
result = result.fillna(
value=op.fill_value, method=op.method, axis=axis, limit=op.limit
)
return [(yield from recursive_tile(result))]
@classmethod
def _get_value(cls, ctx, obj):
if obj is not None and hasattr(obj, "key"):
return ctx[obj.key]
return obj
@classmethod
def _convert_to_writable(cls, obj):
if isinstance(obj, np.ndarray) and not obj.flags.writeable:
return obj.copy()
return obj
@classmethod
def _sparse_reindex(cls, inp, index=None, columns=None):
if inp.ndim == 2:
columns = inp.columns if columns is None else columns
index_shape = len(index) if index is not None else len(inp)
i_to_columns = dict()
for i, col in enumerate(columns):
if col in inp.dtypes:
if index is None:
i_to_columns[i] = inp[col]
else:
indexer = inp.index.reindex(index)[1]
cond = indexer >= 0
available_indexer = indexer[cond]
del indexer
data = inp[col].iloc[available_indexer].to_numpy()
ind = cond.nonzero()[0]
spmatrix = sps.csc_matrix(
(data, (ind, np.zeros_like(ind))),
shape=(index_shape, 1),
dtype=inp[col].dtype,
)
# convert to SparseDtype(xxx, np.nan)
# to ensure 0 in sparse_array not converted to np.nan
if not _pd_sparse_miss_zero:
sparse_array = pd.arrays.SparseArray.from_spmatrix(spmatrix)
sparse_array = pd.arrays.SparseArray(
sparse_array.sp_values,
sparse_index=sparse_array.sp_index,
fill_value=np.nan,
dtype=pd.SparseDtype(sparse_array.dtype, np.nan),
)
else:
from pandas._libs.sparse import IntIndex
sparse_array = pd.arrays.SparseArray(
data,
sparse_index=IntIndex(index_shape, ind),
fill_value=np.nan,
dtype=pd.SparseDtype(data.dtype, np.nan),
)
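# Toy illustration (values made up) of why the dtype above is forced to SparseDtype(<dtype>, np.nan):
# with NaN as the fill value, explicit zeros in `data` are stored as real zeros instead of
# being folded into the fill value.
# dense = np.array([0.0, 1.5, 0.0])
# sa = pd.arrays.SparseArray(dense, dtype=pd.SparseDtype(dense.dtype, np.nan))
# # sa.to_dense() -> array([0. , 1.5, 0. ]); only genuinely missing entries become NaN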
"""
bzfunds.api
~~~~~~~~~~~
This module implements an interface to conveniently store and search
funds' data.
"""
import logging
import sys
from datetime import datetime
from typing import Optional, Union
import pandas as pd
import pymongo
from typeguard import typechecked
from . import settings
from .constants import API_FIRST_VALID_DATE
from .data import get_history
from .dbm import Manager
__all__ = ("download_data", "get_data")
logging.basicConfig(
stream=sys.stdout,
level=settings.LOGGING_LEVEL,
format=settings.LOGGING_FORMAT,
)
logger = logging.getLogger(__name__)
# Globals
# ----
DEFAULT_DB_MANAGER = Manager(**settings.MONGODB)
@typechecked
def download_data(
*,
start_year: Optional[Union[str, float]] = None,
update_only: bool = True,
manager: Manager = DEFAULT_DB_MANAGER,
):
"""Download available data and insert it into the database.
Must provide *either* a `start_year` or set `update_only=True`. Providing
both at the same time will raise `ValueError`.
...
Parameters
----------
start_year : `str` or `float`
starting year to query data. If not provided, defaults to last 5 years
update_only : `bool`
if True, will use the last available date in `manager` as the starting
query date (this is not a `diff` against the database!)
manager : `Manager`
loaded instance of database manager
"""
if not (start_year or update_only):
raise ValueError("Must provide a `start_year` or `update_only` flag")
elif start_year and update_only:
raise ValueError("Conflicting arguments")
if update_only:
cursor = manager.collection.find().limit(1).sort("date", pymongo.DESCENDING)
try:
start_dt = cursor[0]["date"]
except (IndexError, KeyError):
logger.warning("No previous data found. Querying all available history.")
start_dt = API_FIRST_VALID_DATE
else:
# Defaults to last 5 years if not provided
if not start_year:
start_year = (datetime.today() - pd.Timedelta(f"{int(365 * 5)}D")).year
start_dt = pd.to_datetime(f"{start_year}-01-01")
try:
_ = get_history(
start_dt=start_dt,
end_dt=datetime.today(),
commit=True,
manager=manager,
)
except ValueError as e:
logger.error(e)
@typechecked
def get_data(
funds: Optional[Union[str, list]] = None,
start_dt: Optional[Union[str, datetime]] = None,
end_dt: Optional[Union[str, datetime]] = None,
manager: Manager = DEFAULT_DB_MANAGER,
) -> Optional[pd.DataFrame]:
"""Easily query the database.
...
Parameters
----------
funds : `str` or `list`
start_dt : `str` or `datetime`
string must be in YYYY-MM-DD format
end_dt : `str` or `datetime`
string must be in YYYY-MM-DD format
manager : `Manager`
loaded instance of database manager
"""
if isinstance(funds, str):
funds = [funds]
search = {}
if funds:
search["fund_cnpj"] = {"$in": funds}
if start_dt or end_dt:
search["date"] = {}
if start_dt:
search["date"]["$gte"] =
|
pd.to_datetime(start_dt)
|
pandas.to_datetime
|
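# Hypothetical usage sketch (the fund CNPJ below is made up); assumes a reachable MongoDB
# instance configured via settings.MONGODB:
# download_data(update_only=True)                          # refresh the local collection
# df = get_data(funds="00.000.000/0001-00",
#               start_dt="2021-01-01", end_dt="2021-06-30")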
import pandas as pd
import numpy as np
from os import path
import math as m
import sys
import os
def topsis(dfg,wei,val,result):
if not os.path.isfile(dfg):
print(f"err : No such file exist")
exit(1)
if (os.path.splitext(dfg))[1]!=".csv" :
print(f"err : wrong file type")
exit(1)
if ((os.path.splitext(result))[1])!=".csv":
print("err : Output file must be .csv")
exit(1)
else:
ds = pd.read_csv(dfg)
import pandas as pd
import numpy as np
import openml
from pandas.api.types import is_numeric_dtype
from sklearn.model_selection import cross_validate, train_test_split, GridSearchCV, RandomizedSearchCV
from sklearn.metrics import f1_score, mean_squared_error
from sklearn.pipeline import Pipeline
from statistics import stdev
from warnings import filterwarnings, resetwarnings
from time import time
from datetime import datetime
from os import mkdir, listdir
from shutil import rmtree
import concurrent
import matplotlib.pyplot as plt
import seaborn as sns
from multiprocessing import Process, Queue
def get_single_dataset(q, dataset_did, dataset_name):
dataset = openml.datasets.get_dataset(dataset_did)
print(f" Loaded {dataset_name} from openml.org")
q.put(dataset)
class DatasetsTester:
"""
Tool to compare predictors (classifiers or regressors) on a set of datasets collected from openml.org.
This simplifies automatically comparing the performance of predictors on potentially large numbers
of datasets, thereby supporting more thorough and accurate testing of predictors.
"""
# have the directories and problem type set here
def __init__(self, problem_type, path_local_cache=""):
"""
problem_type: str
Either "classification" or "regression"
All estimators will be compared using the same metric, so it is necessary that all
datasets used are of the same type.
path_local_cache: str
Folder identifying the local cache of datasets, stored in .csv format.
"""
self.problem_type = problem_type
self.path_local_cache = path_local_cache
self.openml_df = None
def check_problem_type(self):
problem_type_okay = self.problem_type in ["classification", "regression", "both"]
if not problem_type_okay:
print("problem_type must be one of: 'classification', 'regression', 'both'")
return problem_type_okay
def find_by_name(self, names_arr):
"""
Identifies, but does not collect, the set of datasets meeting the specified set of names.
Parameters
----------
names_arr: array of dataset names
Returns
-------
dataframe with a row for each dataset on openml meeting the specified set of names.
"""
if not self.check_problem_type():
return None
self.openml_df = openml.datasets.list_datasets(output_format="dataframe")
self.openml_df = self.openml_df[self.openml_df.name.isin(names_arr)]
return self.openml_df
def find_by_tag(self, my_tag):
"""
Identifies, but does not collect, the set of datasets attached to the specified tag.
Parameters
----------
my_tag: the dataset tag
Returns
-------
dataframe with a row for each dataset on openml meeting the specified tag.
"""
if not self.check_problem_type():
return None
self.openml_df = openml.datasets.list_datasets(tag=my_tag, output_format="dataframe")
return self.openml_df
def find_datasets(self,
use_cache=True,
min_num_classes=2,
max_num_classes=10,
min_num_minority_class=5,
max_num_minority_class=np.inf,
min_num_features=0,
max_num_features=100,
min_num_instances=500,
max_num_instances=5000,
min_num_numeric_features=0,
max_num_numeric_features=50,
min_num_categorical_features=0,
max_num_categorical_features=50):
"""
Identifies, but does not collect, the set of datasets meeting the specified criteria.
This, find_by_name(), or find_by_tag() must be called to identify the potential set of datasets to be collected.
Parameters
----------
All other parameters are direct checks of the statistics about each dataset provided by openml.org.
Returns
-------
dataframe with a row for each dataset on openml meeting the specified set of criteria.
"""
if not self.check_problem_type():
return None
if self.problem_type == "classification" and (min_num_classes <= 0 or max_num_classes <= 0):
print("For classification datasets, both min_num_classes and max_num_classes must be specified.")
return None
read_dataset_list = False # Set True if manage to read from cache. Otherwise read from openml.org.
if use_cache and self.path_local_cache != "":
try:
path_to_file = self.path_local_cache + "/dataset_list.csv"
self.openml_df = pd.read_csv(path_to_file)
read_dataset_list = True
except Exception as e:
if "No such file or directory:" not in str(e):
print(f" Error reading file: {e}")
else:
print(" File not found in cache.")
if not read_dataset_list:
self.openml_df = openml.datasets.list_datasets(output_format="dataframe")
if use_cache and self.path_local_cache != "":
try:
mkdir(self.path_local_cache)
except FileExistsError:
pass
except Exception as e:
print(f"Error creating local cache folder: {e}")
path_to_file = self.path_local_cache + "/dataset_list.csv"
self.openml_df.to_csv(path_to_file)
# Filter out datasets where some key attributes are unspecified
self.openml_df = self.openml_df[
(np.isnan(self.openml_df.NumberOfFeatures) == False) &
(np.isnan(self.openml_df.NumberOfInstances) == False) &
(np.isnan(self.openml_df.NumberOfInstancesWithMissingValues) == False) &
(np.isnan(self.openml_df.NumberOfMissingValues) == False) &
(np.isnan(self.openml_df.NumberOfNumericFeatures) == False) &
(np.isnan(self.openml_df.NumberOfSymbolicFeatures) == False)
]
self.openml_df = self.openml_df[
(self.openml_df.NumberOfFeatures >= min_num_features) &
(self.openml_df.NumberOfFeatures <= max_num_features) &
(self.openml_df.NumberOfInstances >= min_num_instances) &
(self.openml_df.NumberOfInstances <= max_num_instances) &
(self.openml_df.NumberOfNumericFeatures >= min_num_numeric_features) &
(self.openml_df.NumberOfNumericFeatures <= max_num_numeric_features) &
(self.openml_df.NumberOfSymbolicFeatures >= min_num_categorical_features) &
(self.openml_df.NumberOfSymbolicFeatures <= max_num_categorical_features)
]
if self.problem_type == "classification":
self.openml_df = self.openml_df[
(np.isnan(self.openml_df.MajorityClassSize) == False) &
(np.isnan(self.openml_df.MaxNominalAttDistinctValues) == False) &
(np.isnan(self.openml_df.MinorityClassSize) == False) &
(np.isnan(self.openml_df.NumberOfClasses) == False)
]
self.openml_df = self.openml_df[
(self.openml_df.NumberOfClasses >= min_num_classes) &
(self.openml_df.NumberOfClasses <= max_num_classes) &
(self.openml_df.MinorityClassSize >= min_num_minority_class) &
(self.openml_df.MinorityClassSize <= max_num_minority_class)
]
if self.problem_type == "regression":
self.openml_df = self.openml_df[self.openml_df.NumberOfClasses == 0]
return self.openml_df
def collect_data(self,
max_num_datasets_used=-1,
method_pick_sets="pick_random",
shuffle_random_state=0,
exclude_list=None,
use_automatic_exclude_list=False,
max_cat_unique_vals=20,
keep_duplicated_names=False,
check_local_cache=False,
check_online=True,
save_local_cache=False,
preview_data=False,
one_hot_encode=True,
fill_nan_and_inf_zero=True,
verbose=False):
"""
This method collects the data from openml.org, unless check_local_cache is True and the dataset is available
in the local folder. This will collect the specified subset of datasets identified by the most recent call
to find_by_name() or find_datasets(). This allows users to call those methods until a suitable
collection of datasets have been identified.
Parameters
----------
max_num_datasets_used: integer
The maximum number of datasets to collect.
method_pick_sets: str
If only a subset of the full set of matches are to be collected, this identifies if those
will be selected randomly, or simply using the first matches
shuffle_random_state: int
Where method_pick_sets is "pick_random", this is used to shuffle the order of the datasets
exclude_list: array
list of names of datasets to exclude
use_automatic_exclude_list: bool
If set True, any files that can't be loaded will be appended to a list and subsequent calls will not attempt
to load them. This may be set to save time. However, if there are errors simply due to internet problems or
temporary issues, this may erroneously exclude some datasets.
max_cat_unique_vals: int
As categorical columns are one-hot encoded, it may not be desirable to one-hot encode categorical
columns with large numbers of unique values. Columns with a greater number of unique values than
max_cat_unique_vals will be dropped.
keep_duplicated_names: bool
If False, for each set of datasets with the same name, only the one with the highest
version number will be used. In some cases, different versions of a dataset are significantly different.
save_local_cache: bool
If True, any collected datasets will be saved locally in path_local_cache
check_local_cache: bool
If True, before collecting any datasets from openml.org, each will be checked to determine if
it is already stored locally in path_local_cache
check_online: bool
If True, openml.org may be checked for the dataset, unless check_local_cache is True and the dataset has
been cached.
preview_data: bool
Indicates if the first rows of each collected dataset should be displayed
one_hot_encode: bool
If True, categorical columns are one-hot encoded. This is necessary for many types of predictor, but
may be done elsewhere, for example in a pipeline passed to the run_tests() function.
fill_nan_and_inf_zero: bool
If True, all instances of NaN, inf and -inf are replaced with 0.0. Replacing these values with something
valid is necessary for many types of predictor, but may be done elsewhere, for example in a pipeline passed
to the run_tests() function.
verbose: bool
If True, messages will be displayed indicating errors collecting any datasets.
Returns
-------
dataset_collection: dictionary containing: index in this collection, dataset_name, version, X, y
This method will attempt to collect as many datasets as specified, even where additional datasets must
be examined.
"""
def append_auto_exclude_list(did):
if not use_automatic_exclude_list:
return
auto_exclude_list.append(did)
def read_auto_exclude_list():
nonlocal auto_exclude_list
if not use_automatic_exclude_list or self.path_local_cache == "":
return
try:
path_to_file = self.path_local_cache + "/exclude_list.csv"
auto_list_df = pd.read_csv(path_to_file)
except Exception as e:
print(f" Error reading file: {e}")
return
auto_exclude_list = auto_list_df['List'].tolist()
def save_auto_exclude_list():
nonlocal auto_exclude_list
if not use_automatic_exclude_list or self.path_local_cache == "" or len(auto_exclude_list) == 0:
return
try:
mkdir(self.path_local_cache)
except FileExistsError:
pass
except Exception as e:
print(f"Error creating local cache folder: {e}")
path_to_file = self.path_local_cache + "/exclude_list.csv"
pd.DataFrame({'List': auto_exclude_list}).to_csv(path_to_file)
assert method_pick_sets in ['pick_first', 'pick_random']
q = Queue()
if self.openml_df is None or len(self.openml_df) == 0:
print("Error. No datasets specified. Call find_datasets() or find_by_name() before collect_data().")
return None
if not keep_duplicated_names:
self.openml_df = self.openml_df.drop_duplicates(subset=["name"], keep="last")
self.dataset_collection = []
#if max_num_datasets_used > -1 and max_num_datasets_used < len(self.openml_df) and method_pick_sets == "pick_random":
if -1 < max_num_datasets_used < len(self.openml_df) and method_pick_sets == "pick_random":
openml_subset_df = self.openml_df.sample(frac=1, random_state=shuffle_random_state)
else:
openml_subset_df = self.openml_df
auto_exclude_list = []
read_auto_exclude_list()
usable_dataset_idx = 0
for dataset_idx in range(len(openml_subset_df)):
if (max_num_datasets_used > -1) and (len(self.dataset_collection) >= max_num_datasets_used):
break
dataset_did = int(openml_subset_df.iloc[dataset_idx].did)
dataset_name = openml_subset_df.iloc[dataset_idx]['name']
dataset_version = openml_subset_df.iloc[dataset_idx]['version']
if not exclude_list is None and dataset_name in exclude_list:
continue
if dataset_did in auto_exclude_list:
continue
print(f"Collecting {usable_dataset_idx}: {dataset_name}")
dataset_df = None
dataset_source = ""
if check_local_cache:
try:
path_to_file = self.path_local_cache + "/" + dataset_name + '.csv'
X_with_y = pd.read_csv(path_to_file)
dataset_df = X_with_y.drop("y", axis=1)
y = X_with_y["y"]
dataset_source = "cache"
except Exception as e:
if "No such file or directory:" not in str(e):
print(f" Error reading file: {e}")
else:
print(" File not found in cache.")
dataset_df = None
if not check_online and dataset_df is None:
continue
if dataset_df is None:
p = Process(target=get_single_dataset, name="get_single_dataset", args=(q, dataset_did, dataset_name))
p.start()
p.join(timeout=20)
if q.empty():
print(f" Unable to collect {dataset_name} from openml.org")
append_auto_exclude_list(dataset_did)
continue
dataset = q.get()
try:
X, y, categorical_indicator, attribute_names = dataset.get_data(
dataset_format="dataframe",
target=dataset.default_target_attribute
)
except Exception as e:
if verbose:
print(f" Error collecting file with did: {dataset_did}, name: {dataset_name}. Error: {e}")
append_auto_exclude_list(dataset_did)
continue
if X is None or y is None:
if verbose:
print(f" Error collecting file with did: {dataset_did}, name: {dataset_name}. X or y is None")
append_auto_exclude_list(dataset_did)
continue
dataset_df = pd.DataFrame(X, columns=attribute_names)
if len(dataset_df) != len(y):
if verbose:
print(f" Error collecting file with did: {dataset_did}, name: {dataset_name}. Number rows in X: {len(X)}. Number rows in y: {len(y)}")
append_auto_exclude_list(dataset_did)
continue
if preview_data:
print(dataset_df.head())
if save_local_cache:
try:
mkdir(self.path_local_cache)
except FileExistsError:
pass
except Exception as e:
print(f"Error creating local cache folder: {e}")
X_with_y = dataset_df.copy()
X_with_y['y'] = y
X_with_y.to_csv(self.path_local_cache + "/" + dataset_name + '.csv', index=False)
if (self.problem_type == "regression") and (is_numeric_dtype(y) == False):
continue
if dataset_source == "cache":
print(f" Reading from local cache: {usable_dataset_idx}, id: {dataset_did}, name: {dataset_name}")
else:
print(f" Loading dataset from openml: {usable_dataset_idx}, id: {dataset_did}, name: {dataset_name}")
dataset_df = self.__clean_dataset(dataset_df, max_cat_unique_vals, one_hot_encode,
fill_nan_and_inf_zero)
self.dataset_collection.append({'Index': usable_dataset_idx,
'Dataset_name': dataset_name,
'Dataset_version': dataset_version,
'X': dataset_df,
'y': y})
usable_dataset_idx += 1
save_auto_exclude_list()
def __clean_dataset(self, X, max_cat_unique_vals, one_hot_encode, fill_nan_and_inf_zero):
# The categorical_indicator provided by openml isn't 100% reliable, so we also check pandas' is_numeric_dtype
categorical_indicator = [False] * len(X.columns)
for c in range(len(X.columns)):
if not is_numeric_dtype(X[X.columns[c]]):
categorical_indicator[c] = True
# Remove any NaN or inf values
if fill_nan_and_inf_zero:
for c_idx, col_name in enumerate(X.columns):
if categorical_indicator[c_idx] == True:
if hasattr(X[col_name], "cat"):
X[col_name] = X[col_name].cat.add_categories("").fillna("")
else:
X[col_name] = X[col_name].fillna("")
else:
X[col_name] = X[col_name].fillna(0.0)
# One-hot encode the categorical columns
if one_hot_encode:
new_df = pd.DataFrame()
for c in range(len(categorical_indicator)):
col_name = X.columns[c]
if categorical_indicator[c] == True:
if X[col_name].nunique() > max_cat_unique_vals:
pass
else:
one_hot_cols = pd.get_dummies(X[col_name], prefix=col_name, dummy_na=True, drop_first=False)
new_df = pd.concat([new_df, one_hot_cols], axis=1)
else:
new_df[col_name] = X[col_name]
X = new_df
return X.reset_index(drop=True)
def get_dataset_collection(self):
return self.dataset_collection
def get_dataset(self, dataset_name):
"""
Returns a single dataset.
Parameters
----------
dataset_name: str
The name as it appears in the openml list of datasets
Returns
-------
Returns both the X and y. Returns the first match if multiple versions are present.
"""
for dataset_dict in self.dataset_collection:
# if dataset_name == dataset_name_:
if dataset_dict['Dataset_name'] == dataset_name:
return dataset_dict['X'], dataset_dict['y']
return None, None
def run_tests(self,
estimators_arr,
num_cv_folds=5,
scoring_metric='',
show_warnings=False,
starting_point=0,
ending_point=np.inf,
partial_result_folder="",
results_folder="",
run_parallel=False):
"""
Evaluate all estimators on all datasets.
Parameters
----------
estimators_arr: array of tuples, with each tuple containing:
str: estimator name,
str: a description of the features used
str: a description of the hyper-parameters used
estimator: the estimator to be used. This should not be fit yet, just have the hyper-parameters set.
num_cv_folds: int
the number of folds to be used in the cross validation process used to evaluate the predictor
scoring_metric: str
one of the set of scoring metrics supported by sklearn. Set to '' to indicate to use the default.
The default for classification is f1_macro and for regression is normalized root mean square error.
show_warnings: bool
if True, warnings will be presented for calls to cross_validate(). These can get very long and in some
cases may affect only a minority of the dataset-predictor combinations, so this is False by default. Users
may wish to set to True to determine the causes of any NaNs in the final summary dataframe.
starting_point: int
This may be used to resume long-running tests where previous runs have not completed the full test or
where previous calls to this method set ending_point
ending_point: int
This may be used to divide up the datasets, potentially to spread the work over a period of time, or
to use some datasets purely for testing.
partial_result_folder: string
path to folder where partial results are saved.
results_folder: string
path to folder where results are saved.
run_parallel: bool
If set to True, the datasets will be tested in parallel. This speeds up computation, but is set to
False by default as it makes the print output harder to follow and the process of recovering from
partial runs more complicated.
Returns
-------
a dataframe summarizing the performance of the estimators on each dataset. There is one row
for each combination of dataset and estimator.
the name of the saved results if any were saved
"""
self.estimators_arr = estimators_arr
scoring_metric_specified = True
if self.problem_type == "classification":
if scoring_metric == '':
scoring_metric_specified = False
scoring_metric = 'f1_macro'
elif self.problem_type == "regression":
if scoring_metric == '':
scoring_metric_specified = False
scoring_metric = 'neg_root_mean_squared_error'
else:
assert False, "problem type must be 'classification' or 'regression' if running tests. "
# Dataframes used to store the test results
column_names = ['Dataset Index',
'Dataset',
'Dataset Version',
'Model',
'Feature Engineering Description',
'Hyperparameter Description']
if scoring_metric_specified == False and self.problem_type == "regression":
column_names.append('Avg NRMSE')
else:
column_names.append('Avg ' + scoring_metric)
column_names += [
'Std dev between folds',
'Train-Test Gap',
'# Columns',
'Model Complexity',
'Fit Time']
summary_df = pd.DataFrame(columns=column_names)
if show_warnings:
filterwarnings('default')
else:
filterwarnings('ignore')
self.__create_folders(starting_point, ending_point, partial_result_folder, results_folder)
print(f"\nRunning test on {len(self.dataset_collection)} datastets")
if not run_parallel:
summary_df = self.run_subset(summary_df,
starting_point,
ending_point,
partial_result_folder,
num_cv_folds,
scoring_metric,
scoring_metric_specified)
else:
ending_point = min(ending_point, len(self.dataset_collection) - 1)
process_arr = []
with concurrent.futures.ProcessPoolExecutor() as executor:
for dataset_idx in range(starting_point, ending_point + 1):
print(f"Starting process for dataset: {dataset_idx}")
f = executor.submit(self.run_subset,
summary_df,
dataset_idx,
dataset_idx,
partial_result_folder,
num_cv_folds,
scoring_metric,
scoring_metric_specified)
process_arr.append(f)
for f in process_arr:
summary_df = summary_df.append(f.result())
resetwarnings()
if starting_point > 0 and partial_result_folder != "":
summary_df = self.__get_previous_results(summary_df, partial_result_folder)
summary_file_name = ""
if ending_point >= (len(self.dataset_collection) - 1) and results_folder != "" and len(summary_df) > 0:
n = datetime.now()
dt_string = n.strftime("%d_%m_%Y_%H_%M_%S")
summary_file_name = "results_" + dt_string
final_file_name = results_folder + "\\" + summary_file_name + ".csv"
print(f"Writing results to {final_file_name}")
summary_df.to_csv(final_file_name, index=False)
self.__remove_partial_results(partial_result_folder)
return summary_df.reset_index(drop=True), summary_file_name
def run_subset(self, summary_df, starting_point, ending_point, partial_result_folder, num_cv_folds, scoring_metric,
scoring_metric_specified):
for dataset_dict in self.dataset_collection:
# dataset_index, dataset_name, version, X, y = dataset_tuple
dataset_index = dataset_dict['Index']
dataset_name = dataset_dict['Dataset_name']
version = dataset_dict['Dataset_version']
X = dataset_dict['X']
y = dataset_dict['y']
# Normally the dataset_index values are sequential within the dataset_collection, but
# this handles where they are not.
if dataset_index < starting_point:
continue
if dataset_index > ending_point:
continue
print(f"Running tests on dataset index: {dataset_index}, dataset: {dataset_name}")
for estimator_desc in self.estimators_arr:
model_name, engineering_description, hyperparameters_description, clf = estimator_desc
print(
f"\tRunning tests with model: {model_name} ({engineering_description}), ({hyperparameters_description})")
scores = cross_validate(clf, X, y, cv=num_cv_folds, scoring=scoring_metric, return_train_score=True,
return_estimator=True)
print(f"\tscores for {model_name}: {scores['test_score']}")
train_scores = scores['train_score']
test_scores = scores['test_score']
if scoring_metric_specified == False and self.problem_type == "regression":
# Convert from neg_root_mean_squared_error to NRMSE
train_scores = abs(train_scores / (y.mean()))
test_scores = abs(test_scores / (y.mean()))
avg_test_score = test_scores.mean()
scores_std_dev = stdev(test_scores)
avg_train_score = train_scores.mean()
avg_fit_time = scores['fit_time'].mean()
# Model Complexity is currently only supported for decision trees, and measures the number of nodes.
estimators_arr = scores['estimator']
if type(estimators_arr[0]) == Pipeline:
for p_idx in range(len(estimators_arr[0])):
short_estimators_arr = [e[p_idx] for e in estimators_arr]
model_complexity = self.__check_model_complexity(short_estimators_arr)
if model_complexity > 0:
break
else:
model_complexity = self.__check_model_complexity(estimators_arr)
summary_row = [dataset_index,
dataset_name,
version,
model_name,
engineering_description,
hyperparameters_description,
avg_test_score,
scores_std_dev,
avg_train_score - avg_test_score,
len(X.columns),
model_complexity,
avg_fit_time]
summary_df = summary_df.append(pd.DataFrame([summary_row], columns=summary_df.columns))
if (partial_result_folder != ""):
intermediate_file_name = partial_result_folder + "\\intermediate_" + str(dataset_index) + ".csv"
summary_df.to_csv(intermediate_file_name, index=False)
return summary_df
def run_tests_parameter_search(
self,
estimators_arr,
parameters_arr,
search_method='random',
num_cv_folds=5,
scoring_metric='',
show_warnings=False,
starting_point=0,
ending_point=np.inf,
partial_result_folder="",
results_folder="",
run_parallel=False):
"""
Evaluate all estimators on all datasets.
Parameters
----------
All parameters are the same as in run_tests() with the addition of:
parameters_arr: array of dictionaries
Each dictionary describes the range of parameters to be tested on the matching estimator
search_method: str
Either "grid" or "random"
Returns
-------
a dataframe summarizing the performance of the estimators on each dataset. There is one row
for each combination of dataset and estimator.
"""
assert search_method in ['grid', 'random']
self.estimators_arr = estimators_arr
self.parameters_arr = parameters_arr
scoring_metric_specified = True
if self.problem_type == "classification":
if scoring_metric == '':
scoring_metric_specified = False
scoring_metric = 'f1_macro'
elif self.problem_type == "regression":
if scoring_metric == '':
scoring_metric_specified = False
scoring_metric = 'neg_root_mean_squared_error'
else:
assert False, "problem_type must be 'classification' or 'regression' to run tests."
# Dataframes used to store the test results
column_names = ['Dataset Index',
'Dataset',
'Dataset Version',
'Model',
'Feature Engineering Description',
'Hyperparameter Description']
if not scoring_metric_specified and self.problem_type == "regression":
column_names.append('NRMSE')
else:
column_names.append(scoring_metric)
column_names += [
'Train-Test Gap',
'# Columns',
'Model Complexity',
'Fit Time',
'Best Hyperparameters']
summary_df = pd.DataFrame(columns=column_names)
if show_warnings:
filterwarnings('default')
else:
filterwarnings('ignore')
self.__create_folders(starting_point, ending_point, partial_result_folder, results_folder)
print(f"\nRunning test on {len(self.dataset_collection)} datastets")
if not run_parallel:
summary_df = self.run_subset_cv_parameter_search(
summary_df,
starting_point,
ending_point,
partial_result_folder,
num_cv_folds,
search_method,
scoring_metric,
scoring_metric_specified)
else:
ending_point = min(ending_point, len(self.dataset_collection) - 1)
process_arr = []
with concurrent.futures.ProcessPoolExecutor() as executor:
for dataset_idx in range(starting_point, ending_point + 1):
print(f"Starting process for dataset: {dataset_idx}")
f = executor.submit(self.run_subset_cv_parameter_search,
summary_df,
dataset_idx,
dataset_idx,
partial_result_folder,
num_cv_folds,
search_method,
scoring_metric,
scoring_metric_specified)
process_arr.append(f)
for f in process_arr:
summary_df = summary_df.append(f.result())
resetwarnings()
if starting_point > 0 and partial_result_folder != "":
summary_df = self.__get_previous_results(summary_df, partial_result_folder)
summary_file_name = ""
if ending_point >= (len(self.dataset_collection) - 1) and results_folder != "" and len(summary_df) > 0:
n = datetime.now()
dt_string = n.strftime("%d_%m_%Y_%H_%M_%S")
summary_file_name = "results_" + dt_string
final_file_name = results_folder + "\\" + summary_file_name + ".csv"
print(f"Writing results to {final_file_name}")
summary_df.to_csv(final_file_name, index=False)
self.__remove_partial_results(partial_result_folder)
return summary_df.reset_index(drop=True), summary_file_name
def run_subset_cv_parameter_search(self,
summary_df,
starting_point,
ending_point,
partial_result_folder,
num_cv_folds,
search_method,
scoring_metric,
scoring_metric_specified):
for dataset_dict in self.dataset_collection:
dataset_index = dataset_dict['Index']
dataset_name = dataset_dict['Dataset_name']
version = dataset_dict['Dataset_version']
X = dataset_dict['X']
y = dataset_dict['y']
if dataset_index < starting_point:
continue
if dataset_index > ending_point:
continue
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42)
print(f"Running tests on dataset: {dataset_index}: {dataset_name}")
for estimator_idx, estimator_desc in enumerate(self.estimators_arr):
model_name, engineering_description, hyperparameters_description, estimator = estimator_desc
parameters = self.parameters_arr[estimator_idx]
print(
f"\tRunning tests with model: {model_name} ({engineering_description}), ({hyperparameters_description})")
if search_method == "grid":
gs_estimator = GridSearchCV(estimator, parameters, scoring=scoring_metric)
elif search_method == "random":
gs_estimator = RandomizedSearchCV(estimator, parameters, scoring=scoring_metric, n_iter=10)
start_time = time()
gs_estimator.fit(X_train, y_train)
end_time = time()
y_pred_train = gs_estimator.predict(X_train)
y_pred_test = gs_estimator.predict(X_test)
if self.problem_type == "classification":
if scoring_metric == "f1_macro":
train_score = f1_score(list(y_pred_train), list(y_train), average="macro")
test_score = f1_score(list(y_pred_test), list(y_test), average="macro")
else:
assert False, "Only f1_macro currently supported."
else:
if self.problem_type == "regression":
if scoring_metric_specified == False or scoring_metric == "neg_root_mean_squared_error" or scoring_metric == "NRMSE":
# use RMSE (squared=False) so the score matches neg_root_mean_squared_error
train_score = (-1) * mean_squared_error(y_train, y_pred_train, squared=False)
test_score = (-1) * mean_squared_error(y_test, y_pred_test, squared=False)
if not scoring_metric_specified:
# Convert from neg_root_mean_squared_error to NRMSE
train_score = abs(train_score / (y.mean()))
test_score = abs(test_score / (y.mean()))
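# Worked example (illustrative numbers): an RMSE of 12.5 with a mean target
# value of 50.0 gives an NRMSE of 12.5 / 50.0 = 0.25.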
else:
assert False, "Only NRMSE and neg_root_mean_squared_error currently supported,"
print("\ttest_score: ", test_score)
if type(gs_estimator.best_estimator_) == Pipeline:
for p_idx in range(len(gs_estimator.best_estimator_)):
est = gs_estimator.best_estimator_[p_idx]
model_complexity = self.__check_model_complexity([est])
if (model_complexity > 0):
break
else:
model_complexity = self.__check_model_complexity([gs_estimator.best_estimator_])
summary_row = [dataset_index,
dataset_name,
version,
model_name,
engineering_description,
hyperparameters_description,
test_score,
train_score - test_score,
len(X.columns),
model_complexity,
round(end_time - start_time, 2),
str(gs_estimator.best_params_)]
summary_df = summary_df.append(pd.DataFrame([summary_row], columns=summary_df.columns))
if (partial_result_folder != ""):
intermediate_file_name = partial_result_folder + "\\intermediate_" + str(dataset_index) + ".csv"
summary_df.to_csv(intermediate_file_name, index=False)
return summary_df
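# A minimal usage sketch for run_tests_parameter_search() above, assuming
# `evaluator` is an instance of the evaluator class with its dataset_collection
# already populated; the estimator and parameter values are illustrative only.
#
# from sklearn.tree import DecisionTreeClassifier
#
# estimators_arr = [
#     ("Decision Tree", "Original Features", "Default", DecisionTreeClassifier())]
# parameters_arr = [
#     {"max_depth": [2, 3, 4, 5, 6], "min_samples_leaf": [1, 5, 10, 20]}]
# summary_df, summary_file_name = evaluator.run_tests_parameter_search(
#     estimators_arr, parameters_arr, search_method="random", num_cv_folds=5)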
# File: yta.py
# Author: <NAME>
#
# MIT License
#
# Copyright (c) 2020 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# --- IMPORTS ---
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
# ---------------
# --- FUNCTIONS TO CLASSIFY ELEMENTS ---
def classify_views(element):
if element > 1000000:
return 'Above one million'
else:
return 'Below one million'
def classify_likes(element):
if element > 20000:
return 'Above 20k'
else:
return 'Below 20k'
def classify_dislikes(element):
if element > 1000:
return 'Above 1k'
else:
return 'Below 1k'
def classify_comments(element):
if element > 1000:
return 'Above 1k'
else:
return 'Below 1k'
# ---------------
# --- DATA ACQUISITION ---
data_canada = pd.read_csv('input/CAvideos.csv', encoding='utf8')
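# Illustrative follow-up (the column names 'views', 'likes', 'dislikes' and
# 'comment_count' are assumptions based on the standard YouTube trending
# dataset layout and may differ in the actual CSV): the classifiers above are
# typically applied column-wise with pandas.Series.apply.
#
# data_canada['views_class'] = data_canada['views'].apply(classify_views)
# data_canada['likes_class'] = data_canada['likes'].apply(classify_likes)
# data_canada['dislikes_class'] = data_canada['dislikes'].apply(classify_dislikes)
# data_canada['comments_class'] = data_canada['comment_count'].apply(classify_comments)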
# -*- coding: utf-8 -*-
from . import helpers
import os
import pandas as pd
import csv
import urllib.request
import json
import numpy as np
class DLGeoJSON:
""" DLGeoJSON
Library for downloading prefecture/city boarder geojson files of Japan.
This module search and download prefecture/city boarder line geojson files
from github(https://github.com/niiyz/JapanCityGeoJson).
Boarder json files will be donloaded to selected directory (default is ./geojson).
Also, you can import json files to your program as pandas DataFrame.
Example (Automatic Download and import):
1. Import package to your program.
`import dl_jp_geojson as dl`
2. Initiate the instance.
`dlgeojson = dl.core.DLGeoJSON()`
3. Search geojson and return pandas DataFrame
`jsondf = dlgeojson.read_geo_json("宮古")`
Example (Manual Download and import):
1. Import package to your program.
`import dl_jp_geojson as dl`
2. Initiate the instance.
`dlgeojson = dl.core.DLGeoJSON()`
3. Search json file by prefecture or city name.
`founddf = dlgeojson.search_word("宮古")`
4. Download json files.
`dlgeojson.download_files(founddf)`
5. Import json files to pandas
`jsondf = dlgeojson.import2pandas(founddf)`
Output pandas format:
The code demonstrated above will return the pandas DataFrame `jsondf` in the following format.
lons lats
0 [142.020434228, 142.020442892, 142.020451141, ... [39.499224532, 39.499222279, 39.499222306, 39....
1 [142.019816978, 142.019806744, 142.019796498, ... [39.499232838, 39.499232559, 39.499232586, 39....
2 [142.0174893, 142.017482477, 142.017477497, 14... [39.499561559, 39.499561532, 39.499564, 39.499...
------------------------------------------------------------------
Methods:
__init__(dir='./geojson')
Initiate instance.
Parameters:
- dir [str]
Specifies the directory to download geojson files.
Default is `./geojson`.
search_word(wordin, algorithm="or")
Search geojson files including `wordin` phrase.
Both prefecture name and city name are searched.
Returns found geojson file information in pandas.DataFrame format
with columns: ["prefecture_name", "prefecture_code", "city_name", "city_code"].
Parameters:
- wordin [str or List[str]]
Search word phrase. Required.
Both str and List[str] is accepted.
- algorithm="or" ["or" or "and"]
Search algorithm for multiple phrases given at wordin.
"or": search phrases with `or`. (i.e. maches with geojson which has at least 1 phrase in its name)
"and": search phrases with `and`. (i.e. maches with geojson which has all phrases in its name)
Only valid when List[str] is given as wordin.
Default is "or".
download_files(founddf)
Download geojson files.
Geojson data specified in each row of `founddf`(pandas.DataFrame returned from search_word()) will be downloaded.
Parameters:
- founddf [pandas.DataFrame]
Desired geojson information.
Return data of search_word() should be specified here.
import2pandas(founddf)
Import geojson data as pandas.DataFrame.
Geojson data specified in each row of `founddf` (pandas.DataFrame returned from search_word()) will be imported.
This should be called after download is completed.
Parameters:
- founddf [pandas.DataFrame]
Desired geojson information.
Return data of search_word() should be specified here.
Output pandas format:
lons lats
0 [142.020434228, 142.020442892, 142.020451141, ... [39.499224532, 39.499222279, 39.499222306, 39....
1 [142.019816978, 142.019806744, 142.019796498, ... [39.499232838, 39.499232559, 39.499232586, 39....
2 [142.0174893, 142.017482477, 142.017477497, 14... [39.499561559, 39.499561532, 39.499564, 39.499...
read_geo_json(wordin, algorithm="or")
Method for automatically process search_word(),download_files(), and import2pandas().
Parameters:
- wordin [str or List[str]]
Search word phrase. Required.
Both str and List[str] is accepted.
- algorithm="or" ["or" or "and"]
Search algorithm for multiple phrases given at wordin.
"or": search phrases with `or`. (i.e. maches with geojson which has at least 1 phrase in its name)
"and": search phrases with `and`. (i.e. maches with geojson which has all phrases in its name)
Only valid when List[str] is given as wordin.
Default is "or".
"""
def __init__(self, dir='./geojson'):
"""__init__(dir='./geojson')
Initiate instance.
Parameters:
- dir [str]
Specifies the directory to download geojson files.
Default is `./geojson`.
"""
self.directory = self.set_directory(dir)
self.df = self.readcsv()
def set_directory(self, dir):
if self.find_directory(dir):
pass
else:
self.make_directory(dir)
return dir
def find_directory(self, dir):
return os.path.exists(dir)
def make_directory(self, dir):
os.mkdir(dir)
def readcsv(self):
pkjpath = os.path.abspath(os.path.dirname(__file__))
csvdf = pd.read_csv("{}/data/prefecture_city.csv".format(pkjpath))
csvdf = csvdf.fillna(0)
csvdf["prefecture_code"] = csvdf["prefecture_code"].astype(int)
csvdf["city_code"] = csvdf["city_code"].astype(int)
return csvdf
def search_word(self, wordin, algorithm="or"):
"""search_word(wordin, algorithm="or")
Search geojson files including `wordin` phrase.
Returns found geojson file information in pandas.DataFrame format
with columns: ["prefecture_name", "prefecture_code", "city_name", "city_code"].
Parameters:
- wordin [str or List[str]]
Search word phrase. Required.
Both str and List[str] is accepted.
- algorithm="or" ["or" or "and"]
Search algorithm for multiple phrases given at wordin.
"or": search phrases with `or`. (i.e. maches with geojson which has at least 1 phrase in its name)
"and": search phrases with `and`. (i.e. maches with geojson which has all phrases in its name)
Only valid when List[str] is given as wordin.
Default is "or".
"""
if isinstance(wordin, str):
founddf = self.df.loc[self.df.loc[:, "prefecture_name"].str.contains(wordin) |
self.df.loc[:, "city_name"].str.contains(wordin)].copy()
elif isinstance(wordin, list):
founddf = False
targetdf = self.df.copy()
for word in wordin:
middf = targetdf.loc[targetdf.loc[:, "prefecture_name"].str.contains(word) |
targetdf.loc[:, "city_name"].str.contains(word)].copy()
if algorithm == "or":
if founddf is False:
founddf = middf
else:
founddf = pd.concat([founddf, middf])
elif algorithm == "and":
targetdf = middf
founddf = middf
return founddf[["prefecture_name", "prefecture_code", "city_name", "city_code"]]
def download_files(self, founddf, prefecture_mode=False):
"""download_files(founddf)
Download geojson files.
Geojson data specified in each row of `founddf`(pandas.DataFrame returned from search_word()) will be downloaded.
Parameters:
- founddf [pandas.DataFrame]
Desired geojson information.
Return data of search_word() should be specified here.
"""
if prefecture_mode is True:
founddf = founddf[
["prefecture_name", "prefecture_code"]].drop_duplicates()
for row in founddf.iterrows():
p_code = row[1]["prefecture_code"]
if prefecture_mode is False:
c_code = row[1]["city_code"]
else:
c_code = p_code
# check existing files:
c_str = '{0:05d}'.format(c_code)
fname = "{}.json".format(c_str)
pathfname = "{}/{}".format(self.directory, fname)
if self.find_directory(pathfname) is False:
# download files
self.download_json(p_code, c_code, prefecture_mode)
return None
def download_json(self, p_code, c_code, prefecture_mode=False):
if prefecture_mode is False:
p_str = '{0:02d}'.format(p_code)
c_str = '{0:05d}'.format(c_code)
url = "https://raw.githubusercontent.com/niiyz/JapanCityGeoJson/master/geojson/{}/{}.json".format(
p_str, c_str)
fname = "{}.json".format(c_str)
print(url)
urllib.request.urlretrieve(
url, "{}/{}".format(self.directory, fname))
else:
p_str = '{0:02d}'.format(p_code)
url = "https://raw.githubusercontent.com/niiyz/JapanCityGeoJson/master/geojson/prefectures/{}.json".format(
p_str)
fname = "{}.json".format(p_str)
print(url)
urllib.request.urlretrieve(
url, "{}/{}".format(self.directory, fname))
return None
def import2pandas(self, founddf):
"""import2pandas(founddf)
Import geojson data as pandas.DataFrame.
Geojson data specified in each row of `founddf` (pandas.DataFrame returned from search_word()) will be imported.
This should be called after download is completed.
Parameters:
- founddf [pandas.DataFrame]
Desired geojson information.
Return data of search_word() should be specified here.
Output pandas format:
lons lats
0 [142.020434228, 142.020442892, 142.020451141, ... [39.499224532, 39.499222279, 39.499222306, 39....
1 [142.019816978, 142.019806744, 142.019796498, ... [39.499232838, 39.499232559, 39.499232586, 39....
2 [142.0174893, 142.017482477, 142.017477497, 14... [39.499561559, 39.499561532, 39.499564, 39.499...
"""
geo_list = []
for row in founddf.iterrows():
p_code = row[1]["prefecture_code"]
c_code = row[1]["city_code"]
jsondata = self.readjson(p_code, c_code)
geo_list = geo_list + jsondata
imported_df = pd.DataFrame(geo_list, columns=["lons", "lats"])
return imported_df
""" This file contains a class and methods for Non-REM EEG segments
Notes:
- Analysis should output # of NaNs in the data
TO DO:
- ** update analyze_spindles for psd_i removal
- Optimize self.create_spindfs() method
- Assign NREM attributes to slots on init
- Update docstrings
- ** Export SO and SPSO analyses
"""
import datetime
import glob
#import joblib
import json
import os
import math
import numpy as np
import pandas as pd
import warnings
import xlsxwriter
from mne.time_frequency import psd_array_multitaper
from scipy.signal import butter, sosfiltfilt, sosfreqz, find_peaks
from scipy.optimize import OptimizeWarning, curve_fit
class NREM:
""" General class for nonREM EEG segments """
def __init__(self, fname=None, fpath=None, match=None, in_num=None, epoched=False, batch=False, lowpass_freq=25, lowpass_order=4,
laplacian_chans=None, replace_data=False):
""" Initialize NREM object
Parameters
----------
fname: str
filename (if loading a single dataframe)
fpath: str
absolute path to file(s) directory
match: str
string to match within the filename of all files to load (Ex: '_s2_')
in_num: str
IN number, for batch loading
epoched: bool (default: False)
whether data has been epoched (if loading a single dataframe)
batch: bool (default: True)
whether to load all matching files from the fpath directory
lowpass_freq: int or None (default: 25)
lowpass filter frequency *Used in visualizations ONLY*. must be < nyquist
lowpass_order: int (default: 4)
Butterworth lowpass filter order (doubles for filtfilt)
laplacian_chans: str, list, or None (default: None)
channels to apply laplacian filter to [Options: 'all', list of channel names, None]
For leading/lagging analysis, was using ['F3', 'F4', 'P3', 'P4']
replace_data: bool (default: False)
whether to replace primary data with laplacian filtered data
"""
if batch:
self.load_batch(fpath, match, in_num)
else:
filepath = os.path.join(fpath, fname)
in_num, start_date, slpstage, cycle = fname.split('_')[:4]
self.metadata = {'file_info':{'in_num': in_num, 'fname': fname, 'path': filepath,
'sleep_stage': slpstage,'cycle': cycle} }
if epoched is True:
self.metadata['file_info']['epoch'] = fname.split('_')[4]
# load the data
self.load_segment()
# apply laplacian
if laplacian_chans is not None:
self.metadata['analysis_info']['spatial_filter'] = 'laplacian'
data_lap = self.make_laplacian(laplacian_chans)
# replace data
if replace_data:
self.metadata['analysis_info']['RawData_replaced_wLaplacian'] = 'True'
self.data = data_lap
else:
self.metadata['analysis_info']['RawData_replaced_wLaplacian'] = 'False'
self.data_lap = data_lap
else:
self.metadata['analysis_info']['spatial_filter'] = 'None'
# apply lowpass filter
if lowpass_freq:
self.lowpass_raw(lowpass_freq, lowpass_order)
def load_segment(self):
""" Load eeg segment and extract sampling frequency. """
data = pd.read_csv(self.metadata['file_info']['path'], header = [0, 1], index_col = 0, parse_dates=True)
# Check cycle length against 5 minute duration minimum
cycle_len_secs = (data.index[-1] - data.index[0]).total_seconds()
self.data = data
diff = data.index.to_series().diff()[1:2]
# use floor to round down if not an integer
s_freq = math.floor(1000000/diff[0].microseconds)
self.metadata['file_info']['start_time'] = str(data.index[0])
self.metadata['analysis_info'] = {'s_freq': s_freq, 'cycle_len_secs': cycle_len_secs}
self.s_freq = s_freq
print('EEG successfully imported.')
def lowpass_raw(self, lowpass_freq=25, lowpass_order=4):
""" Lowpass the raw data [for visualization only -- Removes high-frequency artifacts]
Parameters
----------
lowpass_freq: int (default: 25)
lowpass frequency. must be < nyquist
lowpass_order: int (default: 4)
Butterworth lowpass filter order (doubles for filtfilt)
Returns
-------
self.channels: list of str
channel list (if not already created)
self.data_lowpass
"""
# set data
data = self.data
self.metadata['visualizations'] = {'lowpass_freq': lowpass_freq, 'lowpass_order_half': lowpass_order}
# make butterworth lowpass filter
nyquist = self.s_freq/2
data_lowpass = pd.DataFrame(index = data.index)
# adjust lowpass to nyquist
if lowpass_freq >= 1:
lowpass_freq = lowpass_freq/nyquist
# make filter
sos = butter(lowpass_order, lowpass_freq, btype='lowpass', output='sos')
# create channel attribute
channels = [x[0] for x in self.data.columns]
self.channels = channels
# filter the data
for i in channels:
# separate NaN and non-NaN values to avoid NaN filter output on cleaned data
data_nan = data[i][data[i]['Raw'].isna()]
data_notnan = data[i][data[i]['Raw'].isna() == False]
# filter notNaN data & add column to notNaN df
data_notnan_filt = sosfiltfilt(sos, data_notnan.to_numpy(), axis=0)
data_notnan['Filt'] = data_notnan_filt
# merge NaN & filtered notNaN values, sort on index
filt_chan = data_nan['Raw'].append(data_notnan['Filt']).sort_index()
# add to dataframe
data_lowpass[i] = filt_chan
# set dataframe columns
data_lowpass.columns = pd.MultiIndex.from_arrays([channels, np.repeat(('raw_lowpass'), len(channels))],names=['Channel','datatype'])
# use the lowpassed data
raw_lowpass_data = data_lowpass
self.data_lowpass = data_lowpass
# if calling a second time, replace zero-padded lowpass spindle values
if hasattr(self, 'spindles_zpad_lowpass'):
self.make_lowpass_zpad()
print('Zero-padded lowpass spindle values recalculated')
def load_batch(self, fpath, match, in_num):
""" Load a batch of EEG segments & reset index from absolute to relative time
Parameters
----------
fpath: str
absolute path to file(s) directory
match: str
string to match within the filename of all files to load (Ex: '_s2_')
in_num: str
IN number, for batch loading
TO DO: Throw error if IN doesn't match any files in folder
"""
if in_num == None:
in_num = input('Please specify IN number: ')
if match == None:
match = input('Please specify filename string to match for batch loading (ex. \'_s2_\'): ')
# get a list of all matching files
glob_match = f'{fpath}/*{match}*'
files = glob.glob(glob_match)
# load & concatenate files into a single dataframe
data = pd.concat((pd.read_csv(file, header = [0, 1], index_col = 0, parse_dates=True, low_memory=False) for file in files)).sort_index()
# extract sampling frequency
s_freq = 1/(data.index[1] - data.index[0]).total_seconds()
# reset the index to continuous time
ind_freq = str(int(1/s_freq*1000000))+'us'
ind_start = '1900-01-01 00:00:00.000'
ind = pd.date_range(start = ind_start, periods=len(data), freq=ind_freq)
data.index = ind
# set metadata & attributes
self.metadata = {'file_info':{'in_num': in_num, 'files': files, 'dir': fpath,
'match_phrase': match},
'analysis_info':{'s_freq': s_freq} }
self.data = data
self.s_freq = s_freq
def make_laplacian(self, chans):
""" Make laplacian spatial filter
Weights are determined by cartesian coordinate distance
ref1: https://hal.inria.fr/hal-01055103/document
ref2: https://arxiv.org/pdf/1406.0458.pdf
NOTE: Weights are solely determined by vector distance, NOT geometric arrangement
Parameters
----------
chans: str or list
channels to calculate laplacian for ('all' or list of channel names)
Returns
-------
self.metadata['laplacian_weights']: dict
dict of channels by 4 nearest neighbors and weights {chan: pd.Series(weight, index=neighbor_chan)}
data_lap: pd.DataFrame
laplacian filtered data for specified channels
"""
self.metadata['laplacian_weights'] = {}
# set channel names if filtering all
exclude = ['EOG_L','EOG_R', 'EKG']
channels = [x[0] for x in self.data.columns if x[0] not in exclude]
if chans == 'all':
chans = channels
# set a dict to move between casefold and raw data cols
cdict = {chan.casefold():chan for chan in channels}
def dist(ref):
""" calculate distance from reference channel """
ref_coords = coords.loc[ref]
rx = ref_coords['X']
ry = ref_coords['Y']
rz = ref_coords['Z']
dist_dict = {}
for chan in coords.index:
# calculate distance
cx, cy, cz = coords.loc[chan]['X'], coords.loc[chan]['Y'], coords.loc[chan]['Z']
d = np.sqrt((cx-rx)**2 + (cy-ry)**2 + (cz-rz)**2)
dist_dict[chan] = d
# convert to series then sort
dist_ser = pd.Series(dist_dict).sort_values()
return dist_ser
# load cartesian coords for all possible chans (10-5 montage)
all_coords = pd.read_csv('cartesian_coords.txt')
# set all chans as lowercase & make index
all_coords['Channel'] = [x.casefold() for x in all_coords.Channel]
all_coords.set_index('Channel', inplace=True)
# rename T3, T4, T5, T6 to T7, T8, P7, P8, to match change in 10-5 channel labels
# ref: http://www.jichi.ac.jp/brainlab/download/TenFive.pdf
rename = {'T3':'t7', 'T4':'t8', 'T5':'p7', 'T6':'p8'}
channels_cf = [x.casefold() if x not in rename.keys() else rename[x] for x in channels]
# grab cartesian coordinates
coords = all_coords.loc[channels_cf]
# undo renaming to revert to 10-20 conventions
undo_rename = {val:key.casefold() for key, val in rename.items()}
coords.rename(undo_rename, inplace=True)
data_lap = pd.DataFrame(index=self.data.index)
# calc nearest neighbors
for chan in chans:
c = chan.casefold()
# get neighbor distance & set weights for 4 closest neighbors
neighbors = dist(c)
n_weights = 1 - neighbors[1:5]
# calculate weighted neighbor data
weighted_neighbors = pd.DataFrame(index=self.data.index)
for neighbor, weight in n_weights.items():
# calculated weighted values
n_dat = self.data[cdict[neighbor]]*weight
# add to weighted data dict
weighted_neighbors[neighbor] = n_dat.values
# get sum of weighted data
weighted_sum = weighted_neighbors.sum(axis=1)
weighted_sum.name='weighted_sum'
# multiply ref chan by total weights
c_dat = cdict[c] # match capitalization to column names
c_weighted = self.data[c_dat]*n_weights.values.sum()
# subtract weighted channel data from weighted neighbors
lap = c_weighted.join(weighted_sum).diff(axis=1).weighted_sum
lap.name = c
data_lap[chan] = lap
# set metadata
self.metadata['laplacian_weights'][c] = n_weights
# set columns to match non-montaged data
data_lap.columns = pd.MultiIndex.from_arrays([chans, np.repeat(('Raw'), len(chans))],names=['Channel','datatype'])
return data_lap
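# Illustration of the weighting used above: for a reference channel c with its
# 4 nearest neighbors n1..n4 at cartesian distances d1..d4, the weights are
# w_i = 1 - d_i and the filtered value is
#     lap(c) = sum_i(w_i * n_i) - (sum_i w_i) * c
# i.e. the total-weight-scaled reference channel is subtracted from the
# weighted sum of its neighbors.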
## Spindle Detection Methods ##
# make attributes
def spindle_attributes(self):
""" Create attributes for spindle detection
Returns
-------
self.channels: list of str
channel names
self.spfiltEEG: pd.DataFrame
filtered EEG data
self.spRMS: pd.DataFrame
root mean square of filtered data
self.spRMSmavg: pd.DataFrame
moving average of the root mean square of the filtered data
self.spThresholds: pd.DataFrame
spindle detection thresholds by channel
self.spindle_events: dict
spindle event detections
self.spindle_rejects_t: dict
spindle rejects based on time domain criteria. format {chan: [spindle reject indices,...]}
self.spindle_rejects_f: dict
spindle rejects based on frequency domain criteria. format {chan: [spindle reject indices,...]}
"""
# check if channel list exists
try:
self.channels
except AttributeError:
# create if doesn't exist
self.channels = [x[0] for x in self.data.columns]
dfs =['spfiltEEG', 'spRMS', 'spRMSmavg'] # for > speed, don't store spRMS as an attribute
[setattr(self, df, pd.DataFrame(index=self.data.index)) for df in dfs]
self.spThresholds = pd.DataFrame(index=['Mean RMS', 'Low Threshold', 'High Threshold'])
self.spindle_events = {}
self.spindle_rejects_t = {}
self.spindle_rejects_f = {}
# step 1: make filter
def make_butter_sp(self, wn, order):
""" Make Butterworth bandpass filter [Parameters/Returns]
wn: list of int (default: [8, 16])
butterworth bandpass filter window
order: int (default: 4)
butterworth 1/2 filter order (applied forwards + backwards)
"""
nyquist = self.s_freq/2
wn_arr=np.asarray(wn)
if np.any(wn_arr <=0) or np.any(wn_arr >=1):
wn_arr = wn_arr/nyquist # must remake filter for each pt bc of differences in s_freq
self.sp_sos = butter(order, wn_arr, btype='bandpass', output='sos')
print(f"Zero phase butterworth filter successfully created: order = {order}x{order} bandpass = {wn}")
# step 2: filter channels
def spfilt(self, i):
""" Apply Butterworth bandpass to signal by channel """
# separate NaN and non-NaN values to avoid NaN filter output on cleaned data
data_nan = self.data[i][self.data[i]['Raw'].isna()]
data_notnan = self.data[i][self.data[i]['Raw'].isna() == False]
# filter notNaN data & add column to notNaN df
data_notnan_filt = sosfiltfilt(self.sp_sos, data_notnan.to_numpy(), axis=0)
data_notnan['Filt'] = data_notnan_filt
# merge NaN & filtered notNaN values, sort on index
filt_chan = data_nan['Raw'].append(data_notnan['Filt']).sort_index()
# add channel to main dataframe
self.spfiltEEG[i] = filt_chan
# steps 3-4: calculate RMS & smooth
def rms_smooth(self, i, sp_mw):
""" Calculate moving RMS (rectify) & smooth the EEG """
mw = int(sp_mw*self.s_freq) # convert moving window size from seconds to samples
# convolve for rolling RMS
datsq = np.power(self.spfiltEEG[i], 2)
window = np.ones(mw)/float(mw)
# convolution mode 'valid' will remove edge effects, but also introduce a time shift
# and downstream errors because it changes the length of the rms data
rms = np.sqrt(np.convolve(datsq, window, 'same'))
#spinfilt_RMS = pd.DataFrame(rms, index=self.data.index) --> add this back for > speed
self.spRMS[i] = rms # for > speed, don't store spinfilt_RMS[i] as an attribute
# smooth with moving average
rms_avg = self.spRMS[i].rolling(mw, center=True).mean()
self.spRMSmavg[i] = rms_avg
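# Worked example (illustrative numbers): with sp_mw = 0.2 s and s_freq = 256 Hz,
# mw = int(0.2*256) = 51 samples, used for both the RMS convolution window and
# the centered rolling mean.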
# step 5: set thresholds
def set_thres(self, i):
""" set spindle detection threshold levels, in terms of multiples of RMS SD """
mean_rms = float(np.mean(self.spRMSmavg[i]))
det_lo = float(mean_rms + self.metadata['spindle_analysis']['sp_loSD']*np.std(self.spRMSmavg[i]))
det_hi = float(mean_rms + self.metadata['spindle_analysis']['sp_hiSD']*np.std(self.spRMSmavg[i]))
self.spThresholds[i] = [mean_rms, det_lo, det_hi]
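# Worked example (illustrative numbers): with the detect_spindles() defaults
# sp_loSD = 0 and sp_hiSD = 1.5, a channel with mean RMS-mavg 2.0 uV and
# SD 0.8 uV gets Low Threshold = 2.0 + 0*0.8 = 2.0 uV and
# High Threshold = 2.0 + 1.5*0.8 = 3.2 uV.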
# step 6: detect spindles
def get_spindles(self, i, min_sep):
# vectorize data for detection looping
lo, hi = self.spThresholds[i]['Low Threshold'], self.spThresholds[i]['High Threshold']
mavg_varr, mavg_iarr = np.asarray(self.spRMSmavg[i]), np.asarray(self.spRMSmavg[i].index)
# initialize spindle event list & set pointer to 0
#self.spindle_events[i] = []
spindle_events = []
x=0
while x < len(self.data):
# if value crosses high threshold, start a fresh spindle
if mavg_varr[x] >= hi:
spindle = []
# count backwards to find previous low threshold crossing
for h in range(x, -1, -1):
# if a nan is encountered before the previous low crossing, break
if np.isnan(mavg_varr[h]):
break
elif mavg_varr[h] >= lo:
spindle.insert(0, mavg_iarr[h]) # add value to the beginning of the spindle
else:
break
# count forwards to find next low threshold crossing
for h in range(x+1, len(self.data), 1):
# if a nan is encountered before the next low crossing, break
if np.isnan(mavg_varr[h]):
break
# if above low threshold, add to current spindle
elif mavg_varr[h] >= lo and x < (len(self.data)-1):
spindle.append(mavg_iarr[h])
# if above low threshold and last value OR if nan, add to current spindle and add spindle to events list
elif (mavg_varr[h] >= lo and x == (len(self.data)-1)) or np.isnan(mavg_varr[h]): ## untested
spindle.append(mavg_iarr[h])
spindle_events.append(spindle)
#self.spindle_events[i].append(spindle)
# otherwise finish spindle & add to spindle events list
elif mavg_varr[h] < lo:
spindle_events.append(spindle)
#self.spindle_events[i].append(spindle)
break
# advance the pointer to the end of the spindle
x = h
# if value doesn't cross high threshold, advance
else:
x += 1
# combine spindles less than min_sep
spindle_events_msep = []
x = 0
while x < len(spindle_events)-1:
# if the following spindle is less than min_sep away
if (spindle_events[x+1][0] - spindle_events[x][-1])/np.timedelta64(1, 's') < min_sep:
# combine the two, append to list, and advance pointer by two
spindle_comb = spindle_events[x] + spindle_events[x+1]
spindle_events_msep.append(spindle_comb)
x += 2
else:
# otherwise, append spindle to list, advance pointer by 1
spindle_events_msep.append(spindle_events[x])
# if this is the second-to-last spindle, also add final spindle to list (bc not combining)
if x == (len(spindle_events)-2):
spindle_events_msep.append(spindle_events[x+1])
x += 1
self.spindle_events[i] = spindle_events_msep
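# Worked example of the min_sep merge above (illustrative timestamps): with
# min_sep = 0.2 s, a spindle ending at t = 10.00 s and the next starting at
# t = 10.15 s (0.15 s gap < 0.2 s) are combined into one event; a 0.25 s gap
# would leave them as separate detections.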
# step 7: apply time domain rejection criteria
def reject_spins_t(self, min_chans_r, min_chans_d, duration):
""" Reject spindles using time domain criteria:
1. reject spindles that occur over fewer than 3 channels.
2. Apply min duration thresholding to spindles that occur over fewer than X channels.
3. Apply max duration thresholding to all remaining spindles
[chans < min_chans_r = reject; min_chans_r < chans < min_chans_d = apply min duration threshold; x > max_dur = reject]
Parameters
----------
min_chans_r: int
minimum number of channels for spindles to occur across concurrently to bypass
automatic rejection
min_chans_d: int
minimum number of channels for spindles to occur across concurrently in order to
bypass duration criterion. performs best at 1/4 of total chans
duration: list of float
duration range (seconds) for spindle thresholding
Returns
-------
modified self.spindle_events and self.spindle_rejects_t attributes
"""
# convert duration from seconds to samples
#sduration = [x*self.s_freq for x in duration]
# make boolean mask for spindle presence
bool_dict = {}
for chan in self.spindle_events:
if chan not in ['EOG_L', 'EOG_R', 'EKG']:
spins_flat = [time for spindle in self.spindle_events[chan] for time in spindle]
bool_dict[chan] = np.isin(self.data.index.values, spins_flat)
spin_bool = pd.DataFrame(bool_dict, index = self.data.index.values)
spin_bool['chans_present'] = spin_bool.sum(axis=1)
spindle_rejects_t = {}
true_events = {}
spindle_events = self.spindle_events
# check individual spindles
for chan in spindle_events:
reject_idxs = []
for e, spin in enumerate(spindle_events[chan]):
spin_secs = (spin[-1] - spin[0])/np.timedelta64(1, 's')
# reject if present over less than min_chans_r channels
if not np.any(spin_bool['chans_present'].loc[spin] >= min_chans_r):
reject_idxs.append(e)
# Apply min duration threshold if not present over more than minimum # of channels
elif not np.any(spin_bool['chans_present'].loc[spin] >= min_chans_d):
# apply duration thresholding
if not duration[0] <= spin_secs <= duration[1]:
reject_idxs.append(e)
# Apply max duration threshold to all spindles left (regardless of # of chans)
else:
if spin_secs > duration[1]:
reject_idxs.append(e)
# append spins to rejects
spindle_rejects_t[chan] = [spindle_events[chan][idx] for idx in reject_idxs]
true_events[chan] = [spin for e, spin in enumerate(spindle_events[chan]) if e not in reject_idxs]
# replace values
self.spindle_rejects_t = spindle_rejects_t
self.spindle_events = true_events
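# Worked example of the criteria above (using the detect_spindles() defaults
# min_chans_r = 3, min_chans_d = 9, duration = [0.5, 3.0]): a spindle never
# present on 3+ channels concurrently is rejected outright; one present on at
# most 5 concurrent channels must last 0.5-3.0 s; one present on 10 concurrent
# channels is only subject to the 3.0 s maximum.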
# set multiIndex
def spMultiIndex(self):
""" combine dataframes into a multiIndex dataframe"""
# reset column levels
self.spfiltEEG.columns = pd.MultiIndex.from_arrays([self.channels, np.repeat(('Filtered'), len(self.channels))],names=['Channel','datatype'])
self.spRMS.columns = pd.MultiIndex.from_arrays([self.channels, np.repeat(('RMS'), len(self.channels))],names=['Channel','datatype'])
self.spRMSmavg.columns = pd.MultiIndex.from_arrays([self.channels, np.repeat(('RMSmavg'), len(self.channels))],names=['Channel','datatype'])
# list df vars for index specs
dfs =[self.spfiltEEG, self.spRMS, self.spRMSmavg] # for > speed, don't store spinfilt_RMS as an attribute
calcs = ['Filtered', 'RMS', 'RMSmavg']
lvl0 = np.repeat(self.channels, len(calcs))
lvl1 = calcs*len(self.channels)
# combine & custom sort
self.spindle_calcs = pd.concat(dfs, axis=1).reindex(columns=[lvl0, lvl1])
# step 8: create individual spindle dataframes
def create_spindfs(self, zmethod, trough_dtype, buff, buffer_len):
""" Create individual dataframes for individual spindles +/- a timedelta buffer
** NOTE: buffer doesn't have spinso filter incorporated
Parameters
----------
zmethod: str (default: 'trough')
method used to assign 0-center to spindles [options: 'trough', 'middle']. Trough assigns zero-center to
the deepest negative trough. Middle assigns zero center to the midpoint in time.
trough_dtype: str (default: 'spfilt')
Which data to use for picking the most negative trough for centering [options: 'Raw', 'spfilt']
buff: bool (default: False)
calculate spindle dataframes with buffer
buffer_len: int
length in seconds of buffer to calculate around 0-center of spindle
self.spindle_events: dict
dict of timestamps when spindles occur (created from self.detect_spindles())
self.data: pd.DataFrame
df containing raw EEG data
Returns
-------
self.spindles: nested dict of dfs
nested dict with spindle data by channel {channel: {spindle_num:spindle_data}}
self.spindles_wbuffer: nested dict of dfs
nested dict with spindle data w/ timedelta buffer by channel {channel: {spindle_num:spindle_data}}
"""
## create dict of dataframes for spindle analysis
print('Creating individual spindle dataframes...')
self.metadata['spindle_analysis']['zmethod'] = zmethod
self.metadata['spindle_analysis']['trough_datatype'] = trough_dtype
spindles = {}
for chan in self.spindle_events.keys():
spindles[chan] = {}
for i, spin in enumerate(self.spindle_events[chan]):
# create individual df for each spindle
spin_data = self.data[chan]['Raw'].loc[self.spindle_events[chan][i]]
spfilt_data = self.spfiltEEG[chan]['Filtered'].loc[self.spindle_events[chan][i]]
# try:
# spsofilt_data = self.spsofiltEEG[chan]['Filtered'].loc[self.spindle_events[chan][i]]
# # skip spsofilt if not yet calculated (if SO detections haven't been performed)
# except AttributeError:
# pass
# set new index so that each spindle is centered around zero
if zmethod == 'middle':
# this method could use some work
half_length = len(spin)/2
t_id = np.linspace(-half_length, half_length, int(2*half_length//1))
# convert from samples to ms
id_ms = t_id * (1/self.metadata['analysis_info']['s_freq']*1000)
elif zmethod == 'trough' and trough_dtype == 'Raw':
id_ms = (spin_data.index - spin_data.idxmin()).total_seconds()*1000
elif zmethod == 'trough' and trough_dtype == 'spfilt':
id_ms = (spfilt_data.index - spfilt_data.idxmin()).total_seconds()*1000
# create new dataframe
spindles[chan][i] = pd.DataFrame(index=id_ms)
spindles[chan][i].index = [int(x) for x in spindles[chan][i].index]
spindles[chan][i].index.name='id_ms'
spindles[chan][i]['time'] = spin_data.index
spindles[chan][i]['Raw'] = spin_data.values
spindles[chan][i]['spfilt'] = spfilt_data.values
try:
spindles[chan][i]['spsofilt'] = spsofilt_data.values
# skip spsofilt if not yet calculated (if SO detections haven't been performed)
except NameError:
pass
self.spindles = spindles
print('Spindle dataframes created. Spindle data stored in obj.spindles.')
if buff:
# now make buffered dataframes
print(f'Creating spindle dataframes with {buffer_len}s buffer...')
spindles_wbuffer = {}
for chan in self.spindles.keys():
spindles_wbuffer[chan] = {}
for i in self.spindles[chan].keys():
# get +/- buffer length from zero-center of spindle
start = self.spindles[chan][i]['time'].loc[0] - pd.Timedelta(seconds=buffer_len)
end = self.spindles[chan][i]['time'].loc[0] + pd.Timedelta(seconds=buffer_len)
spin_buffer_data = self.data[chan]['Raw'].loc[start:end]
# assign the delta time index
id_ms = (spin_buffer_data.index - self.spindles[chan][i]['time'].loc[0]).total_seconds()*1000
# create new dataframe
spindles_wbuffer[chan][i] = pd.DataFrame(index=id_ms)
spindles_wbuffer[chan][i].index = [int(x) for x in spindles_wbuffer[chan][i].index]
spindles_wbuffer[chan][i].index.name='id_ms'
spindles_wbuffer[chan][i]['time'] = spin_buffer_data.index
spindles_wbuffer[chan][i]['Raw'] = spin_buffer_data.values
self.spindles_wbuffer = spindles_wbuffer
print('Spindle dataframes with buffer stored in obj.spindles_wbuffer.')
def make_lowpass_zpad(self):
""" Construct zero-padded spindle and spindle reject dictionaries for lowpass filtered data.
Needed for sleepyplot.spec_spins(). Called by self.lowpass_raw() and self.calc_spindle_psd_i
Returns
-------
self.spindles_zpad_lowpass: nested dict
dict of zero-padded spindle values from lowpass filtered data (format: {chan:{spin #: values}})
self.spindles_zpad_rejects_lowpass: nested dict
dict of zero-padded spindle frequency domain reject values from lowpass filtered data (format: {chan:{spin #: values}})
"""
def create_zpad(spin, chan, x, zpad_len):
""" Create the zero-padded spindle from raw data
Parameters
----------
spin: np.array
spindle mV values
zpad_len: float
length to zero-pad the data to (in seconds) """
# subtract mean to zero-center spindle for zero-padding
sf = self.s_freq
data = spin.values - np.mean(spin.values)
zpad_samples=0
zpad_seconds=0
tx=0
total_len = zpad_len*sf
zpad_samples = total_len - len(data)
zpad_seconds = zpad_samples/sf
if zpad_samples > 0:
padding = np.repeat(0, zpad_samples)
data_pad = np.append(data, padding)
else:
spin_len = len(data)/sf
print(f'Spindle {chan}:{x} length {spin_len} seconds is longer than pad length {zpad_len}')
data_pad = data
# return the zero-padded spindle
return data_pad
# grab attributes
spindles = self.spindles
data_lowpass = self.data_lowpass
spindle_rejects_f = self.spindle_rejects_f
spindles_zpad_rejects = self.spindles_zpad_rejects
# get length of zero-padding
zpad_len = self.metadata['spindle_analysis']['zeropad_len_sec']
spindles_zpad_lowpass = {}
spindles_zpad_rejects_lowpass = {}
for chan in spindles:
spindles_zpad_lowpass[chan] = {}
spindles_zpad_rejects_lowpass[chan] = {}
# if there are spindles on that channel
if len(spindles[chan]) > 0:
# for each true spindle
for x in spindles[chan]:
# get the time index & low-pass values
spin_idx = [np.datetime64(t) for t in spindles[chan][x].time.values]
spin = data_lowpass[chan].loc[spin_idx]
# make the zero-padding
data_pad = create_zpad(spin, chan, x, zpad_len)
# add to dict
spindles_zpad_lowpass[chan][x] = data_pad
if len(spindle_rejects_f[chan]) > 0:
reject_dict = {key:idxs for key, idxs in zip(spindles_zpad_rejects[chan].keys(), spindle_rejects_f[chan])}
# for each rejected spindle
for x, spin_idx in reject_dict.items():
# get the low-pass values
spin = data_lowpass[chan].loc[spin_idx]
# make the zero-padding
data_pad = create_zpad(spin, chan, x, zpad_len)
# add to dict
spindles_zpad_rejects_lowpass[chan][x] = data_pad
# save as attributes
self.spindles_zpad_lowpass = spindles_zpad_lowpass
self.spindles_zpad_rejects_lowpass = spindles_zpad_rejects_lowpass
# step 9. calculate power spectrum for each spindle
def calc_spindle_psd_i(self, psd_bandwidth, zpad, zpad_len, pwr_prune, pwr_thres, spin_range, prune_range, min_peaks, pk_width_hz):
""" Calculate multitaper power spectrum for individual spindles across all channels
Option to threshold spindle detections based on a % power threshold.
Params
------
psd_bandwidth: float
frequency resolution in Hz
zpad: bool (default: True)
whether to zeropad the data (for increased spectral resolution)
zpad_len: float
length to zero-pad the data to (in seconds)
pwr_prune: bool
Whether to reject spindles using frequency-domain criterion: power in spindle range must = >X% of total power in prune range
Ex. spindle power must be >30% of total power between 4-25Hz
pwr_thres: float
% of power >4Hz that must be in the spindle range for a spindle to avoid rejection
spin_range: list of int
spindle frequency range (inclusive) to be used for spindle analysis and power thresholding
prune_range: list of float
frequency range for denominator of % power threshold calculation
min_peaks: int (default: 1)
minimum number of spectral peaks in the spindle range for a spindle to be accepted
pk_width_hz: float (default: 0.5)
minimum width (in Hz) for a peak to be considered a peak
Returns
-------
self.spindles_zpad: dict
zero-padded spindle values
self.spindles_zpad_rejects: dict
zero-padded spindle values for spins rejected in frequency domain
self.spindle_psd_i: dict
power spectra of individual spindles. format {channel: pd.Series} with index = frequencies and values = power (uV^2/Hz)
self.spindle_psd_i_rejects: dict
power spectra of individual spindles rejected in frequency domain. format {channel: pd.Series} with index = frequencies and values = power (uV^2/Hz)
self.spindle_multitaper_calcs: dict of pd.DataFrame
calculations used to calculated multitaper power spectral estimates for each spindle by channel
self.spindle_multitaper_calcs_rejects: dict of pd.DataFrame
calculations used to calculated multitaper power spectral estimates for spindles rejected in frequency domain
"""
print('Calculating power spectra (this may take a few minutes)...')
# update metadata
analysis_metadata = {'psd_dtype': 'raw_individual', 'psd_method':'multitaper', 'psd_bandwidth':psd_bandwidth,
'zeropad': zpad, 'zeropad_len_sec': zpad_len, 'pwr_prune': pwr_prune, 'pwr_thres': pwr_thres,
'prune_range': prune_range, 'min_peaks': min_peaks, 'pk_width_hz': pk_width_hz}
self.metadata['spindle_analysis'].update(analysis_metadata)
sf = self.metadata['analysis_info']['s_freq']
spin_range = self.metadata['spindle_analysis']['spin_range']
rmv_spins = {}
spindle_rejects_f = {}
spindles_zpad = {}
spindles_zpad_rejects = {}
spindle_psd = {}
spindle_psd_rejects = {}
spindle_multitaper_calcs = {}
spindle_multitaper_calcs_rejects = {}
for chan in self.spindles:
spindles_zpad[chan] = {}
spindles_zpad_rejects[chan] = {}
spindle_psd[chan] = {}
spindle_psd_rejects[chan] = {}
spindle_rejects_f[chan] = []
rmv_spins[chan] = []
# set up multitaper_calcs df
# waveform resolution is dependent on length of signal, regardless of zero-padding
spindle_multitaper_calcs[chan] = pd.DataFrame(columns=['spin_samples', 'spin_seconds', 'zpad_samples', 'zpad_seconds', 'waveform_resoultion_Hz',
'psd_resolution_Hz', 'N_taper_len', 'W_bandwidth', 'K_tapers', f'perc_{prune_range[0]}-{prune_range[1]}Hzpwr_in_spin_range'])
spindle_multitaper_calcs[chan].index.name = 'spindle_num'
# for spindle rejects
spindle_multitaper_calcs_rejects[chan] = pd.DataFrame(columns=['spin_samples', 'spin_seconds', 'zpad_samples', 'zpad_seconds', 'waveform_resoultion_Hz',
'psd_resolution_Hz', 'N_taper_len', 'W_bandwidth', 'K_tapers', f'perc_{prune_range[0]}-{prune_range[1]}Hzpwr_in_spin_range'])
spindle_multitaper_calcs_rejects[chan].index.name = 'spindle_num'
if len(self.spindles[chan]) > 0:
for x in self.spindles[chan]:
# subtract mean to zero-center spindle for zero-padding
data = self.spindles[chan][x].Raw.values - np.mean(self.spindles[chan][x].Raw.values)
zpad_samples=0
zpad_seconds=0
tx=0
# option to zero-pad the spindle
if zpad:
total_len = zpad_len*sf
zpad_samples = total_len - len(data)
zpad_seconds = zpad_samples/sf
if zpad_samples > 0:
padding = np.repeat(0, zpad_samples)
data_pad = np.append(data, padding)
else:
spin_len = len(data)/sf
print(f'Spindle {chan}:{x} length {spin_len} seconds is longer than pad length {zpad_len}')
data_pad = data
# or leave as-is
else:
data_pad = data
# record PS params [K = 2NW-1]
spin_samples = len(data)
spin_seconds = len(data)/sf
waveform_res = 1/spin_seconds
psd_res = 1/(len(data_pad)/sf)
N_taper_len = len(data_pad)/sf
W_bandwidth = psd_bandwidth
K_tapers = int((2*N_taper_len*W_bandwidth)-1)
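# Worked example of K = 2NW - 1: with zpad_len = 3 s and psd_bandwidth = 1.0 Hz,
# N_taper_len = 3 s and W_bandwidth = 1.0 Hz, so K_tapers = int(2*3*1.0 - 1) = 5.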
# calculate power spectrum
try:
pwr, freqs = psd_array_multitaper(data_pad, sf, adaptive=True, bandwidth=psd_bandwidth, fmax=25,
normalization='full', verbose=0)
except ValueError:
print(f'Specified bandwidth too small for data length. Skipping spindle {chan}:{x}.')
continue
# convert to series & add to dict
psd = pd.Series(pwr, index=freqs)
# set spindle status for rejection checks
status = True
# check for minimum spectral peaks
# set minimum distance between peaks equal to psd_bandwidth
samp_per_hz = len(psd)/(psd.index[-1]-psd.index[0])
bw_hz = self.metadata['spindle_analysis']['psd_bandwidth']
distance = samp_per_hz*bw_hz
# set minimum width in samples for a peak to be considered a peak
width = samp_per_hz*pk_width_hz
# get peaks
spindle_power = psd[(psd.index >= spin_range[0]) & (psd.index <= spin_range[1])]
p_idx, props = find_peaks(spindle_power, distance=distance, width=width, prominence=0.0)
# reject if < min peaks
if len(p_idx) < min_peaks:
# add to self.spindle_rejects_f
spindle_rejects_f[chan].append(self.spindle_events[chan][x])
# record params for removal from self.spindles & self.spindle_events after loop is complete
rmv_spins[chan].append(x)
# add to rejects psd dicts
spin_perc = 'not_calculated'
spindle_psd_rejects[chan][x] = psd
spindles_zpad_rejects[chan][x] = data_pad
spindle_multitaper_calcs_rejects[chan].loc[x] = [spin_samples, spin_seconds, zpad_samples, zpad_seconds, waveform_res, psd_res, N_taper_len, W_bandwidth, K_tapers, spin_perc]
# set status to false
status = False
# if spindle wasn't rejected by min_peaks criterion
if status == True:
# if not applying power % threshold
if pwr_prune == False:
# add to psd dicts
spindle_psd[chan][x] = psd
spindles_zpad[chan][x] = data_pad
spin_perc = 'not_calculated'
# otherwise apply power % threshold
elif pwr_prune:
# calculate total power > 4Hz
psd_subset = psd[(psd.index >= prune_range[0]) & (psd.index <= prune_range[1])]
# power in spindle range
psd_spins = psd[(psd.index >= spin_range[0]) & (psd.index <= spin_range[1])]
# percent of power > 4Hz in spindle range
spin_perc = int(psd_spins.sum()/psd_subset.sum()*100)
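# Worked example (illustrative percentages): if 45% of the 4-25 Hz power falls
# in the 9-16 Hz spindle range and pwr_thres = 30, spin_perc = 45 > 30 and the
# spindle is kept; at spin_perc = 25 (<= 30) it would be rejected below.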
if spin_perc <= pwr_thres:
# add to self.spindle_rejects_f
spindle_rejects_f[chan].append(self.spindle_events[chan][x])
# record params for removal from self.spindles & self.spindle_events after loop is complete
rmv_spins[chan].append(x)
# add to rejects psd dicts
spindle_psd_rejects[chan][x] = psd
spindles_zpad_rejects[chan][x] = data_pad
spindle_multitaper_calcs_rejects[chan].loc[x] = [spin_samples, spin_seconds, zpad_samples, zpad_seconds, waveform_res, psd_res, N_taper_len, W_bandwidth, K_tapers, spin_perc]
else:
# add to psd dicts
spindle_psd[chan][x] = psd
spindles_zpad[chan][x] = data_pad
spindle_multitaper_calcs[chan].loc[x] = [spin_samples, spin_seconds, zpad_samples, zpad_seconds, waveform_res, psd_res, N_taper_len, W_bandwidth, K_tapers, spin_perc]
# remove rejects from self.spindles & self.spindle_events
for chan, spin_list in rmv_spins.items():
# iterate backwards so that list indices for spindle_events don't change for subsequent items
for spin in reversed(spin_list):
del self.spindles[chan][spin]
del self.spindle_events[chan][spin]
self.spindles_zpad = spindles_zpad
self.spindles_zpad_rejects = spindles_zpad_rejects
self.spindle_multitaper_calcs = spindle_multitaper_calcs
self.spindle_multitaper_calcs_rejects = spindle_multitaper_calcs_rejects
self.spindle_psd_i = spindle_psd
self.spindle_psd_i_rejects = spindle_psd_rejects
self.spindle_rejects_f = spindle_rejects_f
print('Spectra stored in obj.spindle_psd_i. Calculations stored in obj.spindle_multitaper_calcs. Zero-padded spindle data in obj.spindles_zpad.\n')
# calculate zero-padded lowpass filtered spindles if data has been lowpassed
if hasattr(self, 'data_lowpass'):
self.make_lowpass_zpad()
print('Zero-padded lowpass filtered spindles tabulated. Stored in obj.spindles_zpad_lowpass.')
def detect_spindles(self, wn=[8, 16], order=4, sp_mw=0.2, loSD=0, hiSD=1.5, min_sep=0.2, duration=[0.5, 3.0], min_chans_r=3, min_chans_d=9,
zmethod='trough', trough_dtype='spfilt', buff=False, buffer_len=3, psd_bandwidth=1.0, zpad=True, zpad_len=3.0, pwr_prune=True,
pwr_thres=30, spin_range=[9, 16], prune_range=[4, 25], min_peaks=1, pk_width_hz=0.5):
""" Detect spindles by channel
Parameters
----------
wn: list of int (default: [8, 16])
butterworth bandpass filter window
order: int (default: 4)
butterworth 1/2 filter order (applied forwards + backwards)
sp_mw: float (default: 0.2)
moving window size for RMS & moving average calcs (seconds)
loSD: float (default: 0)
standard deviations above the average RMS that the spindle envelope must drop below to signify beginning/end of spindle
hiSD: float (default: 1.5)
standard deviations above the average RMS that the spindle envelope must exceed for a detection to be initiated
min_sep: float (default: 0.2)
minimum separation (in seconds) for spindles to be considered distinct, otherwise combine
min_chans_r: int (default: 3)
minimum number of channels for spindles to occur across concurrently to bypass
automatic rejection
min_chans_d: int (default: 9)
minimum number of channels for spindles to occur across concurrently in order to
bypass duration criterion. performs best at 1/4 of total chans
duration: list of float
duration range (seconds) for spindle thresholding
zmethod: str (default: 'trough')
method used to assign 0-center to spindles [options: 'trough', 'middle']. Trough assigns zero-center to
the deepest negative trough. Middle assigns zero center to the midpoint in time.
trough_dtype: str (default: 'spfilt')
Which data to use for picking the most negative trough for centering [options: 'Raw', 'spfilt']
buff: bool (default: False)
calculate spindle data dataframes with a delta time buffer around center of spindle
buffer_len: int
length in seconds of buffer to calculate around 0-center of spindle
psd_bandwidth: float (default: 1.0)
frequency resolution in Hz
zpad: bool (default: True)
whether to zeropad the data (for increased spectral resolution)
zpad_len: float
length to zero-pad the data to (in seconds)
pwr_prune: bool (default: True)
Whether to reject spindles using frequency-domain criterion: power in spindle range must = >X% of total power in prune range
Ex. spindle power must be >30% of total power between 4-25Hz
pwr_thres: float (default: 30)
% of power >4Hz that must be in the spindle range for a spindle to avoid rejection
spin_range: list of int (default: [9, 16])
spindle frequency range (inclusive) to be used for spindle analysis and power thresholding
prune_range: list of float
frequency range for denominator of % power threshold calculation
min_peaks: int (default: 1)
minimum number of spectral peaks in the spindle range for a spindle to be accepted
pk_width_hz: float (default: 0.5)
minimum width (in Hz) for a peak to be considered a peak
Returns
-------
## incomplete ##
self.spindle_psd_i: nested dict
power spectra for individual spindles by channel (Only if psd_type == 'i')
format {channel: {spindle: pd.Series}} with index = frequencies and values = power (uV^2/Hz)
"""
self.metadata['spindle_analysis'] = {'sp_filtwindow': wn, 'sp_filtorder_half': order,
'sp_RMSmw': sp_mw, 'sp_loSD': loSD, 'sp_hiSD': hiSD, 'min_sep': min_sep, 'sp_duration': duration,
'sp_minchans_toskipautoreject': min_chans_r, 'sp_minchans_toskipduration': min_chans_d, 'spin_range':spin_range}
#self.s_freq = self.metadata['analysis_info']['s_freq']
# set attributes
self.spindle_attributes()
# Make filter
self.make_butter_sp(wn, order)
print('Detecting spindles...')
# loop through channels (all channels for plotting ease)
for i in self.channels:
# if i not in ['EOG_L', 'EOG_R', 'EKG']:
#print(f'Detecting spindles on {i}...')
# Filter
self.spfilt(i)
# Calculate RMS & smooth
self.rms_smooth(i, sp_mw)
# Set detection thresholds
self.set_thres(i)
# Detect spindles
self.get_spindles(i, min_sep)
# combine dataframes
print('Combining dataframes...')
self.spMultiIndex()
# Apply time-domain rejection criteria
print('Pruning spindle detections...')
self.reject_spins_t(min_chans_r, min_chans_d, duration)
# create individual datframes for each spindle
self.create_spindfs(zmethod, trough_dtype, buff, buffer_len)
# calculate power for individual spindles & prune in frequency domain
self.calc_spindle_psd_i(psd_bandwidth, zpad, zpad_len, pwr_prune, pwr_thres, spin_range, prune_range, min_peaks, pk_width_hz)
print('Spindle detection complete.')
print('done.\n')
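# A minimal end-to-end usage sketch, assuming a single epoched CSV segment whose
# filename follows the 'IN_date_stage_cycle_...' pattern expected by
# load_segment(); the path and filename below are illustrative only.
#
# n = NREM(fname='IN101_2020-01-01_s2_cycle1_epoch1.csv',
#          fpath='/path/to/segments', epoched=True)
# n.detect_spindles(psd_bandwidth=1.0, pwr_thres=30)
# n.calc_spindle_means()
# n.calc_spin_tstats()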
def calc_spindle_means(self):
""" Calculate mean, std, and sem at each timedelta from negative spindle peak per channel
Returns
-------
self.spindle_means: nested dict
dictionary of raw and filtered spindle means by channel
format: {'Raw':{channel:pd.DataFrame}}, 'spfilt':{channel:pd.DataFrame}}
"""
print('Aligning spindles...')
# align spindles according to timedelta & combine into single dataframe
spindle_aggregates = {}
datatypes = ['Raw', 'spfilt']
for chan in self.spindles.keys():
# only use channels that have spindles
if self.spindles[chan]:
spindle_aggregates[chan] = {}
for datatype in datatypes:
# set the base df
first_spin = list(self.spindles[chan].keys())[0]
first_spin_colname = f'spin_{first_spin}'
agg_df = pd.DataFrame(self.spindles[chan][first_spin][datatype])
agg_df = agg_df.rename(columns={datatype:first_spin_colname})
rsuffix = list(self.spindles[chan].keys())[1:]
# join on the index for each spindle
agg_df = agg_df.join([self.spindles[chan][x][datatype].rename('spin_'+str(x)) for x in rsuffix], how='outer')
spindle_aggregates[chan][datatype] = agg_df
print('Calculating spindle statistics...')
# create a new multiindex dataframe for calculations
spindle_means = {}
calcs = ['count', 'mean', 'std' ,'sem']
tuples = [(chan, calc) for chan in spindle_aggregates.keys() for calc in calcs]
columns = pd.MultiIndex.from_tuples(tuples, names=['channel', 'calc'])
for datatype in datatypes:
spindle_means[datatype] = pd.DataFrame(columns=columns)
# fill the dataframe
for chan in spindle_aggregates.keys():
spindle_means[datatype][(chan, 'count')] = spindle_aggregates[chan][datatype].notna().sum(axis=1)
spindle_means[datatype][(chan, 'mean')] = spindle_aggregates[chan][datatype].mean(axis=1)
spindle_means[datatype][(chan, 'std')] = spindle_aggregates[chan][datatype].std(axis=1)
spindle_means[datatype][(chan, 'sem')] = spindle_aggregates[chan][datatype].sem(axis=1)
self.spindle_aggregates = spindle_aggregates
self.spindle_means = spindle_means
print('Done. Spindles aggregated by channel in obj.spindle_aggregates dict. Spindle statistics stored in obj.spindle_means dataframe.\n')
def calc_spindle_buffer_means(self):
""" Calculate mean, std, and sem at each timedelta from negative spindle peak per channel
NOTE: This needs to be updated to include datatype parameter to stay aligned with calc_spin_means
Also fix the join command for speed (see above)
"""
print('Aligning spindles...')
# align spindles according to timedelta & combine into single dataframe
spindle_buffer_aggregates = {}
for chan in self.spindles.keys():
# only use channels that have spindles
if self.spindles_wbuffer[chan]:
# set the base df
agg_df = pd.DataFrame(self.spindles_wbuffer[chan][0]['Raw'])
rsuffix = list(range(1, len(self.spindles_wbuffer[chan])))
# join on the index for each spindle
for x in range(1, len(self.spindles_wbuffer[chan])):
mean_df = agg_df.join(self.spindles_wbuffer[chan][x]['Raw'], how='outer', rsuffix=rsuffix[x-1])
spindle_buffer_aggregates[chan] = mean_df
print('Calculating statistics...')
# create a new multiindex dataframe for calculations
calcs = ['mean', 'std' ,'sem']
tuples = [(chan, calc) for chan in spindle_buffer_aggregates.keys() for calc in calcs]
columns = pd.MultiIndex.from_tuples(tuples, names=['channel', 'calc'])
spindle_buffer_means = pd.DataFrame(columns=columns)
# fill the dataframe
for chan in spindle_buffer_aggregates.keys():
spindle_buffer_means[(chan, 'mean')] = spindle_buffer_aggregates[chan].mean(axis=1)
spindle_buffer_means[(chan, 'std')] = spindle_buffer_aggregates[chan].std(axis=1)
spindle_buffer_means[(chan, 'sem')] = spindle_buffer_aggregates[chan].sem(axis=1)
self.spindle_buffer_aggregates = spindle_buffer_aggregates
self.spindle_buffer_means = spindle_buffer_means
print('Done. Spindles aggregated by channel in obj.spindle_buffer_aggregates dict. Spindle statistics stored in obj.spindle_buffer_means dataframe.')
def calc_spin_tstats(self):
""" calculate time-domain spindle feature statistics
Returns
-------
self.spindle_tstats: pd.DataFrame
MultiIndex dataframe with calculated spindle time statistics
"""
spin_range = self.metadata['spindle_analysis']['spin_range']
print('Calculating spindle time-domain statistics...')
# create multi-index dataframe
lvl1 = ['Count', 'Duration', 'Duration', 'Amplitude_raw', 'Amplitude_raw', 'Amplitude_spfilt', 'Amplitude_spfilt', 'Density', 'ISI', 'ISI']
lvl2 = ['total', 'mean', 'sd', 'rms', 'sd', 'rms', 'sd', 'spin_per_min', 'mean', 'sd']
columns = pd.MultiIndex.from_arrays([lvl1, lvl2])
spindle_stats = pd.DataFrame(columns=columns)
#exclude non-EEG channels
exclude = ['EOG_L', 'EOG_R', 'EKG']
# fill dataframe
for chan in self.spindles:
if chan not in exclude:
# calculate spindle count
count = len(self.spindles[chan])
if count == 0:
spindle_stats.loc[chan] = [count, None, None, None, None, None, None, None, None, None]
else:
# calculate spindle duration
durations = np.array([(self.spindles[chan][spin].time.iloc[-1] - self.spindles[chan][spin].time.iloc[0]).total_seconds() for spin in self.spindles[chan]])
duration_mean = durations.mean()
duration_sd = durations.std()
# calculate amplitude
amplitudes_raw = np.concatenate([self.spindles[chan][x].Raw.values for x in self.spindles[chan]])
amp_rms_raw = np.sqrt(np.array([x**2 for x in amplitudes_raw]).mean())
amp_sd_raw = amplitudes_raw.std()
amplitudes_spfilt = np.concatenate([self.spindles[chan][x].spfilt.values for x in self.spindles[chan]])
amp_rms_spfilt = np.sqrt(np.array([x**2 for x in amplitudes_spfilt]).mean())
amp_sd_spfilt = amplitudes_spfilt.std()
# calculate density
#density = count/((self.data.index[-1] - self.data.index[0]).total_seconds()/60)
data_notnan = self.data[chan][self.data[chan]['Raw'].isna() == False]
minutes = (len(data_notnan)/self.s_freq)/60
density = count/(minutes)
# calculate inter-spindle-interval (ISI)
if len(self.spindles[chan]) > 1:
spin_keys = list(self.spindles[chan].keys())
# make a list of tuples of ISI start and end timestamps
isi_ranges = [(self.spindles[chan][spin_keys[x]].time.iloc[-1], self.spindles[chan][spin_keys[x+1]].time.iloc[0]) for x in range(len(spin_keys)) if x < len(spin_keys)-1]
# keep the ISI tuple only if there are no NaNs in the data (no missing data)
notNaN_isi_ranges = [i for i in isi_ranges if np.any(np.isnan(self.data[chan].loc[i[0]:i[1]])) == False]
# calculate the total seconds for each tuple
isi_arr = np.array([(isi[1]-isi[0]).total_seconds() for isi in notNaN_isi_ranges])
isi_mean = isi_arr.mean()
isi_sd = isi_arr.std()
else:
isi_mean = None
isi_sd = None
spindle_stats.loc[chan] = [count, duration_mean, duration_sd, amp_rms_raw, amp_sd_raw, amp_rms_spfilt, amp_sd_spfilt, density, isi_mean, isi_sd]
# spindle_stats.loc[chan] = [count, duration_mean, duration_sd, amp_rms_raw, amp_sd_raw, amp_rms_spfilt, amp_sd_spfilt, density, isi_mean, isi_sd, center_freq, total_pwr]
self.spindle_tstats = spindle_stats
print('Spindle time stats stored in obj.spindle_tstats.\n')
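# Worked example for the density calculation above (illustrative numbers, not from any
# recording): with count = 120 detected spindles and 40 minutes of artifact-free data
# (len(data_notnan)/self.s_freq/60 == 40), density = 120/40 = 3.0 spindles per minute.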
def calc_spindle_psd_concat(self, psd_bandwidth):
""" Calculate multitaper power spectrum of concated spindles for each channel
Params
------
psd_bandwidth: float
frequency resolution in Hz
Returns
-------
self.spindle_psd_concat: dict
format {channel: pd.Series} with index = frequencies and values = power (uV^2/Hz)
self.spindle_multitaper_calcs_concat: pd.DataFrame
calculations used to calculate concatenated multitaper power spectral estimates for each channel
"""
print('Calculating power spectra (this may take a few minutes)...')
self.metadata['spindle_analysis']['psd_dtype'] = 'raw_concat'
self.metadata['spindle_analysis']['psd_method'] = 'multitaper'
self.metadata['spindle_analysis']['psd_bandwidth'] = psd_bandwidth
sf = self.metadata['analysis_info']['s_freq']
spindle_psd = {}
spindle_multitaper_calcs_concat = pd.DataFrame(index=['data_len', 'N', 'W', 'NW', 'K'])
for chan in self.spindles:
#print(f'Calculating spectra for {chan}...')
if len(self.spindles[chan]) > 0:
# concatenate spindles
spindles = [self.spindles[chan][x].Raw.values for x in self.spindles[chan]]
data = np.concatenate(spindles)
# calculate power spectrum
try:
pwr, freqs = psd_array_multitaper(data, sf, adaptive=True, bandwidth=psd_bandwidth, fmax=25,
normalization='full', verbose=0)
except ValueError as e:
print(e)
min_bw = float((str(e)).split(' ')[-1])
# round up to the nearest hundredth bc using exact # can still throw occasional errors
psd_bandwidth = math.ceil(min_bw*100)/100
print(f'Setting psd_bandwidth to {psd_bandwidth}')
pwr, freqs = psd_array_multitaper(data, sf, adaptive=True, bandwidth=psd_bandwidth, fmax=25,
normalization='full', verbose=0)
# convert to series & add to dict
psd = pd.Series(pwr, index=freqs)
spindle_psd[chan] = psd
# record PS params [K = 2NW-1]
N = len(data)/sf
W = psd_bandwidth
K = int((2*N*W)-1)
spindle_multitaper_calcs_concat[chan] = [len(data), N, W, N*W, K]
self.spindle_multitaper_calcs_concat = spindle_multitaper_calcs_concat
self.spindle_psd_concat = spindle_psd
print('Done. Spectra stored in obj.spindle_psd_concat. Calculations stored in obj.spindle_multitaper_calcs_concat.\n')
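# Worked example for the multitaper bookkeeping above (illustrative values): with 1024
# samples of concatenated spindle data at s_freq = 256 Hz, N = 1024/256 = 4 s; with
# psd_bandwidth W = 1 Hz the time half-bandwidth product is NW = 4 and K = int(2*4*1 - 1) = 7 tapers.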
def calc_gottselig_norm(self, norm_range):
""" calculated normalized spindle power on EEG channels (from Gottselig et al., 2002). works with
calc_spindle_psd_concat.
TO DO: change p0 value if optimize warning
Parameters
----------
norm_range: list of tuple
frequency ranges for gottselig normalization
Returns
-------
self.spindle_psd_concat_norm: nested dict
format {chan: pd.Series(normalized power, index=frequency)}
"""
print('Calculating Gottselig normalization...')
def exponential_func(x, a, b, c):
return a*np.exp(-b*x)+c
self.metadata['spindle_analysis']['gottselig_range'] = norm_range
exclude = ['EOG_L', 'EOG_R', 'EKG']
spindle_psd_norm = {}
chans_norm_failed = []
for chan in self.spindle_psd_concat:
if chan not in exclude:
spindle_psd_norm[chan] = {}
# specify data to be fit (only data in norm range)
incl_freqs = np.logical_or(((self.spindle_psd_concat[chan].index >= norm_range[0][0]) & (self.spindle_psd_concat[chan].index <= norm_range[0][1])),
((self.spindle_psd_concat[chan].index >= norm_range[1][0]) & (self.spindle_psd_concat[chan].index <= norm_range[1][1])))
pwr_fit = self.spindle_psd_concat[chan][incl_freqs]
# set x and y values (convert y to dB)
x_pwr_fit = pwr_fit.index
y_pwr_fit = 10 * np.log10(pwr_fit.values)
# fit exponential -- try second fit line if first throws infinite covariance
try:
popt, pcov = curve_fit(exponential_func, xdata=x_pwr_fit, ydata=y_pwr_fit, p0=(1, 0, 1))
except (OptimizeWarning, RuntimeError, TypeError):
try:
popt, pcov = curve_fit(exponential_func, xdata=x_pwr_fit, ydata=y_pwr_fit, p0=(1, 1e-6, 1))
except (OptimizeWarning, RuntimeError, TypeError) as e:
popt = np.full(3, np.nan)
chans_norm_failed.append(chan)
print(f'scipy.optimize.curve_fit encountered error "{e}" on channel {chan}. Normalization skipped for this channel.')
pass
xx = self.spindle_psd_concat[chan].index
yy = exponential_func(xx, *popt)
# subtract the fit line
psd_norm = pd.Series(10*np.log10(self.spindle_psd_concat[chan].values) - yy, index=self.spindle_psd_concat[chan].index)
# save the values
spindle_psd_norm[chan]['normed_pwr'] = psd_norm
spindle_psd_norm[chan]['values_to_fit'] = pd.Series(y_pwr_fit, index=x_pwr_fit)
spindle_psd_norm[chan]['exp_fit_line'] = pd.Series(yy, index=xx)
self.spindle_psd_concat_norm = spindle_psd_norm
self.metadata['spindle_analysis']['chans_concat_norm_failed'] = chans_norm_failed
print('Gottselig normalization data stored in obj.spindle_psd_concat_norm.\n')
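# Minimal standalone sketch of the Gottselig-style normalization used above (assumes only
# numpy and scipy; 'freqs' and 'pwr' are hypothetical arrays, not attributes of this class):
#   from scipy.optimize import curve_fit
#   fit_mask = ((freqs >= 4) & (freqs <= 6)) | ((freqs >= 18) & (freqs <= 25))
#   expfunc = lambda x, a, b, c: a*np.exp(-b*x) + c
#   popt, _ = curve_fit(expfunc, freqs[fit_mask], 10*np.log10(pwr[fit_mask]), p0=(1, 0, 1))
#   normed_pwr = 10*np.log10(pwr) - expfunc(freqs, *popt)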
def calc_spin_stats_i(self):
""" Calculate statistics for individual spindles """
print('\nCalculating individual spindle statistics...')
# pull minimum width (in Hz) for a peak to be considered a peak
pk_width_hz = self.metadata['spindle_analysis']['pk_width_hz']
# create list of rows to be converted into dataframe
stats_i_rows = []
# create column names for dict keys to build rows
cols = ['AP', 'RL', 'chan', 'spin', 'dur_ms', 'amp_raw_rms', 'amp_spfilt_rms',
'dominant_freq_Hz', 'total_peaks', 'peak_freqs_Hz', 'peak_ratios', 'peak2_freq', 'peak2_ratio', 'total_pwr_ms2']
# assign anterior-posterior characters
a_chars = ['f']
p_chars = ['p', 'o', 't']
c_chans = ['a1', 't9', 't3', 'c5', 'c3', 'c1', 'cz', 'c2', 'c4', 'c6', 't4', 't10', 'a2']
# exclude non-EEG channels
exclude = ['EKG', 'EOG_L', 'EOG_R']
# loop through all channels
for chan in self.spindles.keys():
if chan not in exclude:
# assign anterior-posterior
if chan.casefold() in c_chans:
ap = 'C'
elif any((c.casefold() in a_chars) for c in chan):
ap = 'A'
elif any((c.casefold() in p_chars) for c in chan):
ap = 'P'
# assign RL
if chan[-1] == 'z':
rl = 'C'
elif int(chan[-1]) % 2 == 0:
rl = 'R'
else:
rl = 'L'
# analyze individual spindles
for spin in self.spindles[chan]:
# set individual spindle data
spindle = self.spindles[chan][spin]
# get time stats
dur_ms = np.abs(spindle.index[0]) + spindle.index[-1]
amp_raw_rms = np.sqrt(np.mean(spindle.Raw.values**2))
amp_spfilt_rms = np.sqrt(np.mean(spindle.spfilt.values**2))
# get frequency stats
psd_i = self.spindle_psd_i[chan][spin]
spin_range = self.metadata['spindle_analysis']['spin_range']
spindle_power = psd_i[(psd_i.index >= spin_range[0]) & (psd_i.index <= spin_range[1])]
total_pwr = spindle_power.sum()
# set minimum distance between peaks equal to psd_bandwidth
samp_per_hz = len(psd_i)/(psd_i.index[-1]-psd_i.index[0])
bw_hz = self.metadata['spindle_analysis']['psd_bandwidth']
distance = samp_per_hz*bw_hz
# set minimum width in samples for a peak to be considered a peak
width = samp_per_hz*pk_width_hz
# get peaks
p_idx, props = find_peaks(spindle_power, distance=distance, width=width, prominence=0.0)
peaks = spindle_power.iloc[p_idx]
# get dominant frequency [major peak] (to 2 decimal points)
dominant_freq = round(peaks.idxmax(), 2)
total_peaks = len(peaks)
peak_freqs_hz = [round(idx, 2) for idx in peaks.index]
# ratio of peak amplitudes as a fraction of the dominant amplitude
peak_ratios = {np.round(key, 1):np.round((val/peaks.values.max()), 3) for key, val in peaks.items()}
# get 2nd most prominent peak as fraction of dominant peak power
if len(peak_ratios) > 1:
ratios_sorted = sorted(peak_ratios.items(), key=lambda x: x[1], reverse=True)
peak2_freq, peak2_ratio = ratios_sorted[1][0], ratios_sorted[1][1]
else:
peak2_freq, peak2_ratio = None, None
vals = [ap, rl, chan, spin, dur_ms, amp_raw_rms, amp_spfilt_rms, dominant_freq, total_peaks, peak_freqs_hz,
peak_ratios, peak2_freq, peak2_ratio, total_pwr]
row = {c:v for c, v in zip(cols, vals)}
# add row to stats_i list
stats_i_rows.append(row)
# convert row list into dataframe
stats_i_df = pd.DataFrame(stats_i_rows)
self.spindle_stats_i = stats_i_df
print('Done. Stats stored in obj.spindle_stats_i.')
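# Worked example for the peak-picking parameters above (illustrative values): a spindle PSD
# with 200 bins spanning 0-25 Hz gives samp_per_hz = 200/25 = 8 bins/Hz; with
# psd_bandwidth = 1.0 Hz and pk_width_hz = 0.5 Hz, find_peaks is called with
# distance = 8 samples and width = 4 samples.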
def calc_spin_fstats_concat(self):
""" Calculate frequency statistics on concatenated spindles
To do: determine statistics to calculate for individual spindles
To calculate peaks on concatenated data, the spectrum is:
1. smoothed with an RMS window length equal to the psd_bandwidth
2. peaks must have a minimum horizontal distance equal to psd_bandwidth
3. peaks must have a minimum frequency width (set by width_hz)
"""
print('Calculating concatenated frequency-domain statistics...')
# skip if no spindles detected
if len(self.spindle_psd_concat) == 0:
print('No spindles detected. Done.')
else:
spin_range = self.metadata['spindle_analysis']['spin_range']
# pull minimum width (in Hz) for a peak to be considered a peak
pk_width_hz = self.metadata['spindle_analysis']['pk_width_hz']
#exclude non-EEG channels
exclude = ['EOG_L', 'EOG_R', 'EKG']
# create fstats dataframe & peaks dict
cols = ['dominant_freq_Hz', 'total_pwr_dB', 'total_peaks', 'peak_freqs_Hz', 'peak_ratios']
spindle_fstats = pd.DataFrame(columns=cols)
psd_concat_norm_peaks = {}
# set the parameters for picking peaks
# set minimum distance between adjacent peaks equal to spectral resolution
psd = self.spindle_psd_concat[list(self.spindle_psd_concat.keys())[0]]
samp_per_hz = len(psd)/(psd.index[-1]-psd.index[0])
bw_hz = self.metadata['spindle_analysis']['psd_bandwidth']
distance = samp_per_hz*bw_hz
# distance must be >= 1
if distance < 1:
distance = 1
# set minimum width in samples for a peak to be considered a peak
width = samp_per_hz*pk_width_hz
# set the moving window sample length equal to the psd bandwidth
mw_samples = int(distance)
# calculate stats for each channel
for chan in self.spindle_psd_concat.keys():
if chan not in exclude:
# smooth the signal
datsq = np.power(self.spindle_psd_concat_norm[chan]['normed_pwr'], 2)
window = np.ones(mw_samples)/float(mw_samples)
rms = np.sqrt(np.convolve(datsq, window, 'same'))
smoothed_data = pd.Series(rms, index=self.spindle_psd_concat[chan].index)
smoothed_spindle_power = smoothed_data[(smoothed_data.index >= spin_range[0]) & (smoothed_data.index <= spin_range[1])]
#calculate total spindle power (to 2 decimal points)
total_pwr = round(smoothed_spindle_power.sum(), 2)
# get peaks
p_idx, props = find_peaks(smoothed_spindle_power, distance=distance, width=width, prominence=0.0)
peaks = smoothed_spindle_power.iloc[p_idx]
# set dominant frequency to major peak
total_peaks = len(peaks)
if total_peaks > 0:
dominant_freq = round(peaks.idxmax(), 2)
peak_freqs_hz = [round(idx, 2) for idx in peaks.index]
# ratio of peak amplitudes as a fraction of the dominant amplitude
peak_ratios = {np.round(key, 1):np.round((val/peaks.values.max()), 2) for key, val in peaks.items()}
else:
dominant_freq, peak_freqs_hz, peak_ratios = None, None, None
# add row to dataframe
spindle_fstats.loc[chan] = [dominant_freq, total_pwr, total_peaks, peak_freqs_hz, peak_ratios]
# save values to peaks dict
psd_concat_norm_peaks[chan] = {'smoothed_data':smoothed_data, 'peaks':peaks, 'props':props}
self.psd_concat_norm_peaks = psd_concat_norm_peaks
self.spindle_fstats_concat = spindle_fstats
print('Done. Concat frequency stats stored in obj.spindle_fstats_concat.')
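# The moving-window RMS smoothing above is equivalent to this standalone sketch
# (hypothetical 1-D array 'x'; mw_samples as computed above):
#   window = np.ones(mw_samples) / float(mw_samples)
#   smoothed = np.sqrt(np.convolve(np.power(x, 2), window, 'same'))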
def analyze_spindles(self, psd_type='concat', psd_bandwidth=1.0, zpad=True, zpad_len=3.0, norm_range=[(4,6), (18, 25)], buff=False,
gottselig=True, fstats_concat=True):
"""
Starting code for spindle statistics/visualizations
Parameters
----------
psd_type: str (default: 'concat')
What data to use for psd calculations [Options: 'i' (individual spindles), 'concat' (spindles concatenated by channel)]
**this parameter is redundant now that 'i' is auto-calculated in spindle detection step -- can be hard-coded to 'concat'
psd_bandwidth: float
frequency bandwidth for power spectra calculations (Hz)
zpad: bool (default: True)
whether to zeropad the spindle data (for increased spectral resolution)
zpad_len: float
length to zero-pad spindles to (in seconds)
norm_range: list of tuple
frequency ranges for gottselig normalization
buff: bool (default: False)
whether to calculate means with a time buffer around spindle center
gottselig: bool (default: True)
whether to calculate gottselig normalization on concatenated spectrum
fstats_concat: bool (default: True)
whether to calculate concatenated spindle frequency statistics
Returns
-------
self.spindles: nested dict of dfs
nested dict with spindle data by channel {channel: {spindle_num:spindle_data}}
self.spindles_wbuffer: nested dict of dfs
nested dict with spindle data w/ timedelta buffer by channel {channel: {spindle_num:spindle_data}}
self.spindle_psd_concat: dict
power spectra for concatenated spindles by channel (Only if psd_type == 'concat')
format {channel: pd.Series} with index = frequencies and values = power (uV^2/Hz)
self.spindle_psd_concat_norm: nested dict (Only if psd_type == 'concat')
format {chan: pd.Series(normalized power, index=frequency)}
self.spindle_psd_i: nested dict
power spectra for individual spindles by channel (Only if psd_type == 'i')
format {channel: {spindle: pd.Series}} with index = frequencies and values = power (uV^2/Hz)
self.spindle_multitaper_calcs: pd.DataFrame
calculations used to calculate multitaper power spectral estimates for each channel
self.spindle_multitaper_calcs_concat: pd.DataFrame
calculations used to calculate concatenated multitaper power spectral estimates for each channel
self.spindle_features: pd.DataFrame
MultiIndex dataframe with calculated spindle statistics
"""
# calculate spindle & spindle buffer means
self.calc_spindle_means()
if buff:
self.calc_spindle_buffer_means()
# run time-domain spindle statistics by channel
self.calc_spin_tstats()
# calculate power spectra
if psd_type == 'concat': # this is redundant (should always be 'concat')
# calc psd on concated spindles
self.calc_spindle_psd_concat(psd_bandwidth)
if gottselig:
# normalize power spectra for quantification
self.calc_gottselig_norm(norm_range)
# calculate individual spindle stats
self.calc_spin_stats_i()
# calculate frequency stats
if fstats_concat:
self.calc_spin_fstats_concat()
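# Hedged usage sketch (the spindle detection step that populates obj.spindles happens
# elsewhere in this class and is assumed to have already run; the export path is hypothetical):
#   obj.analyze_spindles(psd_bandwidth=1.0, gottselig=True, fstats_concat=True)
#   obj.export_spindles(export_dir='/tmp/spindle_exports')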
def export_spindles(self, export_dir, raw=True, psd_concat=True, psd_i=True, stats=True):
""" Export spindle analyses
Parameters
----------
export_dir: str
path to export directory
raw: bool (default: True)
export raw EEG spindle detection tracings
psd_concat: bool (default: True)
export psd calculations and series for concatenated spindles
psd_i: bool (default: True)
export psd calculations and series for individual spindles
stats: bool (default: True)
export spindle time and frequency statistics
Returns
-------
*To be completed
"""
print(f'Spindle export directory: {export_dir}\n')
# make export directory if it doesn't exist
if not os.path.exists(export_dir):
os.makedirs(export_dir)
# set base for savename
fname = self.metadata['file_info']['fname'].split('.')[0]
# dump metadata into main directory
filename = f'{fname}_spindle_metadata.txt'
savename = os.path.join(export_dir, filename)
with open(savename, 'w') as f:
json.dump(self.metadata, f, indent=4)
# export raw spindle tracings
if raw:
raw_dir = export_dir + '/spindle_tracings'
if not os.path.exists(raw_dir):
os.makedirs(raw_dir)
# export spindle tracings for each channel
print('Exporting spindle tracings...')
for chan in self.spindles.keys():
filename = f'{fname}_{chan}_spindle_tracings.txt'
savename = os.path.join(raw_dir, filename)
## use json dict dump to save space
spin_export = {}
for spin, series in self.spindles[chan].items():
# convert time from datetime to str
s = series.astype({'time': str})
spin_export[spin] = s.to_dict()
with open(savename, 'w') as f:
json.dump(spin_export, f, indent=4)
## for exporting into an excel workbook instead
# writer = pd.ExcelWriter(savename, engine='xlsxwriter')
# for spin in self.spindles[chan].keys():
# tab = f'Spindle_{spin}'
# self.spindles[chan][spin].to_excel(writer, sheet_name=tab)
# export spindle aggregates
print('Exporting spindle aggregates...')
filename = f'{fname}_spindle_aggregates.xlsx'
savename = os.path.join(raw_dir, filename)
writer = pd.ExcelWriter(savename, engine='xlsxwriter')
for chan in self.spindle_aggregates.keys():
for dtype in self.spindle_aggregates[chan].keys():
tab = '_'.join([chan, dtype])
self.spindle_aggregates[chan][dtype].to_excel(writer, sheet_name=tab)
writer.save()
# export spindle means
print('Exporting spindle means...\n')
for dtype in self.spindle_means.keys():
filename = f'{fname}_spindle_means_{dtype}.csv'
savename = os.path.join(raw_dir, filename)
self.spindle_means[dtype].to_csv(savename)
# export concated spectra
if psd_concat:
# set subdirectory
psd_concat_dir = export_dir + '/psd_concat'
if not os.path.exists(psd_concat_dir):
os.makedirs(psd_concat_dir)
# export multitaper calcs (concat)
print('Exporting concatenated spindle spectra calcs...')
filename = f'{fname}_spindle_mt_calcs_concat.csv'
savename = os.path.join(psd_concat_dir, filename)
self.spindle_multitaper_calcs_concat.to_csv(savename)
# export psd series
# convert series to dicts for json dump
psd_export = {}
for name, series in self.spindle_psd_concat.items():
psd_export[name] = series.to_dict()
filename = f'{fname}_spindle_psd_concat.txt'
savename = os.path.join(psd_concat_dir, filename)
with open(savename, 'w') as f:
json.dump(psd_export, f, indent=4)
# export psd norm
print('Exporting concatenated spindle norm spectra...\n')
# convert series to dicts for json dump
psd_norm_export = {}
for chan in self.spindle_psd_concat_norm.keys():
psd_norm_export[chan]={}
for name, series in self.spindle_psd_concat_norm[chan].items():
psd_norm_export[chan][name] = series.to_dict()
filename = f'{fname}_spindle_psd_norm.txt'
savename = os.path.join(psd_concat_dir, filename)
with open(savename, 'w') as f:
json.dump(psd_norm_export, f, indent=4)
if psd_i:
# export individual spindle spectra
print('Exporting individual spindle spectra...\n')
psd_i_dir = export_dir + '/psd_individual'
if not os.path.exists(psd_i_dir):
os.makedirs(psd_i_dir)
# export a file for each channel
for chan in self.spindle_psd_i.keys():
filename = f'{fname}_spindle_psd_i_{chan}.txt'
savename = os.path.join(psd_i_dir, filename)
# convert to dict for json dump
psd_export = {}
for spin, series in self.spindle_psd_i[chan].items():
psd_export[spin] = series.to_dict()
with open(savename, 'w') as f:
json.dump(psd_export, f, indent=4)
if stats:
print('Exporting spindle statistics...\n')
stats_dir = export_dir + '/statistics'
if not os.path.exists(stats_dir):
os.makedirs(stats_dir)
# export spindle time stats
filename = f'{fname}_spindle_tstats.csv'
savename = os.path.join(stats_dir, filename)
self.spindle_tstats.to_csv(savename)
# export spindle individual stats
filename = f'{fname}_spindle_stats_i.csv'
savename = os.path.join(stats_dir, filename)
self.spindle_stats_i.to_csv(savename)
# export spindle frequency stats
filename = f'{fname}_spindle_fstats_concat.csv'
savename = os.path.join(stats_dir, filename)
self.spindle_fstats_concat.to_csv(savename)
print('Done.')
## Slow Oscillation Detection Methods ##
def so_attributes(self):
""" make attributes for slow oscillation detection """
try:
self.channels
except AttributeError:
# create if doesn't exist
self.channels = [x[0] for x in self.data.columns]
dfs = ['sofiltEEG', 'spsofiltEEG']
[setattr(self, df, pd.DataFrame(index=self.data.index)) for df in dfs]
self.so_events = {}
self.so_rejects = {}
def make_butter_so(self, wn, order):
""" Make Butterworth bandpass filter [Parameters/Returns]
Parameters
----------
wn: list of float (default: [0.1, 4])
butterworth bandpass filter window
order: int (default: 4)
butterworth 1/2 filter order (applied forwards + backwards)
"""
nyquist = self.s_freq/2
wn_arr = np.asarray(wn)
if np.any(wn_arr <=0) or np.any(wn_arr >=1):
wn_arr = wn_arr/nyquist # must remake filter for each pt bc of differences in s_freq
self.so_sos = butter(order, wn_arr, btype='bandpass', output='sos')
print(f"Zero phase butterworth filter successfully created: order = {order}x{order}, bandpass = {wn}")
def make_butter_spso(self, spso_wn_pass, spso_wn_stop, spso_order):
""" Make Butterworth bandpass and bandstop filter
Parameters
----------
spso_wn_pass: list (default: [0.1, 17])
spso_wn_stop: list (default: [4.5, 7.5])
spso_order: int (default: 8)
"""
nyquist = self.s_freq/2
wn_pass_arr = np.asarray(spso_wn_pass)
wn_stop_arr = np.asarray(spso_wn_stop)
# must remake filter for each pt bc of differences in s_freq
if np.any(wn_pass_arr <=0) or np.any(wn_pass_arr >=1):
wn_pass_arr = wn_pass_arr/nyquist
if np.any(wn_stop_arr <=0) or np.any(wn_stop_arr >=1):
wn_stop_arr = wn_stop_arr/nyquist
self.spso_sos_bandstop = butter(spso_order, wn_stop_arr, btype='bandstop', output='sos')
self.spso_sos_bandpass = butter(spso_order, wn_pass_arr, btype='bandpass', output='sos')
print(f"Zero phase butterworth filter successfully created: order = {spso_order}x{spso_order} bandpass = {spso_wn_pass}")
print(f"Zero phase butterworth filter successfully created: order = {spso_order}x{spso_order} bandstop = {spso_wn_stop}")
def sofilt(self, i):
""" Apply Slow Oscillation Butterworth bandpass to signal by channel
Parameters
----------
i : str
channel to filter
Returns
-------
self.sofiltEEG: pandas.DataFrame
filtered EEG data
"""
# separate NaN and non-NaN values to avoid NaN filter output on cleaned data
data_nan = self.data[i][self.data[i]['Raw'].isna()]
data_notnan = self.data[i][self.data[i]['Raw'].isna() == False]
# filter notNaN data & add column to notNaN df
data_notnan_filt = sosfiltfilt(self.so_sos, data_notnan.to_numpy(), axis=0)
data_notnan['SOFilt'] = data_notnan_filt
# merge NaN & filtered notNaN values, sort on index
filt_chan = data_nan['Raw'].append(data_notnan['SOFilt']).sort_index()
# add channel to main dataframe
self.sofiltEEG[i] = filt_chan
def spsofilt(self, i):
""" Apply Butterworth bandpass-bandstop to signal by channel
Parameters
----------
i : str
channel to filter
"""
# separate NaN and non-NaN values to avoid NaN filter output on cleaned data
data_nan = self.data[i][self.data[i]['Raw'].isna()]
data_notnan = self.data[i][self.data[i]['Raw'].isna() == False]
# filter notNaN data & add column to notNaN df
## bandpass
data_notnan_bandpassed = sosfiltfilt(self.spso_sos_bandpass, data_notnan.to_numpy(), axis=0)
## now bandstop
data_notnan_filt = sosfiltfilt(self.spso_sos_bandstop, data_notnan_bandpassed, axis=0)
data_notnan['Filt'] = data_notnan_filt
# merge NaN & filtered notNaN values, sort on index
filt_chan = data_nan['Raw'].append(data_notnan['Filt']).sort_index()
# add channel to main dataframe
self.spsofiltEEG[i] = filt_chan
def get_so(self, i, method, posx_thres, negposx_thres, npeak_thres, negpos_thres):
""" Detect slow oscillations. Based on detection algorithm from Molle 2011 & Massimini 2004.
Parameters
----------
i : str
channel to filter
method: str (default: 'absolute')
SO detection method. [Options: 'absolute', 'ratio']
'absolute' employs absolute voltage values for npeak_thres and negpos_thres.
'ratio' sets npeak_thres to None and negpos_thres to 1.75x the negative peak
voltage for a given detection (ratio derived from Massimini 2004)
* NOTE: the idea was to use 'ratio' if reference is a single scalp electrode (e.g. FCz), which would
result in variable absolute amplitudes according to electrode location. In practice this doesn't
seem to pull accurate SOs. Needs a minimum threshold for the negative peak
posx_thres: list of float (default: [0.9, 2])
threshold of consecutive positive-negative zero crossings in seconds. Equivalent to Hz range
for slow oscillations
negposx_thres: int (default: 300)
minimum time (in milliseconds) between positive-to-negative and negative-to-positive zero crossing
npeak_thres: int (default: -80)
negative peak threshold in microvolts
negpos_thres: int (default: 140)
minimum amplitude threshold for negative to positive peaks
"""
so_events = {}
nx = 0
# convert thresholds
posx_thres_td = [pd.Timedelta(s, 's') for s in posx_thres]
npeak_mv = npeak_thres*(10**-3)
negpos_mv = negpos_thres*(10**-3)
# convert channel data to series
chan_dat = self.sofiltEEG[i]
# get zero-crossings
mask = chan_dat > 0
# shift pos/neg mask by 1 and compare
## insert a false value at position 0 on the mask shift
mask_shift = np.insert(np.array(mask), 0, None)
## remove the last value of the shifted mask and set the index
## to equal the original mask
mask_shift = pd.Series(mask_shift[:-1], index=mask.index)
# neg-pos are True; pos-neg are False
so_zxings = mask[mask != mask_shift]
# make empty lists for start and end times of vetted SO periods
pn_pn_starts = []
pn_pn_ends = []
cols = ['start', 'end']
# for each zero-crossing
for e, (idx, xing) in enumerate(so_zxings.items()):
# if it's not the last or second-to-last crossing
if e not in [len(so_zxings)-1, len(so_zxings)-2]:
# if it's positive-to-negative
if xing == False:
# check the distance to the next negative-to-positive
pn_np_intvl = so_zxings.index[e+1] - idx
# if it's >= 300ms
if pn_np_intvl >= pd.to_timedelta(negposx_thres, 'ms'):
# if it's not the last or second-to-last crossing
if e not in [len(so_zxings)-1, len(so_zxings)-2]:
# if the next positive-to-negative crossing is within threshold
pn_pn_intvl = so_zxings.index[e+2] - idx
if posx_thres_td[0] <= pn_pn_intvl <= posx_thres_td[1]:
# grab the next positive to negative crossing that completes the SO
# period and add values to lists
pn_pn_starts.append(idx)
pn_pn_ends.append(so_zxings.index[e+2])
# turn start and end lists into dataframe
so_periods = pd.DataFrame(list(zip(pn_pn_starts, pn_pn_ends)), columns=cols)
# loop through so_periods df
for idx, row in so_periods.iterrows():
# find negative & positive peaks
npeak_time = chan_dat.loc[row.start:row.end].idxmin()
npeak_val = chan_dat.loc[npeak_time]
ppeak_time = chan_dat.loc[row.start:row.end].idxmax()
ppeak_val = chan_dat.loc[ppeak_time]
# check absolute value thresholds if method is absolute
if method == 'absolute':
# if negative peak is < than threshold
if npeak_val < npeak_mv:
# if negative-positive peak amplitude is >= than threshold
if np.abs(npeak_val) + np.abs(ppeak_val) >= negpos_mv:
so_events[nx] = {'pn_zcross1': row.start, 'pn_zcross2': row.end, 'npeak': npeak_time,
'ppeak': ppeak_time, 'npeak_minus2s': npeak_time - datetime.timedelta(seconds=2),
'npeak_plus2s': npeak_time + datetime.timedelta(seconds=2)}
nx += 1
# otherwise check ratio thresholds
elif method == 'ratio':
# npeak_val can be anything
# if negative-positive peak amplitude is >= 1.75x npeak_val
if np.abs(npeak_val) + np.abs(ppeak_val) >= 1.75*np.abs(npeak_val):
so_events[nx] = {'pn_zcross1': row.start, 'pn_zcross2': row.end, 'npeak': npeak_time,
'ppeak': ppeak_time, 'npeak_minus2s': npeak_time - datetime.timedelta(seconds=2),
'npeak_plus2s': npeak_time + datetime.timedelta(seconds=2)}
nx += 1
self.so_zxings = so_zxings
self.so_events[i] = so_events
def soMultiIndex(self):
""" combine dataframes into a multiIndex dataframe"""
# reset column levels
self.sofiltEEG.columns = pd.MultiIndex.from_arrays([self.channels, np.repeat(('Filtered'), len(self.channels))],names=['Channel','datatype'])
self.spsofiltEEG.columns = pd.MultiIndex.from_arrays([self.channels, np.repeat(('Filtered'), len(self.channels))],names=['Channel','datatype'])
# list df vars for index specs
# dfs =[self.sofiltEEG] # for > speed, don't store spinfilt_RMS as an attribute
# calcs = ['Filtered']
# lvl0 = np.repeat(self.channels, len(calcs))
# lvl1 = calcs*len(self.channels)
# # combine & custom sort --> what was this for??
# self.so_calcs = pd.concat(dfs, axis=1).reindex(columns=[lvl0, lvl1])
def detect_so(self, wn=[0.1, 4], order=2, method='absolute', posx_thres = [0.9, 2], negposx_thres = 300, npeak_thres = -80,
negpos_thres = 140, spso_wn_pass = [0.1, 17], spso_wn_stop = [4.5, 7.5], spso_order=8):
""" Detect slow oscillations by channel
TO DO: Update docstring
Parameters
----------
wn: list (default: [0.1, 4])
Butterworth filter window
order: int (default: 2)
Butterworth filter order (default of 2x2 from Massimini et al., 2004)
method: str (default: 'absolute')
SO detection method. [Options: 'absolute', 'ratio']
'absolute' employs absolute voltage values for npeak_thres and negpos_thres.
'ratio' sets npeak_thres to None and negpos_thres to 1.75x the negative peak
voltage for a given detection (ratio derived from Massimini 2004)
* NOTE: 'ratio' should be used if reference is a single scalp electrode (e.g. FCz), which would
result in variable absolute amplitudes according to electrode location
posx_thres: list of float (default: [0.9, 2])
threshold of consecutive positive-negative zero crossings in seconds
negposx_thres: int (default: 300)
minimum time (in milliseconds) between positive-to-negative and negative-to-positive zero crossing
npeak_thres: int (default: -80)
negative peak threshold in microvolts
negpos_thres: int (default: 140)
minimum amplitude threshold for negative to positive peaks
Returns
-------
self.so_sos
self.so_filtEEG
self.so_calcs
self.so_zxings
self.so_events
self.so_rejects
"""
self.metadata['so_analysis'] = {'so_filtwindow': wn, 'so_filtorder_half': order, 'method': method,
'posx_thres': posx_thres, 'negposx_thres': negposx_thres, 'npeak_thres': npeak_thres,
'negpos_thres': negpos_thres}
# set attributes
self.so_attributes()
# make butterworth filter
self.make_butter_so(wn, order)
self.make_butter_spso(spso_wn_pass, spso_wn_stop, spso_order)
# loop through channels (all channels for plotting ease)
for i in self.channels:
# Filter
self.sofilt(i)
self.spsofilt(i)
# Detect SO
self.get_so(i, method, posx_thres, negposx_thres, npeak_thres, negpos_thres)
# combine dataframes
print('Combining dataframes...')
self.soMultiIndex()
print('done.')
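# Hedged usage sketch chaining the detection and analysis methods defined in this class:
#   obj.detect_so(wn=[0.1, 4], order=2, method='absolute')
#   obj.analyze_so()
#   obj.align_spindles()
#   obj.spso_distribution()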
def analyze_so(self, zmethod='trough'):
""" starting code for slow oscillation statistics/visualizations """
## create dict of dataframes for slow oscillation analysis
print('Creating individual dataframes...')
so = {}
for chan in self.so_events.keys():
so[chan] = {}
for i, s in self.so_events[chan].items():
# create individual df for each SO
start = self.so_events[chan][i]['npeak_minus2s']
end = self.so_events[chan][i]['npeak_plus2s']
so_data = self.data[chan]['Raw'].loc[start:end]
so_filtdata = self.sofiltEEG[chan]['Filtered'].loc[start:end]
spso_filtdata = self.spsofiltEEG[chan]['Filtered'].loc[start:end]
# find & drop any NaN insertions at 1ms sfreq (likely artifact from blocking data)
nan_idx = [e for e, x in enumerate(np.diff(so_data.index)) if int(x) == 3000000]
if len(nan_idx) > 0:
so_data = so_data.drop(so_data.index[nan_idx])
so_filtdata = so_filtdata.drop(so_filtdata.index[nan_idx])
spso_filtdata = spso_filtdata.drop(spso_filtdata.index[nan_idx])
# set new index so that each SO is zero-centered around the negative peak
ms1 = list(range(-2000, 0, int(1/self.metadata['analysis_info']['s_freq']*1000)))
ms2 = [-x for x in ms1[::-1]]
id_ms = ms1 + [0] + ms2
# create new dataframe
so[chan][i] = pd.DataFrame(index=id_ms)
so[chan][i].index.name='id_ms'
# if the SO is not a full 2s from the beginning OR if there's a data break < 2 seconds before the peak
if (start < self.data.index[0]) or (start < so_data.index[0]):
# extend the df index to the full 2s
time_freq = str(int(1/self.metadata['analysis_info']['s_freq']*1000000))+'us'
time = pd.date_range(start=start, end=end, freq=time_freq)
so[chan][i]['time'] = time
# prepend NaNs to the beginning of the EEG data
nans = np.repeat(np.NaN, len(time)-len(so_data))
data_extended = list(nans) + list(so_data.values)
so[chan][i]['Raw'] = data_extended
filtdata_extended = list(nans) + list(so_filtdata.values)
so[chan][i]['sofilt'] = filtdata_extended
spsofiltdata_extended = list(nans) + list(spso_filtdata.values)
so[chan][i]['spsofilt'] = spsofiltdata_extended
# if the SO is not a full 2s from the end OR if there's a data break < 2 seconds after the peak
elif (end > self.data.index[-1]) or (end > so_data.index[-1]):
# extend the df index to the full 2s
time_freq = str(int(1/self.metadata['analysis_info']['s_freq']*1000000))+'us'
time = pd.date_range(start=start, end=end, freq=time_freq)
so[chan][i]['time'] = time
# append NaNs onto the end of the EEG data
nans = np.repeat(np.NaN, len(time)-len(so_data))
data_extended = list(so_data.values) + list(nans)
so[chan][i]['Raw'] = data_extended
filtdata_extended = list(so_filtdata.values) + list(nans)
so[chan][i]['sofilt'] = filtdata_extended
spsofiltdata_extended = list(spso_filtdata.values) + list(nans)
so[chan][i]['spsofilt'] = spsofiltdata_extended
else:
so[chan][i]['time'] = so_data.index
so[chan][i]['Raw'] = so_data.values
so[chan][i]['sofilt'] = so_filtdata.values
so[chan][i]['spsofilt'] = spso_filtdata.values
self.so = so
print('Dataframes created. Slow oscillation data stored in obj.so.')
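# Worked example of the zero-centred index built above (illustrative s_freq = 500 Hz):
# the step is int(1/500*1000) = 2 ms, so id_ms runs -2000, -1998, ..., -2, 0, 2, ..., 2000,
# centring each slow oscillation on its negative peak.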
def align_spindles(self):
""" Align spindles along slow oscillations """
print('Aligning spindles to slow oscillations...')
so = self.so
data = self.data
spindles = self.spindles
# create a dictionary of SO indices
so_dict = {}
for chan in so:
so_dict[chan] = [so[chan][i].time.values for i in so[chan]]
# flatten the dictionary into a boolean df
so_bool_dict = {}
for chan in so_dict:
if chan not in ['EOG_L', 'EOG_R', 'EKG']:
so_flat = [time for so in so_dict[chan] for time in so]
so_bool_dict[chan] = np.isin(data.index.values, so_flat)
so_bool = pd.DataFrame(so_bool_dict, index=data.index)
# create a spindle boolean df
spin_bool_dict = {}
for chan in spindles.keys():
if chan not in ['EOG_L', 'EOG_R', 'EKG']:
spins_tlist = [df.time.values for df in spindles[chan].values()]
spins_flat = [time for spindle in spins_tlist for time in spindle]
spin_bool_dict[chan] = np.isin(data.index.values, spins_flat)
spin_bool = pd.DataFrame(spin_bool_dict, index=data.index)
# create a map of slow oscillations to spindles
so_spin_map = {}
for chan in spindles.keys():
so_spin_map[chan] = {}
so_flat = [time for so in so_dict[chan] for time in so]
# for each spindle
for e_spin, spin in spindles[chan].items():
# grab the trough of the filtered spindle
spin_trough = np.datetime64(spin.loc[0].time)
# if spindle trough overlaps w/ SO +/- 2s:
if spin_trough in so_flat:
for e_so, so_times in enumerate(so_dict[chan]):
if spin_trough in so_times:
try:
so_spin_map[chan][e_so].append(e_spin)
except KeyError:
so_spin_map[chan][e_so] = [e_spin]
print('Compiling aggregate dataframe...')
# Make aggregate dataframe
spso_aggregates = {}
for chan in so.keys():
if chan not in ['EOG_L', 'EOG_R', 'EKG']:
spso_aggregates[chan] = {}
for so_idx, spins in so_spin_map[chan].items():
spso_agg = so[chan][so_idx]
for s in spins:
# add spindle filtered and spso filtered data for each spindle
spso_agg = spso_agg.join(self.spfiltEEG[(chan, 'Filtered')].loc[spindles[chan][s].time.values].rename('spin_'+str(s)+'_spfilt'),
on='time', how='outer')
spso_aggregates[chan][so_idx] = spso_agg
self.so_bool = so_bool
self.spin_bool = spin_bool
self.so_spin_map = so_spin_map
self.spso_aggregates = spso_aggregates
print('Alignment complete. Aggregate data stored in obj.spso_aggregates.\n')
def spso_distribution(self):
""" get distribution of spindles along slow oscillations by cluster """
print('Calculating spindle distribution along slow oscillations...')
# create dicts to hold result
spin_dist_bool = {'all':{'0':{}, '1':{}}, 'by_chan':{}}
spin_dist = {'all':{'0':{}, '1':{}}, 'by_chan':{}}
# Make boolean arrays of spindle distribution
for chan in self.spso_aggregates.keys():
spin_dist_bool['by_chan'][chan] = {'0':{}, '1':{}}
# iterate over individual SO dataframes
for so_id, df in self.spso_aggregates[chan].items():
# grab spindle columns
spin_cols = [x for x in df.columns if x.split('_')[0] == 'spin']
for spin in spin_cols:
# get index & cluster of spindle
spin_idx = int(spin.split('_')[1])
clust = int(self.spindle_stats_i[(self.spindle_stats_i.chan == chan) & (self.spindle_stats_i.spin == spin_idx)].cluster.values)
# set spindle column & idx labels, save boolean values to dict
spin_label = chan + '_' + str(spin_idx)
spin_dist_bool['all'][str(clust)][spin_label] = df[df.index.notna()][spin].notna().values
spin_dist_bool['by_chan'][chan][str(clust)][spin_idx] = df[df.index.notna()][spin].notna().values
idx = df[df.index.notna()].index
# create series & normalize from dataframe
for clust, dct in spin_dist_bool['all'].items():
# calculate # of spindles at each timedelta
bool_df = pd.DataFrame(dct, index=idx)
dist_ser = bool_df.sum(axis=1)
# normalize the values to total # of spindles in that cluster
dist_norm = dist_ser/len(bool_df.columns)
spin_dist['all'][str(clust)]['dist'] = dist_ser
spin_dist['all'][str(clust)]['dist_norm'] = dist_norm
# Get distribution by channel
for chan, clst_dict in spin_dist_bool['by_chan'].items():
spin_dist['by_chan'][chan] = {'0':{}, '1':{}}
for clust, dct in clst_dict.items():
# calculate # of spindles at each timedelta
bool_df = pd.DataFrame(dct, index=idx)
dist_ser = bool_df.sum(axis=1)
# normalize the values to total # of spindles in that cluster
dist_norm = dist_ser/len(bool_df.columns)
spin_dist['by_chan'][chan][str(clust)]['dist'] = dist_ser
spin_dist['by_chan'][chan][str(clust)]['dist_norm'] = dist_norm
# use channel distributions to get distributions by location
# assign anterior-posterior characters
a_chars = ['f']
p_chars = ['p', 'o', 't']
c_chans = ['a1', 't9', 't3', 'c5', 'c3', 'c1', 'cz', 'c2', 'c4', 'c6', 't4', 't10', 'a2']
spin_dist_bool['AP'] = {'A':{}, 'P':{}}
spin_dist_bool['LR'] = {'L':{}, 'R':{}}
spin_dist_bool['quads'] = {'al':{}, 'ar':{}, 'pl':{}, 'pr':{}}
# recategorize channels into AP/RL/quads dicts
for chan, spso_dict in spin_dist_bool['by_chan'].items():
# assign anterior-posterior
if chan.casefold() in c_chans:
ap = 'C'
elif any((c.casefold() in a_chars) for c in chan):
ap = 'A'
elif any((c.casefold() in p_chars) for c in chan):
ap = 'P'
# assign RL
if chan[-1] == 'z':
rl = 'C'
elif int(chan[-1]) % 2 == 0:
rl = 'R'
else:
rl = 'L'
for clust, clust_dict in spso_dict.items():
for spin, dct in clust_dict.items():
# give dict a new name
dname = chan + '_' + clust + '_' + str(spin)
# move item into proper dicts
if ap == 'A':
spin_dist_bool['AP']['A'][dname] = dct
if rl == 'R':
spin_dist_bool['LR']['R'][dname] = dct
spin_dist_bool['quads']['ar'][dname] = dct
elif rl == 'L':
spin_dist_bool['LR']['L'][dname] = dct
spin_dist_bool['quads']['al'][dname] = dct
elif ap == 'P':
spin_dist_bool['AP']['P'][dname] = dct
if rl == 'R':
spin_dist_bool['LR']['R'][dname] = dct
spin_dist_bool['quads']['pr'][dname] = dct
elif rl == 'L':
spin_dist_bool['LR']['L'][dname] = dct
spin_dist_bool['quads']['pl'][dname] = dct
# get distributions for dicts
dicts = ['AP', 'LR', 'quads']
for d in dicts:
spin_dist[d] = {}
for group, bool_dict in spin_dist_bool[d].items():
spin_dist[d][group] = {}
bool_df = pd.DataFrame(bool_dict, index=idx)
"""
Unit test of Inverse Transform
"""
import unittest
import pandas as pd
import numpy as np
import category_encoders as ce
from sklearn.compose import ColumnTransformer
import sklearn.preprocessing as skp
import catboost as cb
import sklearn
import lightgbm
import xgboost
from shapash.utils.transform import inverse_transform, apply_preprocessing
from shapash.utils.columntransformer_backend import get_feature_names, get_names, get_list_features_names
# TODO
# StandardScaler return object vs float vs int
# Target encoding return object vs float
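# All tests below exercise the same round-trip pattern; a minimal hedged sketch using only
# calls that appear in the tests themselves (column names are illustrative):
#   enc = ColumnTransformer([('onehot', ce.OneHotEncoder(), ['city'])], remainder='drop')
#   enc.fit(train)
#   result = pd.DataFrame(enc.transform(test))
#   result.columns = ['col1_0', 'col1_1']            # rename to the expected encoded names
#   original = inverse_transform(result, enc)        # should recover the original categories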
class TestInverseTransformColumnsTransformer(unittest.TestCase):
def test_inv_transform_ct_1(self):
"""
test inv_transform_ct with multiple encoding and drop option
"""
train = pd.DataFrame({'city': ['chicago', 'paris'],
'state': ['US', 'FR'],
'other': ['A', 'B']},
index=['index1', 'index2'])
enc = ColumnTransformer(
transformers=[
('onehot_ce', ce.OneHotEncoder(), ['city', 'state']),
('onehot_skp', skp.OneHotEncoder(), ['city', 'state'])
],
remainder='drop')
enc.fit(train)
test = pd.DataFrame({'city': ['chicago', 'chicago', 'paris'],
'state': ['US', 'FR', 'FR'],
'other': ['A', 'B', 'C']},
index=['index1', 'index2', 'index3'])
expected = pd.DataFrame({'onehot_ce_city': ['chicago', 'chicago', 'paris'],
'onehot_ce_state': ['US', 'FR', 'FR'],
'onehot_skp_city': ['chicago', 'chicago', 'paris'],
'onehot_skp_state': ['US', 'FR', 'FR']},
index=['index1', 'index2', 'index3'])
result = pd.DataFrame(enc.transform(test))
result.columns = ['col1_0', 'col1_1', 'col2_0', 'col2_1', 'col3_0', 'col3_1', 'col4_0', 'col4_1']
result.index = ['index1', 'index2', 'index3']
original = inverse_transform(result, enc)
pd.testing.assert_frame_equal(original, expected)
def test_inv_transform_ct_2(self):
"""
test inv_transform_ct with multiple encoding and passthrough option
"""
train = pd.DataFrame({'city': ['chicago', 'paris'],
'state': ['US', 'FR'],
'other': ['A', 'B']},
index=['index1', 'index2'])
enc = ColumnTransformer(
transformers=[
('onehot_ce', ce.OneHotEncoder(), ['city', 'state']),
('onehot_skp', skp.OneHotEncoder(), ['city', 'state'])
],
remainder='passthrough')
enc.fit(train)
test = pd.DataFrame({'city': ['chicago', 'chicago', 'paris'],
'state': ['US', 'FR', 'FR'],
'other': ['A', 'B', 'C']},
index=['index1', 'index2', 'index3'])
expected = pd.DataFrame({'onehot_ce_city': ['chicago', 'chicago', 'paris'],
'onehot_ce_state': ['US', 'FR', 'FR'],
'onehot_skp_city': ['chicago', 'chicago', 'paris'],
'onehot_skp_state': ['US', 'FR', 'FR'],
'other': ['A', 'B', 'C']},
index=['index1', 'index2', 'index3'])
result = pd.DataFrame(enc.transform(test))
result.columns = ['col1_0', 'col1_1', 'col2_0', 'col2_1', 'col3_0', 'col3_1', 'col4_0', 'col4_1', 'other']
result.index = ['index1', 'index2', 'index3']
original = inverse_transform(result, enc)
pd.testing.assert_frame_equal(original, expected)
def test_inv_transform_ct_3(self):
"""
test inv_transform_ct with multiple encoding and dictionary
"""
train = pd.DataFrame({'city': ['chicago', 'paris'],
'state': ['US', 'FR'],
'other': ['A', 'B']},
index=['index1', 'index2'])
enc = ColumnTransformer(
transformers=[
('onehot_ce', ce.OneHotEncoder(), ['city', 'state']),
('onehot_skp', skp.OneHotEncoder(), ['city', 'state'])
],
remainder='passthrough')
enc.fit(train)
test = pd.DataFrame({'city': ['chicago', 'chicago', 'paris'],
'state': ['US', 'FR', 'FR'],
'other': ['A', 'B', 'C']},
index=['index1', 'index2', 'index3'])
expected = pd.DataFrame({'onehot_ce_city': ['CH', 'CH', 'PR'],
'onehot_ce_state': ['US-FR', 'US-FR', 'US-FR'],
'onehot_skp_city': ['chicago', 'chicago', 'paris'],
'onehot_skp_state': ['US', 'FR', 'FR'],
'other': ['A-B', 'A-B', 'C']},
index=['index1', 'index2', 'index3'])
result = pd.DataFrame(enc.transform(test))
result.columns = ['col1_0', 'col1_1', 'col2_0', 'col2_1', 'col3_0', 'col3_1', 'col4_0', 'col4_1', 'other']
result.index = ['index1', 'index2', 'index3']
input_dict1 = dict()
input_dict1['col'] = 'onehot_ce_city'
input_dict1['mapping'] = pd.Series(data=['chicago', 'paris'], index=['CH', 'PR'])
input_dict1['data_type'] = 'object'
input_dict2 = dict()
input_dict2['col'] = 'other'
input_dict2['mapping'] = pd.Series(data=['A', 'B', 'C'], index=['A-B', 'A-B', 'C'])
input_dict2['data_type'] = 'object'
input_dict3 = dict()
input_dict3['col'] = 'onehot_ce_state'
input_dict3['mapping'] = pd.Series(data=['US', 'FR'], index=['US-FR', 'US-FR'])
input_dict3['data_type'] = 'object'
list_dict = [input_dict2, input_dict3]
original = inverse_transform(result, [enc,input_dict1,list_dict])
pd.testing.assert_frame_equal(original, expected)
def test_inv_transform_ct_4(self):
"""
test inv_transform_ct with single target category encoders and passthrough option
"""
y = pd.DataFrame(data=[0, 1, 1, 1], columns=['y'])
train = pd.DataFrame({'city': ['chicago', 'paris', 'paris', 'chicago'],
'state': ['US', 'FR', 'FR', 'US'],
'other': ['A', 'B', 'B', 'B']})
enc = ColumnTransformer(
transformers=[
('target', ce.TargetEncoder(), ['city', 'state'])
],
remainder='passthrough')
test = pd.DataFrame({'city': ['chicago', 'chicago', 'paris'],
'state': ['US', 'FR', 'FR'],
'other': ['A', 'B', 'C']})
expected = pd.DataFrame(data={'target_city': ['chicago', 'chicago', 'paris'],
'target_state': ['US', 'FR', 'FR'],
'other': ['A', 'B', 'C']},
dtype=object)
enc.fit(train, y)
result = pd.DataFrame(enc.transform(test))
result.columns = ['col1', 'col2', 'other']
original = inverse_transform(result, enc)
pd.testing.assert_frame_equal(original, expected)
def test_inv_transform_ct_5(self):
"""
test inv_transform_ct with single target category encoders and drop option
"""
y = pd.DataFrame(data=[0, 1, 0, 0], columns=['y'])
train = pd.DataFrame({'city': ['chicago', 'paris', 'chicago', 'paris'],
'state': ['US', 'FR', 'US', 'FR'],
'other': ['A', 'B', 'A', 'B']})
enc = ColumnTransformer(
transformers=[
('target', ce.TargetEncoder(), ['city', 'state'])
],
remainder='drop')
enc.fit(train, y)
test = pd.DataFrame({'city': ['chicago', 'chicago', 'paris'],
'state': ['US', 'FR', 'FR'],
'other': ['A', 'B', 'C']})
expected = pd.DataFrame(data={
'target_city': ['chicago', 'chicago', 'paris'],
'target_state': ['US', 'FR', 'FR']})
result = pd.DataFrame(enc.transform(test))
result.columns = ['col1', 'col2']
original = inverse_transform(result, enc)
pd.testing.assert_frame_equal(original, expected)
def test_inv_transform_ct_6(self):
"""
test inv_transform_ct with Ordinal Category Encoder and drop option
"""
y = pd.DataFrame(data=[0, 1], columns=['y'])
train = pd.DataFrame({'city': ['chicago', 'paris'],
'state': ['US', 'FR'],
'other': ['A', 'B']})
enc = ColumnTransformer(
transformers=[
('ordinal', ce.OrdinalEncoder(), ['city', 'state'])
],
remainder='drop')
enc.fit(train, y)
test = pd.DataFrame({'city': ['chicago', 'chicago', 'paris'],
'state': ['US', 'FR', 'FR'],
'other': ['A', 'B', 'C']})
expected = pd.DataFrame({'ordinal_city': ['chicago', 'chicago', 'paris'],
'ordinal_state': ['US', 'FR', 'FR']})
result = pd.DataFrame(enc.transform(test))
result.columns = ['col1', 'col2']
original = inverse_transform(result, enc)
pd.testing.assert_frame_equal(original, expected)
def test_inv_transform_ct_7(self):
"""
test inv_transform_ct with category Ordinal Encoder and passthrough option
"""
y = pd.DataFrame(data=[0, 1], columns=['y'])
train = pd.DataFrame({'city': ['chicago', 'paris'],
'state': ['US', 'FR'],
'other': ['A', 'B']})
enc = ColumnTransformer(
transformers=[
('ordinal', ce.OrdinalEncoder(), ['city', 'state'])
],
remainder='passthrough')
enc.fit(train, y)
test = pd.DataFrame({'city': ['chicago', 'chicago', 'paris'],
'state': ['US', 'FR', 'FR'],
'other': ['A', 'B', 'C']})
expected = pd.DataFrame({'ordinal_city': ['chicago', 'chicago', 'paris'],
'ordinal_state': ['US', 'FR', 'FR'],
'other': ['A', 'B', 'C']})
result = pd.DataFrame(enc.transform(test))
result.columns = ['col1', 'col2', 'other']
original = inverse_transform(result, enc)
pd.testing.assert_frame_equal(original, expected)
def test_inv_transform_ct_8(self):
"""
test inv_transform_ct with Binary encoder and drop option
"""
y = pd.DataFrame(data=[0, 1], columns=['y'])
train = pd.DataFrame({'city': ['chicago', 'paris'],
'state': ['US', 'FR'],
'other': ['A', 'B']})
enc = ColumnTransformer(
transformers=[
('binary', ce.BinaryEncoder(), ['city', 'state'])
],
remainder='drop')
enc.fit(train, y)
test = pd.DataFrame({'city': ['chicago', 'chicago', 'paris'],
'state': ['US', 'FR', 'FR'],
'other': ['A', 'B', 'C']})
expected = pd.DataFrame({'binary_city': ['chicago', 'chicago', 'paris'],
'binary_state': ['US', 'FR', 'FR']})
result = pd.DataFrame(enc.transform(test))
result.columns = ['col1_0', 'col1_1', 'col2_0', 'col2_1']
original = inverse_transform(result, enc)
pd.testing.assert_frame_equal(original, expected)
def test_inv_transform_ct_9(self):
"""
test inv_transform_ct with Binary Encoder and passthrough option
"""
y = pd.DataFrame(data=[0, 1], columns=['y'])
train = pd.DataFrame({'city': ['chicago', 'paris'],
'state': ['US', 'FR'],
'other': ['A', 'B']})
enc = ColumnTransformer(
transformers=[
('binary', ce.BinaryEncoder(), ['city', 'state'])
],
remainder='passthrough')
enc.fit(train, y)
test = pd.DataFrame({'city': ['chicago', 'chicago', 'paris'],
'state': ['US', 'FR', 'FR'],
'other': ['A', 'B', 'C']})
expected = pd.DataFrame({'binary_city': ['chicago', 'chicago', 'paris'],
'binary_state': ['US', 'FR', 'FR'],
'other': ['A', 'B', 'C']})
result = pd.DataFrame(enc.transform(test))
result.columns = ['col1_0', 'col1_1', 'col2_0', 'col2_1', 'other']
original = inverse_transform(result, enc)
pd.testing.assert_frame_equal(original, expected)
def test_inv_transform_ct_10(self):
"""
test inv_transform_ct with BaseN Encoder and drop option
"""
y = pd.DataFrame(data=[0, 1], columns=['y'])
train = pd.DataFrame({'city': ['chicago', 'paris'],
'state': ['US', 'FR'],
'other': ['A', 'B']})
enc = ColumnTransformer(
transformers=[
('basen', ce.BaseNEncoder(), ['city', 'state'])
],
remainder='drop')
enc.fit(train, y)
test = pd.DataFrame({'city': ['chicago', 'chicago', 'paris'],
'state': ['US', 'FR', 'FR'],
'other': ['A', 'B', 'C']})
expected = pd.DataFrame({'basen_city': ['chicago', 'chicago', 'paris'],
'basen_state': ['US', 'FR', 'FR']})
result = pd.DataFrame(enc.transform(test))
result.columns = ['col1_0', 'col1_1', 'col2_0', 'col2_1']
original = inverse_transform(result, enc)
pd.testing.assert_frame_equal(original, expected)
def test_inv_transform_ct_11(self):
"""
test inv_transform_ct with BaseN Encoder and passthrough option
"""
y = pd.DataFrame(data=[0, 1], columns=['y'])
train = pd.DataFrame({'city': ['chicago', 'paris'],
'state': ['US', 'FR'],
'other': ['A', 'B']})
enc = ColumnTransformer(
transformers=[
('basen', ce.BaseNEncoder(), ['city', 'state'])
],
remainder='passthrough')
enc.fit(train, y)
test = pd.DataFrame({'city': ['chicago', 'chicago', 'paris'],
'state': ['US', 'FR', 'FR'],
'other': ['A', 'B', 'C']})
expected = pd.DataFrame({'basen_city': ['chicago', 'chicago', 'paris'],
'basen_state': ['US', 'FR', 'FR'],
'other': ['A', 'B', 'C']})
result = pd.DataFrame(enc.transform(test))
result.columns = ['col1_0', 'col1_1', 'col2_0', 'col2_1', 'other']
original = inverse_transform(result, enc)
pd.testing.assert_frame_equal(original, expected)
def test_inv_transform_ct_12(self):
"""
test inv_transform_ct with single OneHotEncoder and drop option
"""
y = pd.DataFrame(data=[0, 1], columns=['y'])
train = pd.DataFrame({'city': ['chicago', 'paris'],
'state': ['US', 'FR'],
'other': ['A', 'B']})
enc = ColumnTransformer(
transformers=[
('onehot', ce.OneHotEncoder(), ['city', 'state'])
],
remainder='drop')
enc.fit(train, y)
test = pd.DataFrame({'city': ['chicago', 'chicago', 'paris'],
'state': ['US', 'FR', 'FR'],
'other': ['A', 'B', 'C']})
expected = pd.DataFrame({'onehot_city': ['chicago', 'chicago', 'paris'],
'onehot_state': ['US', 'FR', 'FR']})
result = pd.DataFrame(enc.transform(test))
result.columns = ['col1_0', 'col1_1', 'col2_0', 'col2_1']
original = inverse_transform(result, enc)
pd.testing.assert_frame_equal(original, expected)
def test_inv_transform_ct_13(self):
"""
test inv_transform_ct with OneHotEncoder and passthrough option
"""
y = pd.DataFrame(data=[0, 1], columns=['y'])
train = pd.DataFrame({'city': ['chicago', 'paris'],
'state': ['US', 'FR'],
'other': ['A', 'B']})
enc = ColumnTransformer(
transformers=[
('onehot', ce.OneHotEncoder(), ['city', 'state'])
],
remainder='passthrough')
enc.fit(train, y)
test = pd.DataFrame({'city': ['chicago', 'chicago', 'paris'],
'state': ['US', 'FR', 'FR'],
'other': ['A', 'B', 'C']})
expected = pd.DataFrame({'onehot_city': ['chicago', 'chicago', 'paris'],
'onehot_state': ['US', 'FR', 'FR'],
'other': ['A', 'B', 'C']})
result = pd.DataFrame(enc.transform(test))
result.columns = ['col1_0', 'col1_1', 'col2_0', 'col2_1', 'other']
original = inverse_transform(result, enc)
pd.testing.assert_frame_equal(original, expected)
def test_inv_transform_ct_14(self):
"""
test inv_transform_ct with OneHotEncoder Sklearn and drop option
"""
y = pd.DataFrame(data=[0, 1], columns=['y'])
train = pd.DataFrame({'city': ['chicago', 'paris'],
'state': ['US', 'FR'],
'other': ['A', 'B']})
enc = ColumnTransformer(
transformers=[
('onehot', skp.OneHotEncoder(), ['city', 'state'])
],
remainder='drop')
enc.fit(train, y)
test = pd.DataFrame({'city': ['chicago', 'chicago', 'paris'],
'state': ['US', 'FR', 'FR'],
'other': ['A', 'B', 'C']})
expected = pd.DataFrame({'onehot_city': ['chicago', 'chicago', 'paris'],
'onehot_state': ['US', 'FR', 'FR']})
result = pd.DataFrame(enc.transform(test))
result.columns = ['col1_0', 'col1_1', 'col2_0', 'col2_1']
original = inverse_transform(result, enc)
pd.testing.assert_frame_equal(original, expected)
def test_inv_transform_ct_15(self):
"""
test inv_transform_ct with OneHotEncoder Sklearn and passthrough option
"""
y = pd.DataFrame(data=[0, 1], columns=['y'])
train = pd.DataFrame({'city': ['chicago', 'paris'],
'state': ['US', 'FR'],
'other': ['A', 'B']})
enc = ColumnTransformer(
transformers=[
('onehot', skp.OneHotEncoder(), ['city', 'state'])
],
remainder='passthrough')
enc.fit(train, y)
test = pd.DataFrame({'city': ['chicago', 'chicago', 'paris'],
'state': ['US', 'FR', 'FR'],
'other': ['A', 'B', 'C']})
expected = pd.DataFrame({'onehot_city': ['chicago', 'chicago', 'paris'],
'onehot_state': ['US', 'FR', 'FR'],
'other': ['A', 'B', 'C']})
result = pd.DataFrame(enc.transform(test))
result.columns = ['col1_0', 'col1_1', 'col2_0', 'col2_1', 'other']
original = inverse_transform(result, enc)
pd.testing.assert_frame_equal(original, expected)
def test_inv_transform_ct_16(self):
"""
test inv_transform_ct with OrdinalEncoder Sklearn and drop option
"""
y = pd.DataFrame(data=[0, 1], columns=['y'])
train = pd.DataFrame({'city': ['chicago', 'paris'],
'state': ['US', 'FR'],
'other': ['A', 'B']})
enc = ColumnTransformer(
transformers=[
('ordinal', skp.OrdinalEncoder(), ['city', 'state'])
],
remainder='drop')
enc.fit(train, y)
test = pd.DataFrame({'city': ['chicago', 'chicago', 'paris'],
'state': ['US', 'FR', 'FR'],
'other': ['A', 'B', 'C']})
expected = pd.DataFrame({'ordinal_city': ['chicago', 'chicago', 'paris'],
'ordinal_state': ['US', 'FR', 'FR']})
result = pd.DataFrame(enc.transform(test))
result.columns = ['col1', 'col2']
original = inverse_transform(result, enc)
pd.testing.assert_frame_equal(original, expected)
def test_inv_transform_ct_17(self):
"""
test inv_transform_ct with OrdinalEncoder Sklearn and passthrough option
"""
y = pd.DataFrame(data=[0, 1], columns=['y'])
train = pd.DataFrame({'city': ['chicago', 'paris'],
'state': ['US', 'FR'],
'other': ['A', 'B']})
enc = ColumnTransformer(
transformers=[
('ordinal', skp.OrdinalEncoder(), ['city', 'state'])
],
remainder='passthrough')
enc.fit(train, y)
test = pd.DataFrame({'city': ['chicago', 'chicago', 'paris'],
'state': ['US', 'FR', 'FR'],
'other': ['A', 'B', 'C']})
expected = pd.DataFrame({'ordinal_city': ['chicago', 'chicago', 'paris'],
'ordinal_state': ['US', 'FR', 'FR'],
'other': ['A', 'B', 'C']})
result = pd.DataFrame(enc.transform(test))
result.columns = ['col1', 'col2', 'other']
original = inverse_transform(result, enc)
pd.testing.assert_frame_equal(original, expected)
def test_inv_transform_ct_18(self):
"""
test inv_transform_ct with StandardScaler Encoder Sklearn and passthrough option
"""
y = pd.DataFrame(data=[0, 1], columns=['y'])
train = pd.DataFrame({'num1': [0, 1],
'num2': [0, 2],
'other': ['A', 'B']})
enc = ColumnTransformer(
transformers=[
('std', skp.StandardScaler(), ['num1', 'num2'])
],
remainder='passthrough')
enc.fit(train, y)
test = pd.DataFrame({'num1': [0, 1, 1],
'num2': [0, 2, 3],
'other': ['A', 'B', 'C']})
expected = pd.DataFrame({'std_num1': [0.0, 1.0, 1.0],
'std_num2': [0.0, 2.0, 3.0],
'other': ['A', 'B', 'C']},
dtype=object)
result = pd.DataFrame(enc.transform(test))
result.columns = ['col1', 'col2', 'other']
original = inverse_transform(result, enc)
pd.testing.assert_frame_equal(original, expected)
def test_inv_transform_ct_19(self):
"""
test inv_transform_ct with StandardScaler Encoder Sklearn and drop option
"""
y = pd.DataFrame(data=[0, 1], columns=['y'])
train = pd.DataFrame({'num1': [0, 1],
'num2': [0, 2],
'other': ['A', 'B']})
enc = ColumnTransformer(
transformers=[
('std', skp.StandardScaler(), ['num1', 'num2'])
],
remainder='drop')
enc.fit(train, y)
test = pd.DataFrame({'num1': [0, 1, 1],
'num2': [0, 2, 3],
'other': ['A', 'B', 'C']})
expected = pd.DataFrame({'std_num1': [0.0, 1.0, 1.0],
'std_num2': [0.0, 2.0, 3.0]})
result = pd.DataFrame(enc.transform(test))
result.columns = ['col1', 'col2']
original = inverse_transform(result, enc)
pd.testing.assert_frame_equal(original, expected)
def test_inv_transform_ct_20(self):
"""
test inv_transform_ct with QuantileTransformer Encoder Sklearn and passthrough option
"""
y = pd.DataFrame(data=[0, 1], columns=['y'])
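# --- editor's illustrative sketch (added; not part of the original test module) ---
# The manual renaming of result.columns in the tests above relies on a property of sklearn's
# ColumnTransformer: output columns come from the declared transformers first, in declaration
# order, followed by any remainder='passthrough' columns. A minimal standalone check
# (assumes scikit-learn and pandas are installed; every name below is illustrative):
import pandas as pd
from sklearn.compose import ColumnTransformer
from sklearn.preprocessing import OneHotEncoder

frame = pd.DataFrame({'city': ['chicago', 'paris'], 'state': ['US', 'FR'], 'other': ['A', 'B']})
ct = ColumnTransformer([('onehot', OneHotEncoder(), ['city', 'state'])], remainder='passthrough')
out = ct.fit_transform(frame)
# 2 one-hot columns for 'city' + 2 for 'state' + the untouched 'other' column = 5 columns,
# which is why the tests can safely assign ['col1_0', 'col1_1', 'col2_0', 'col2_1', 'other'].
print(out.shape)  # expected: (2, 5)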
import utils
from models import wavenet, lstm, resnet_1, resnet_2
import warnings
warnings.filterwarnings('ignore')
from omegaconf import OmegaConf
import numpy as np
from sklearn.model_selection import StratifiedKFold
from sklearn.metrics import roc_auc_score
import pandas as pd
MODEL_NAMES_DICT = {
'wavenet': wavenet.Model,
"resnet_1": resnet_1.Model,
"resnet_2": resnet_2.Model,
"lstm": lstm.Model
}
def main(param):
utils.seed_everything(0)
utils.info('read csv...')
train, test, submit = utils.read_data("./data")
utils.info('read wave data...')
train_wave = utils.read_wave("./data/ecg/" + train["Id"] + ".npy")
train_y = train["target"]
train["sex"] = train["sex"].replace({"male": 0, "female": 1})
test["sex"] = test["sex"].replace({"male": 0, "female": 1})
if param.validation == "custom":
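# editor's note (added comment): "custom" validation stratifies the folds over the human-labelled
# samples only; the auto-labelled samples are appended to every training fold below, so the
# out-of-fold AUC is computed on human labels alone.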
human_mask = train['label_type'] == 'human'
train_meta_human = train[human_mask][["sex", "age"]]
train_wave_human = train_wave[human_mask]
train_meta_auto = train[~human_mask][["sex", "age"]]
train_wave_auto = train_wave[~human_mask]
train_y_human = train_y[human_mask]
train_y_auto = train_y[~human_mask]
kf = StratifiedKFold(n_splits=5, random_state=10, shuffle=True)
val_preds = np.zeros(train_meta_human.shape[0])
utils.info('start training...')
for (fold, (train_index, val_index)) in enumerate(kf.split(train_meta_human, train_y_human)):
utils.info(f"{'=' * 20} fold {fold + 1} {'=' * 20}")
# define the model inside the fold loop; reusing one model across folds would leak information
model = MODEL_NAMES_DICT[param.model_name](param)
train_input_wave = np.concatenate([
train_wave_human[train_index],
train_wave_auto
])
train_input_meta = np.concatenate([
train_meta_human.iloc[train_index],
train_meta_auto
])
train_y_concat = np.concatenate([
train_y_human.iloc[train_index],
train_y_auto
])
val_input_wave = train_wave_human[val_index]
val_input_meta = train_meta_human.iloc[val_index]
val_y_concat = train_y_human.iloc[val_index]
val_pred = model.fit(
[train_input_wave, train_input_meta],
train_y_concat,
[val_input_wave, val_input_meta],
val_y_concat,
fold
)
# be careful not to forget the fold argument; fit() returns val_pred
val_preds[val_index] += val_pred
utils.info("AUC score:", roc_auc_score(train_y[human_mask], val_preds))
pd.DataFrame(val_preds, columns=["pred"]).to_csv('./logs/{}/val_pred_custom.csv'.format(param.model_name))
elif param.validation == "naive":
train_meta = train[["sex", "age"]]
kf = StratifiedKFold(n_splits=5, random_state=10, shuffle=True)
val_preds = np.zeros(train_meta.shape[0])
utils.info('start training...')
for (fold, (train_index, val_index)) in enumerate(kf.split(train_meta, train_y)):
utils.info(f"{'=' * 20} fold {fold + 1} {'=' * 20}")
model = MODEL_NAMES_DICT[param.model_name](param)
val_pred = model.fit(
[train_wave[train_index], train_meta.iloc[train_index]],
train_y[train_index],
[train_wave[val_index], train_meta.iloc[val_index]],
train_y[val_index],
fold
)
# be careful not to forget the fold argument; fit() returns val_pred
val_preds[val_index] += val_pred
utils.info("AUC score:", roc_auc_score(train_y, val_preds))
pd.DataFrame(val_preds, columns=["pred"])
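# --- editor's illustrative sketch (added; not part of the original training script) ---
# The loops above accumulate out-of-fold predictions: every row is predicted exactly once by a
# model that never saw it, so a single roc_auc_score call scores the whole training set. A minimal,
# model-agnostic version of the same pattern (the dummy data and LogisticRegression are
# illustrative assumptions, not the project's models):
import numpy as np
from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import roc_auc_score
from sklearn.model_selection import StratifiedKFold

X, y = make_classification(n_samples=200, random_state=0)
oof = np.zeros(len(y))
for train_index, val_index in StratifiedKFold(n_splits=5, shuffle=True, random_state=10).split(X, y):
    clf = LogisticRegression(max_iter=1000).fit(X[train_index], y[train_index])
    oof[val_index] = clf.predict_proba(X[val_index])[:, 1]
print("out-of-fold AUC:", roc_auc_score(y, oof))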
# -*- coding: utf-8 -*-
import nose
import numpy as np
from datetime import datetime
from pandas.util import testing as tm
from pandas.core import config as cf
from pandas.compat import u
from pandas.tslib import iNaT
from pandas import (NaT, Float64Index, Series,
DatetimeIndex, TimedeltaIndex, date_range)
from pandas.types.dtypes import DatetimeTZDtype
from pandas.types.missing import (array_equivalent, isnull, notnull,
na_value_for_dtype)
_multiprocess_can_split_ = True
def test_notnull():
assert notnull(1.)
assert not notnull(None)
assert not notnull(np.NaN)
with cf.option_context("mode.use_inf_as_null", False):
assert notnull(np.inf)
assert notnull(-np.inf)
arr = np.array([1.5, np.inf, 3.5, -np.inf])
result = notnull(arr)
assert result.all()
with cf.option_context("mode.use_inf_as_null", True):
assert not notnull(np.inf)
assert not notnull(-np.inf)
arr = np.array([1.5, np.inf, 3.5, -np.inf])
result = notnull(arr)
assert result.sum() == 2
with cf.option_context("mode.use_inf_as_null", False):
for s in [tm.makeFloatSeries(), tm.makeStringSeries(),
tm.makeObjectSeries(), tm.makeTimeSeries(),
tm.makePeriodSeries()]:
assert (isinstance(isnull(s), Series))
def test_isnull():
assert not isnull(1.)
assert isnull(None)
assert isnull(np.NaN)
assert not isnull(np.inf)
assert not isnull(-np.inf)
# series
for s in [tm.makeFloatSeries(), tm.makeStringSeries(),
tm.makeObjectSeries(), tm.makeTimeSeries(),
tm.makePeriodSeries()]:
assert (isinstance(isnull(s), Series))
# frame
for df in [tm.makeTimeDataFrame(), tm.makePeriodFrame(),
tm.makeMixedDataFrame()]:
result = isnull(df)
expected = df.apply(isnull)
tm.assert_frame_equal(result, expected)
# panel
for p in [tm.makePanel(), tm.makePeriodPanel(), tm.add_nans(tm.makePanel())]:
result = isnull(p)
expected = p.apply(isnull)
tm.assert_panel_equal(result, expected)
# panel 4d
for p in [tm.makePanel4D(), tm.add_nans_panel4d(tm.makePanel4D())]:
result = isnull(p)
expected = p.apply(isnull)
tm.assert_panel4d_equal(result, expected)
def test_isnull_lists():
result = isnull([[False]])
exp = np.array([[False]])
assert (np.array_equal(result, exp))
result = isnull([[1], [2]])
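# --- editor's illustrative sketch (added; not part of the original test module) ---
# The truncated check above exercises element-wise null detection on nested lists. With a current
# pandas, where the same functionality is exposed as pd.isnull / pd.isna, the behaviour is:
import numpy as np
import pandas as pd

print(pd.isnull([[False]]))   # array([[False]])
print(pd.isnull([[1], [2]]))  # array([[False], [False]])
assert np.array_equal(pd.isnull([[1], [2]]), np.array([[False], [False]]))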
import os,sys,time,subprocess,re,gzip,platform
from math import ceil
from tqdm import *
import pandas as pd
import numpy as np
from tempfile import *
import scipy.stats as stats
from Bio import pairwise2
from Bio.pairwise2 import format_alignment
from difflib import SequenceMatcher
from pydna.assembly import Assembly
from pydna.dseqrecord import Dseqrecord
from Bio.Seq import Seq
from collections import defaultdict
from functools import partial
from multiprocessing import Process,Pool,Manager,Value
#from fsplit.filesplit import FileSplit
if platform.system()=='Linux':compress='zcat'
elif platform.system()=='Darwin':compress='gunzip -c'
else:
print ('2kupl runs on either Linux or macOS')
os._exit(0)
def dist(s1,s2,start=0):
if len(s1)!=len(s2):raise ValueError('undefined for sequences of unequal length')
hd=0
for e1,e2 in zip(s1[start:],s2[start:]):
if e1!=e2:hd+=1
if hd>1:return 9
return hd
#return sum(chr1!=chr2 for chr1,chr2 in zip(s1,s2))
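# editor's note (added example): dist computes a capped Hamming distance starting at `start`,
# e.g. dist('ACGTA', 'ACGTT') == 1, while any pair differing at two or more positions returns
# the sentinel value 9.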
def sizeof_fmt(num, suffix='B'):
for unit in ['','Ki','Mi','Gi','Ti','Pi','Ei','Zi']:
if abs(num) < 1024.0:
return "%3.1f %s%s" % (num, unit, suffix)
num /= 1024.0
return "%.1f %s%s" % (num, 'Yi', suffix)
def createindex(lock,fi):
global pair_T,pair_N
idx_head,idx_tail=defaultdict(lambda: defaultdict(list)),defaultdict(lambda: defaultdict(list))
dat=pd.read_csv(fi,header=None,index_col=None,sep=' ')#the 1st line in kmercount table is considered to be the head by kmerFilter
subwildkmers = dict(zip(dat.iloc[:,0],dat.iloc[:,1]))
for p in subwildkmers.keys():
idx_head[p[:15]]['ref'].append(p)
idx_tail[p[-15:]]['ref'].append(p)
for q in spekmer:
idx_head[q[:15]]['mut'].append(q)
idx_tail[q[-15:]]['mut'].append(q)
subpair_T,subpair_N=pairkmers(idx_head,idx_tail)
lock.acquire()
pair_N+=subpair_N
pair_T+=subpair_T
lock.release()
idx_head.clear()
idx_tail.clear()
def pairkmers(idx_head,idx_tail):
subpair_T,subpair_N=[],[]
for key in tqdm(idx_head.keys()):
if idx_head[key]['ref']==[] or idx_head[key]['mut']==[]:continue
for q in idx_head[key]['mut']:
for j in idx_head[key]['ref']:
if q==j:break#second check if kmer is common in both T&N
if dist(q,j,15)==1:
subpair_T.append(q)
subpair_N.append(j)
for key in tqdm(idx_tail.keys()):
if idx_tail[key]['ref']==[] or idx_tail[key]['mut']==[]:continue
for q in idx_tail[key]['mut']:
for j in idx_tail[key]['ref']:
if q==j:break
if dist(q,j,0)==1:
subpair_T.append(q)
subpair_N.append(j)
return (subpair_T,subpair_N)
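# editor's note (added comment): pairkmers matches each case-specific ("mut") k-mer with control
# ("ref") k-mers that share an identical 15-nt prefix or suffix and differ by exactly one base in
# the remainder; the matched pairs are returned as the parallel lists (subpair_T, subpair_N).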
def contig2kmer(contig,rev=False):
kmerlst=[]
if rev==False:contig=contig
else:contig=str(Seq(contig).reverse_complement())
for i in range(len(contig)-30):
kmerlst.append(contig[i:(i+31)])
return kmerlst
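# editor's note (added example): contig2kmer slides a 31-nt window over the contig (or over its
# reverse complement when rev=True); a 33-nt contig therefore yields the 3 overlapping 31-mers
# contig[0:31], contig[1:32] and contig[2:33].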
def maxoverlap(s1,s2):
d = SequenceMatcher(None,s1,s2)
pos_a, pos_b, size = d.find_longest_match(0, len(s1), 0, len(s2))
return (pos_a,pos_b,size)
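# editor's note (added example): maxoverlap returns the longest common block as
# (start_in_s1, start_in_s2, length); e.g. maxoverlap('ABCDEF', 'ZZCDEX') == (2, 2, 3) for the
# shared block 'CDE'.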
def assemDNA(lst):
res = lst[0]+'N'*100
for tarkmer in lst[1:]:
pos_s,pos_t,size = maxoverlap(res, tarkmer)
if size<10:continue
res=res.replace(res[pos_s:pos_s+31-pos_t],tarkmer)
return (res.strip('N'))
def CAP(contig,reads):
randseed=np.random.randint(1000,1000000)
fa=NamedTemporaryFile(delete=False)
out=open(fa.name,'w')
for i in reads:
if contig in i:return ('NA')
else:
out.write('>%s\n'%i)
out.write(i+'\n')
out.close()
subprocess.call("cap3 %s -x %s > /dev/null"%(fa.name,randseed),shell=True)
if os.path.exists('%s.%s.contigs'%(fa.name,randseed)):infered_contigs_nb=int(os.popen("grep Contig %s.%s.contigs|wc -l"%(fa.name,randseed)).readline().strip())
else:
print (contig,reads,' failed to infer ref')
infered_contigs_nb=0
if infered_contigs_nb==0:return('')
putative_ref = {}
for line in open(r'%s.%s.contigs'%(fa.name,randseed)):
if line.startswith('>'):
key = line.split(' ')[0]
putative_ref[key] = ''
else:
putative_ref[key]+=line.strip()
ref_qual = {}
for line in open(r'%s.%s.contigs.qual'%(fa.name,randseed)):
if line.startswith('>'):
key = line.split(' ')[0]
ref_qual[key] = []
else:
ref_qual[key]+=line.split()
ref=[]
bestref,bestscore='',0
for k,v in ref_qual.items():
score=np.mean(np.array(ref_qual[k]).astype(int))
if score>bestscore:
bestscore=score
bestref=putative_ref[k]
if score>50:ref.append(putative_ref[k])
if len(ref)==0:#use the putative ref with largest score
ref=[bestref]
os.system('rm %s.%s*'%(fa.name,randseed))
if len(ref)==1:putative_ref=(ref[0])
elif len(ref)>=2:
putative_ref=bestref
plus,minus=maxoverlap(contig,putative_ref)[2],maxoverlap(str(Seq(contig).reverse_complement()),putative_ref)[2]
if plus>minus:return putative_ref
else:return str(Seq(putative_ref).reverse_complement())
def nestDict(set1,set2):
idx=defaultdict(list)
keys_h,keys_t=[],[]
for q in set1:
keys_h.append(q[:min(int(len(q)/2),60)])
keys_t.append(q[max(-60,-int(len(q)/2)):])
for p in set2:
for k in keys_h:
if len(idx[k])>50:continue
if k in p:
idx[k].append(p[max(0,p.index(k)-15):p.index(k)+len(k)*2+15])
break
for k in keys_t:
if len(idx[k])>50:continue
if k in p:
idx[k].append(p[max(p.index(k)-len(k),0):p.index(k)+len(k)+15])
return idx
def contig_read_hash(fi,cor_reads):
with open(fi)as f:
contigs=list(map(lambda x:x.strip(),f))
contig_reads=defaultdict(list)
with open(cor_reads) as f:
while True:
line=f.readline()
if not line:break
if line.startswith('>')is False:continue
seq=f.readline().strip()
querys=re.findall('([ATCG]+)=',line)
if len(querys)==0:continue
for query in querys:
if len(contig_reads[query])>10:
continue
query=query.split('=')[0]#useless
if query not in contigs:continue
seq_s1,query_s1,len_overlap1=maxoverlap(seq,query)
seqrv=str(Seq(seq).reverse_complement())
seq_s2,query_s2,len_overlap2=maxoverlap(seqrv,query)
if len_overlap1>len_overlap2:seq,seq_s,query_s,len_overlap=seq,seq_s1,query_s1,len_overlap1
else:seq,seq_s,query_s,len_overlap=seqrv,seq_s2,query_s2,len_overlap2
if len_overlap<15:continue
contig_reads[query].append(seq[max(0,seq_s-query_s-30):min(len(seq),seq_s+len(query)-query_s+30)])
return (contig_reads)
def infer_ref(line):
contig=line[0]
kmers=contig2kmer(contig)
sp_case,cov_case,sp_control,cov_control,refpairs=contig_sp_cov.loc[contig].tolist()
refpairs=refpairs.split(',')
if len(refpairs)==1:return (refpairs[0],sp_case,cov_case,sp_control,cov_control)
try:
refseq=Assembly([Dseqrecord(i) for i in refpairs],limit=15).assemble_linear(max_nodes=3)[0].seq.watson
if maxoverlap(contig,refseq)[2]<15:refseq=assemDNA(refpairs)#for sake of low complexity sequences
except:refseq=assemDNA(refpairs)
if maxoverlap(contig,refseq)[2]<15:refseq=str(Seq(refseq).reverse_complement())
return (refseq,sp_case,cov_case,sp_control,cov_control)
def infer_ref_unpair(line,unpair_reads_dict):
contig,refseq=line[0],''
kmers=contig2kmer(contig)
sp_case,cov_case,sp_control,cov_control,refpairs=contig_sp_cov.loc[contig].tolist()
refpairs=refpairs.split(',')
related_reads=unpair_reads_dict[contig]
refseq='NA'
if len(refpairs)>2:#indels should have no more than 2 paired refs.(head and tail)
try:
refseq=Assembly([Dseqrecord(i) for i in refpairs],limit=15).assemble_linear(max_nodes=3)[0].seq.watson
except:refseq=assemDNA(refpairs)
if len(related_reads)>lowdepth/2 and len(refseq)<len(contig):refseq=CAP(contig,related_reads)
if maxoverlap(contig,refseq)[2]<15:refseq=str(Seq(refseq).reverse_complement())
return (refseq,sp_case,cov_case,sp_control,cov_control)
def ana_contigs(fi,paired=True):
if fi.split('/')[-1].startswith('x00'):contigs=pd.read_csv(fi,header=0,index_col=None,sep='\t')
else:contigs=pd.read_csv(fi,header=None,index_col=None,sep='\t')
contigs.columns=['contig']
if paired==False:unpair_reads_dict=contig_read_hash(fi,cor_reads)
a,b,c,d,e=[],[],[],[],[]
for i in trange(contigs.shape[0]):
if paired==False:aa,bb,cc,dd,ee=infer_ref_unpair(contigs.loc[i],unpair_reads_dict)
else:aa,bb,cc,dd,ee=infer_ref(contigs.loc[i])
a.append(aa)
b.append(bb)
c.append(cc)
d.append(dd)
e.append(ee)
contigs['putative_ref'],contigs['sp_case'],contigs['cov_case'],contigs['sp_control'],contigs['cov_control']=a,b,c,d,e#zip(*contigs.apply(infer_ref_unpair,1,args=(unpair_reads_dict,)))
if paired==False:contigs.to_csv('%s/contig_unpair/result%s.csv'%(outdir,fi.split('/')[-1]),header=False,index=False,sep='\t')
else:contigs.to_csv('%s/contig_pair/result%s.csv'%(outdir,fi.split('/')[-1]),header=False,index=False,sep='\t')
def filter_unpaired_contigs(fi):
#filter low-depth contig_unpaired
seed=NamedTemporaryFile(delete=True).name.split('/')[-1]
out=open('%s/contig_unpair/passedcontig_%s.fa'%(outdir,seed),'w')
out2=open('%s/contig_unpair/contigs_unpaired_%s'%(outdir,seed),'w')
weird_contig=open('%s/contig_unpair/FailedToInferRef_%s.txt'%(outdir,seed),'w')
mutkmerpool=kmerpair.index.tolist()
contig_unpair=pd.read_csv(fi,header=None,index_col=None,sep='\t')
if contig_unpair.shape[1]==4:contig_unpair.columns=['nb_kmer','contig','tag','Pvalue']
elif contig_unpair.shape[1]==1:contig_unpair.columns=['contig']
for contig in tqdm(contig_unpair.contig.tolist()):
headtailkmers=[contig[:31],contig[-31:]]
refkmers=kmerpair.reindex(headtailkmers)[1].dropna().tolist()
if len(refkmers)<2:
weird_contig.write(contig+'\n')
continue
out.write('>%s\n%s\n'%(contig,contig))
out2.write(contig+'\n')
out2.close()
out.close()
weird_contig.close()
def usedkmers(fi):
tag=fi.split('/')[-1].strip('.txt.gz')
subprocess.call(r"""less %s/contig_pair/usedkmers|awk '{print ">contig_"NR"\n"$1}' > %s/contig_pair/usedkmers_%s.fa"""%(outdir,outdir,tag),shell=True)
subprocess.call(r"""jellyfish query -s %s/contig_pair/usedkmers_%s.fa %s -o %s/contig_pair/usedkmers_%s"""%(outdir,tag,fi.replace('.txt.gz','.jf'),outdir,tag),shell=True,executable='/bin/bash')
kmers=pd.read_csv('%s/contig_pair/usedkmers_%s'%(outdir,fi.split('/')[-1].replace('.txt.gz','')),header=None,index_col=None,sep=' ')
kmers_rv=pd.DataFrame({0:[str(Seq(i).reverse_complement()) for i in kmers[0]],1:kmers[1]})
kmers=pd.concat([kmers,kmers_rv]).drop_duplicates()
kmers.index=kmers[0]
del kmers[0]
kmers.to_csv('%s/contig_pair/usedkmers_%s'%(outdir,fi.split('/')[-1].replace('.txt.gz','')),header=False,index=True,sep=' ')
def OnlyKeepMaxRef():
kmerpair=pd.read_csv('%s/contig_pair/kmerpair.csv'%outdir,header=None,index_col=None,sep='\t')
kmerpair.columns=['mut','wild']
wildkmercount=pd.read_csv('%s/contig_pair/usedkmers_%s'%(outdir,kmerfile_N.split('/')[-1].replace('.txt.gz','')),header=None,index_col=None,sep=' ')
wildkmercount.columns=['wild','count']
kmerpair_wildcount=pd.merge(kmerpair,wildkmercount,left_on='wild',right_on='wild',how='left')
kmerpair_wildcount=kmerpair_wildcount.sort_values('count',ascending=False).groupby('mut').first()
kmerpair_wildcount.to_csv('%s/contig_pair/kmerpair.csv'%outdir,header=False,index=True,sep='\t')
def shrink():
contigs=pd.read_csv('%s/merged_contigs/contigs_allspkmers'%outdir,header=0,index_col=None,sep='\t')
length=contigs.contig.apply(lambda x:len(x),1)
contigs=contigs[(length>=31+nb_kmers) & (length<100)]
contigs.to_csv('%s/merged_contigs/contigs_allspkmers'%outdir,header=True,index=False,sep='\t')
def comm(param):
threads,kmerfile_T,kmerfile_N,wild1,wild2,lowdepth,cutoff,support,nb_kmers,distance=sys.argv[1:]
threads,lowdepth,cutoff,support,nb_kmers,distance=int(threads),int(lowdepth),float(cutoff),int(support),int(nb_kmers),int(distance)
samid=kmerfile_T.split('/')[-1].replace('.txt.gz','')
outdir=os.path.dirname(os.path.dirname(kmerfile_T))
if param=='12':
subprocess.call(r'''comm -12 <(%s %s|awk '{if($2>%s){print $1}}') <(%s %s |awk '{if($2>%s){print $1}}') > %s/case_specific_kmers/shared_kmers'''%(compress,kmerfile_T,support,compress,kmerfile_N,support,outdir),shell=True,executable='/bin/bash')
elif param=='23':
subprocess.call(r'''comm -23 <(%s %s|awk '{if($2>%s){print $1}}') <(%s %s |awk '{if($2>0){print $1}}') > %s/case_specific_kmers/specific_kmer'''%(compress,kmerfile_T,support,compress,kmerfile_N,outdir),shell=True,executable='/bin/bash')
elif param=='homo':
subprocess.call(r'''%s %s|awk '{if($2>%s){print $1}}' > %s/case_specific_kmers/shared_kmers'''%(compress,kmerfile_N,lowdepth,outdir),shell=True,executable='/bin/bash')#adaptable to homo variant
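# editor's note (added comment): Cal_sp_cov decomposes each contig into its 31-mers, looks up the
# paired reference k-mer plus case/control counts for every k-mer, and keeps per-contig medians;
# cov_T / cov_N are the sums of the variant- and reference-allele medians, contigs below the
# `support` / `lowdepth` thresholds in the case sample are dropped, and reverse-complement copies
# of the index are added so lookups work in either orientation.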
def Cal_sp_cov(contigs):
col1,col2=[],[]
for contig in contigs:
kmers=contig2kmer(contig)
col1+=[contig]*len(kmers)
col2+=kmers
df=pd.DataFrame({'contig':col1,'kmers':col2})
df['refs']=kmerpair.reindex(df.kmers)[1].tolist()
df['sp_T']=mutkmers.reindex(df.kmers)[1].tolist()
df['allel2_T']=mutkmers.reindex(df.refs)[1].tolist()
df['sp_N']=wildkmers.reindex(df.kmers)[1].tolist()
df['allel2_N']=wildkmers.reindex(df.refs)[1].tolist()
rawdf=df.dropna().copy()
df=df.groupby('contig').median()
df['cov_T']=df.sp_T+df.allel2_T
df['cov_N']=df.sp_N+df.allel2_N
df=df[['sp_T','cov_T','sp_N','cov_N']].dropna().astype(int)
df=df[(df.sp_T>=support)&(df.cov_T>=lowdepth)]
df['refpairs']=rawdf.groupby('contig')['refs'].apply(lambda x:','.join(x))
df_rv=df.copy()
df_rv.index=[str(Seq(i).reverse_complement()) for i in df.index]
df=pd.concat([df,df_rv])
df=df.loc[~df.index.duplicated(keep='first')]
return df
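# editor's note (added comment): RemoveFP_via_control takes contigs of at most 60 nt, prepends each
# of A/T/C/G to the first 30 nt and appends each to the last 30 nt (eight 31-mer extensions per
# contig), and counts those extensions in the control jellyfish database so that contigs well
# supported in the control can be flagged as false positives.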
def RemoveFP_via_control(contigs):
ext_kmers=defaultdict(list)
for contig in contigs:
if len(contig)>60:continue
for c in 'ATCG':
ext_kmers[contig].append(c+contig[:30])
ext_kmers[contig].append(contig[-30:]+c)
fa=NamedTemporaryFile(delete=False)
ext_kmers_count=NamedTemporaryFile(delete=False)
out=open(fa.name+'.fa','w')
for i_ in ext_kmers.values():
for i in i_:
out.write('>%s\n'%i)
out.write(i+'\n')
out.close()
subprocess.call(r"""jellyfish query -s %s.fa %s/case_specific_kmers/%s -o %s/variant_result/ext_kmers_count"""%(fa.name,outdir,kmerfile_N.split('/')[-1].replace('.txt.gz','.jf'),outdir),shell=True,executable='/bin/bash')
if __name__ == '__main__':
threads,kmerfile_T,kmerfile_N,wild1,wild2,lowdepth,cutoff,support,nb_kmers,distance=sys.argv[1:]
threads,lowdepth,cutoff,support,nb_kmers,distance=int(threads),int(lowdepth),float(cutoff),int(support),int(nb_kmers),int(distance)
samid=kmerfile_T.split('/')[-1].replace('.txt.gz','')
outdir=os.path.dirname(os.path.dirname(kmerfile_T))
os.system('cd ../mergeTags;make')
os.system('mv ../mergeTags/mergeTags ./')
################# extract case specific kmers #######################
nb_kmers_eachthread=10000000
fpath='%s/case_specific_kmers/shared_kmers_count'%outdir
if os.path.exists('%s/variant_result/SNV_alignments.vcf'%outdir) is False:
print (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()),'extract specific kmers')
pool=Pool(2)
pool.map(comm,['23','homo'])# homo mode for calling homozygote and 12 mode for somatic
pool.close()
pool.join()
if int(os.popen('wc -l %s/case_specific_kmers/specific_kmer'%outdir).readline().strip().split()[0])>50000000:os._exit(0)
os.system("echo 'tag\tpvalue' > %s/case_specific_kmers/specific_kmer_fix"%outdir)
print("./mergeTags -k 31 -m 25 -n %s/case_specific_kmers/specific_kmer_fix 2>/dev/null|awk '{if($1>%s){print $0}}'|gzip -c > %s/merged_contigs/contigs.gz;gunzip -c %s/merged_contigs/contigs.gz > %s/merged_contigs/contigs_allspkmers"%(outdir,nb_kmers,outdir,outdir,outdir))
subprocess.call(r"""awk '{print $1"\t0"}' %s/case_specific_kmers/specific_kmer >> %s/case_specific_kmers/specific_kmer_fix"""%(outdir,outdir),shell=True)
subprocess.call(r"./mergeTags -k 31 -m 25 -n %s/case_specific_kmers/specific_kmer_fix 2>/dev/null|awk '{if($1>%s){print $0}}'|gzip -c > %s/merged_contigs/contigs.gz;gunzip -c %s/merged_contigs/contigs.gz > %s/merged_contigs/contigs_allspkmers"%(outdir,nb_kmers,outdir,outdir,outdir),shell=True)
subprocess.call(r"""less %s/case_specific_kmers/shared_kmers|awk '{print ">kmer"NR"\n"$1}' > %s/case_specific_kmers/shared_kmers.fa"""%(outdir,outdir),shell=True)
subprocess.call(r"""jellyfish query -s %s/case_specific_kmers/shared_kmers.fa %s/case_specific_kmers/%s -o %s/case_specific_kmers/shared_kmers_count"""%(outdir,outdir,kmerfile_N.split('/')[-1].replace('.txt.gz','.jf'),outdir),shell=True)
#shrink the contigs_allspkmers and remove useless kmers from the specific_kmer_fix
if os.path.exists('%s/contig_pair/contigs_pairedspkmers'%outdir) is False:
shrink()
if platform.system()=='Linux':
os.system("""rm %s/x*_N;split -l %s -d --additional-suffix=%s_N %s/case_specific_kmers/shared_kmers_count;mv x*%s_N %s"""%(outdir,min(10000000,nb_kmers_eachthread),samid,outdir,samid,outdir))
else:
os.system("""rm %s/x*_N;split -l %s %s/case_specific_kmers/shared_kmers_count"""%(outdir,min(10000000,nb_kmers_eachthread),outdir))
for xf in os.popen('ls ./x*').readlines():
os.system("mv %s %s"%(xf.strip(),outdir+'/'+xf.strip()+samid+'_N'))
fileidxs=[i.strip() for i in os.popen('ls %s/x*%s_N'%(outdir,samid)).readlines()]
print (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()),'indexing and pairing kmers')
with open('%s/case_specific_kmers/specific_kmer_fix'%outdir)as f:
spekmer=list(map(lambda x:x.strip().split()[0],f))[1:]#the 1st line is tag\tpvalue
spekmer=spekmer+[str(Seq(i).reverse_complement()) for i in spekmer]#add revcomp to the specific kmer list, in case kmerpair misses true pairs (TODO: remove and test)
with Manager() as manager:
################# pairing kmers from case and control #####################
lock=manager.Lock()
global pair_T,pair_N
pair_T=manager.list()
pair_N=manager.list()
ncores=min(threads,len(fileidxs))
pool=Pool(ncores)
singlethread = partial(createindex, lock)
pool.map(singlethread, fileidxs)
pool.close()
pool.join()
'''
print (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()),'adding mut kmers into the hash')#what's the purpose of this part?
idx_head,idx_tail=defaultdict(lambda: defaultdict(list)),defaultdict(lambda: defaultdict(list))
for q in spekmer:
idx_head[q[:15]]['mut'].append(q)
idx_tail[q[-15:]]['mut'].append(q)
subpair_T,subpair_N=pairkmers(idx_head,idx_tail)
pair_T+=subpair_T
pair_N+=subpair_N
print (len(subpair_T))
'''
np.savetxt('%s/contig_pair/pair.txt'%outdir, np.stack([pair_T,pair_N]).T,delimiter="\t",fmt="%s")
out=open('%s/contig_pair/kmer_T'%outdir,'w')
kmerpair=pd.read_csv('%s/contig_pair/pair.txt'%outdir,header=None,index_col=None,sep='\t')
pair_T_rv=[str(Seq(i).reverse_complement()) for i in pair_T]
for p in set(pair_T).difference(set(pair_T_rv)):out.write(p+'\n')
out.close()
kmerpair_rv=pd.DataFrame({0:[str(Seq(i).reverse_complement()) for i in kmerpair[0]],1:[str(Seq(i).reverse_complement()) for i in kmerpair[1]]})
kmerpair=pd.concat([kmerpair,kmerpair_rv]).drop_duplicates()
kmerpair.index=kmerpair[0]
del kmerpair[0]
kmerpair.to_csv('%s/contig_pair/kmerpair.csv'%outdir,header=False,index=True,sep='\t')
print (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()),'shrink useful kmers from kmerpair')
out=open('%s/contig_pair/usedkmers'%outdir,'w')
for i in set(list(pair_T)+list(pair_N)):
out.write(i+'\n')
out.close()
pool=Pool(2)
pool.map(usedkmers, [kmerfile_T,kmerfile_N])
pool.close()
pool.join()
OnlyKeepMaxRef()
del pair_T_rv,spekmer,kmerpair_rv
wildkmers=pd.read_csv('%s/contig_pair/usedkmers_%s'%(outdir,kmerfile_N.split('/')[-1].replace('.txt.gz','')),header=None,index_col=0,sep=' ')
mutkmers=pd.read_csv('%s/contig_pair/usedkmers_%s'%(outdir,kmerfile_T.split('/')[-1].replace('.txt.gz','')),header=None,index_col=0,sep=' ')
contig_all=pd.read_csv('%s/merged_contigs/contigs_allspkmers'%outdir,header=0,index_col=None,sep='\t')
kmerpair=pd.read_csv('%s/contig_pair/kmerpair.csv'%outdir,header=None,index_col=0,sep='\t')
contig_sp_cov=Cal_sp_cov(contig_all.contig)
del wildkmers,mutkmers
if os.path.exists('%s/contig_pair/contigs_pairedspkmers'%outdir) is False:
print (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()),'merging into paired contigs')
os.system("echo 'tag\tpvalue' > %s/contig_pair/specific_kmer_fix"%outdir)
subprocess.call(r"""awk '{print $1"\t0"}' %s/contig_pair/kmer_T >> %s/contig_pair/specific_kmer_fix"""%(outdir,outdir),shell=True)
subprocess.call(r"./mergeTags -k 31 -m 25 -n %s/contig_pair/specific_kmer_fix 2>/dev/null|awk '{if($1>%s){print $0}}'|gzip -c > %s/contig_pair/contigs.gz;gunzip -c %s/contig_pair/contigs.gz |cut -f 2 > %s/contig_pair/contigs_pairedspkmers"%(outdir,nb_kmers,outdir,outdir,outdir),shell=True)
contig_all=contig_all[contig_all.contig.isin(contig_sp_cov.index)]
contig_pair=pd.read_csv('%s/contig_pair/contigs_pairedspkmers'%outdir,header=0,index_col=None,sep='\t')
contig_pair=contig_pair[contig_pair.contig.isin(contig_sp_cov.index)]
contig_pair_plus_rv=contig_pair.contig.tolist()+[str(Seq(i).reverse_complement()) for i in contig_pair.contig.tolist()]
pairidx=contig_all.contig.isin(contig_pair_plus_rv)#leave too many unpaired contigs to bbduk step
contig_pair=contig_all[pairidx]
contig_pair.contig.to_csv('%s/contig_pair/contigs_pairedspkmers'%outdir,header=True,index=False,sep='\t')
contig_unpair=contig_all[~pairidx]
print ('original contigs: %s; paired contigs: %s; unpaired contigs: %s'%(contig_all.shape[0],contig_pair.shape[0],contig_unpair.shape[0]))
for name, size in sorted(((name, sys.getsizeof(value)) for name, value in locals().items()),key= lambda x: -x[1])[:10]:
if size > 100000:print("{:>30}: {:>8}".format(name, sizeof_fmt(size)))
################## call variants from paired contigs #######################
nb_contigs_eachthread=ceil(contig_pair.shape[0]/threads)
if nb_contigs_eachthread>0 and os.path.exists('%s/variant_result/SNV_results_pair.txt'%outdir) is False:
print (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()),'all threads for %s contig_paired are started'%contig_pair.shape[0])
subprocess.call("""rm %s/x*;split -l %s -d --additional-suffix=pair_%s %s/contig_pair/contigs_pairedspkmers;mv x*pair_%s %s"""%(outdir,nb_contigs_eachthread,samid,outdir,samid,outdir),shell=True,executable='/bin/bash')
fileidxs=[i.strip() for i in os.popen('ls %s/x*pair_%s|grep -v unpair'%(outdir,samid)).readlines()]
print (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()),'%s chunks are undergoing analysis'%len(fileidxs))
pool=Pool(min(len(fileidxs),threads))
pool.starmap(ana_contigs, zip(fileidxs,[True]*len(fileidxs)))
pool.close()
pool.join()
subprocess.call(r'''cat %s/contig_pair/result* > %s/variant_result/SNV_results_pair.txt;rm %s/contig_pair/result*'''%(outdir,outdir,outdir),shell=True)
print (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()),'all threads for contig_paired are finished')
################## call variants from unpaired contigs ########################
try: nb_unpair=contig_unpair.shape[0]
except: nb_unpair=0
if nb_unpair>0 and os.path.exists('%s/variant_result/SNV_results_unpair.txt'%outdir) is False:
contig_unpair.to_csv('%s/contig_unpair/contigs_unpaired.pass'%outdir,header=False,index=False,sep='\t')
print (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()),'all threads for %s contig_unpaired are started'%(contig_unpair.shape[0]))
nb_contigs_eachthread=ceil(int(os.popen('wc -l %s/contig_unpair/contigs_unpaired.pass'%outdir).readline().strip().split()[0])/threads)
subprocess.call("""rm x*passed_%s;split -l %s -d --additional-suffix=passed_%s %s/contig_unpair/contigs_unpaired.pass;mv x*passed_%s %s"""%(samid,nb_contigs_eachthread,samid,outdir,samid,outdir),shell=True,executable='/bin/bash')
fileidxs=[i.strip() for i in os.popen('ls %s/x*passed_%s'%(outdir,samid)).readlines()]
print (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()),'%s chunks of contig_unpaired undergo filtering'%len(fileidxs))
pool=Pool(min(len(fileidxs),threads))
pool.map(filter_unpaired_contigs, fileidxs)
pool.close()
pool.join()
os.system("cat %s/contig_unpair/passedcontig_* > %s/contig_unpair/passedcontig.fa;echo 'contig' > %s/contig_unpair/contigs_unpaired;cat %s/contig_unpair/contigs_unpaired_* >> %s/contig_unpair/contigs_unpaired;cat %s/contig_unpair/FailedToInferRef_* > %s/contig_unpair/FailedToInferRef.txt"%(outdir,outdir,outdir,outdir,outdir,outdir,outdir))
if os.path.exists('%s/variant_result/SNV_results_unpair.txt'%outdir) is False:
nb_contigs_eachthread=ceil(int(os.popen('wc -l %s/contig_unpair/contigs_unpaired'%outdir).readline().strip().split()[0])/threads)
print (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()),'%s/%s unpaired contigs are dumped to bbduk'%(int(os.popen('wc -l %s/contig_unpair/contigs_unpaired'%outdir).readline().strip().split()[0])-1,contig_unpair.shape[0]))
#retrieve reads from fastq
os.system("bbduk.sh in=%s in2=%s ref=%s k=31 mm=f rcomp=t outm=%s fastawrap=500 rename=t hdist=%s speed=0 2>/dev/null"%(wild1,wild2,'%s/contig_unpair/passedcontig.fa'%outdir,'%s/contig_unpair/unpair_contigs_reads.fa'%outdir,distance))
cor_reads='%s/contig_unpair/unpair_contigs_reads.fa'%outdir
try:nb_weird_contig=int(os.popen('wc -l %s/contig_unpair/FailedToInferRef.txt'%outdir).readline().strip().split()[0])
except:nb_weird_contig=0
subprocess.call(r"""split -l %s -d --additional-suffix=unpair_%s %s/contig_unpair/contigs_unpaired;mv x*unpair_%s %s"""%(nb_contigs_eachthread,samid,outdir,samid,outdir),shell=True,executable='/bin/bash')
fileidxs=[i.strip() for i in os.popen('ls %s/x*unpair_%s'%(outdir,samid)).readlines()]
print (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()),'%s chunks of contig_unpaired are started'%(len(fileidxs)))
pool=Pool(min(len(fileidxs),threads))
pool.starmap(ana_contigs, zip(fileidxs,[False]*len(fileidxs)))
pool.close()
pool.join()
subprocess.call(r'''cat %s/contig_unpair/result* > %s/variant_result/SNV_results_unpair.txt;rm %s/contig_unpair/result* %s/contig_unpair/unpair_contigs_reads.fa'''%(outdir,outdir,outdir,outdir),shell=True)
print (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()),'all threads for contig_unpaired are finished')
##################### prepare final results ###################
print (time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()),'prepare the final result')
subprocess.call(r'''cat %s/variant_result/SNV_results_* > %s/variant_result/SNV_results.txt;rm %s/x* %s/contig_unpair/*_tmp*'''%(outdir,outdir,outdir,outdir),shell=True)
result=pd.read_csv('%s/variant_result/SNV_results.txt'%outdir,header=None,index_col=None,sep='\t')
import numpy as np
import pandas as pd
import statsmodels.api as sm
pd.set_option('display.max_rows', 30)
# -*- coding: utf-8 -*-
"""
Created on Tue Sep 14 10:59:05 2021
@author: franc
"""
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from pathlib import Path
import json
from collections import Counter, OrderedDict
import math
import torchtext
from torchtext.data import get_tokenizer
from googletrans import Translator
# from deep_translator import GoogleTranslator
# pip install googletrans==4.0.0rc1
import pickle
# pip install pickle-mixin
import nltk
from nltk.stem import WordNetLemmatizer
from nltk.corpus import wordnet as wn
# python -m spacy download es_core_news_sm
import spacy
import fasttext.util
import contractions
import re # libreria de expresiones regulares
import string # libreria de cadena de caracteres
import itertools
import sys
sys.path.append("/tmp/TEST")
from treetagger import TreeTagger
import pathlib
from scipy.spatial import distance
from scipy.stats import kurtosis
from scipy.stats import skew
class NLPClass:
def __init__(self):
self.numero = 1
nltk.download('wordnet')
def translations_dictionary(self, df_translate=None, path=""):
'''
It appends different animal names in Spanish and English to a dictionary.
The English animal names are chosen so that they appear as WordNet synsets.
Parameters
----------
df_translate : pandas.DataFrame, optional
If it's not None, the rows are appended. Otherwise it's
initialized and then the rows are appended.
The default is None.
path : string, optional
The path where to save the pickle file with the dictionary; if the
path is empty, the dictionary is not saved.
The default is "".
Returns
-------
df_translate : pandas.DataFrame
pandas.DataFrame with the new rows appended.
'''
df_auxiliar = pd.DataFrame(columns=['spanish','english'])
df_auxiliar = df_auxiliar.append(pd.DataFrame({'spanish': ["yaguareté"], 'english': ["jaguar"]}), ignore_index = True)
df_auxiliar = df_auxiliar.append(pd.DataFrame({'spanish': ["llama"], 'english': ["llama"]}), ignore_index = True)
df_auxiliar = df_auxiliar.append(pd.DataFrame({'spanish': ["picaflor"], 'english': ["hummingbird"]}), ignore_index = True)
df_auxiliar = df_auxiliar.append(pd.DataFrame({'spanish': ["chita"], 'english': ["cheetah"]}), ignore_index = True)
df_auxiliar = df_auxiliar.append(pd.DataFrame({'spanish': ["torcaza"], 'english': ["dove"]}), ignore_index = True)
df_auxiliar = df_auxiliar.append(pd.DataFrame({'spanish': ["yacaré"], 'english': ["alligator"]}), ignore_index = True)
df_auxiliar = df_auxiliar.append(pd.DataFrame({'spanish': ["corvina"], 'english': ["croaker"]}), ignore_index = True)
df_auxiliar = df_auxiliar.append(pd.DataFrame({'spanish': ["vizcacha"], 'english': ["viscacha"]}), ignore_index = True)
df_auxiliar = df_auxiliar.append(pd.DataFrame({'spanish': ["orca"], 'english': ["killer_whale"]}), ignore_index = True)
df_auxiliar = df_auxiliar.append(pd.DataFrame({'spanish': ["barata"], 'english': ["german_cockroach"]}), ignore_index = True)
df_auxiliar = df_auxiliar.append(pd.DataFrame({'spanish': ["coipo"], 'english': ["coypu"]}), ignore_index = True)
df_auxiliar = df_auxiliar.append(pd.DataFrame({'spanish': ["cuncuna"], 'english': ["caterpillar"]}), ignore_index = True)
df_auxiliar = df_auxiliar.append(pd.DataFrame({'spanish': ["carpincho"], 'english': ["capybara"]}), ignore_index = True)
df_auxiliar = df_auxiliar.append(pd.DataFrame({'spanish': ["jote"], 'english': ["buzzard"]}), ignore_index = True)
df_auxiliar = df_auxiliar.append(pd.DataFrame({'spanish': ["zorzal"], 'english': ["fieldfare"]}), ignore_index = True)
df_auxiliar = df_auxiliar.append(pd.DataFrame({'spanish': ["guanaco"], 'english': ["guanaco"]}), ignore_index = True)
df_auxiliar = df_auxiliar.append(pd.DataFrame({'spanish': ["pejerrey"], 'english': ["silverside"]}), ignore_index = True)
df_auxiliar = df_auxiliar.append(pd.DataFrame({'spanish': ["mandril"], 'english': ["mandrill"]}), ignore_index = True)
df_auxiliar = df_auxiliar.append(pd.DataFrame({'spanish': ["peludo"], 'english': ["armadillo"]}), ignore_index = True)
df_auxiliar = df_auxiliar.append(pd.DataFrame({'spanish': ["chingue"], 'english': ["skunk"]}), ignore_index = True)
df_auxiliar = df_auxiliar.append(pd.DataFrame({'spanish': ["guaren"], 'english': ["brown_rat"]}), ignore_index = True)
df_auxiliar = df_auxiliar.append(pd.DataFrame({'spanish': ["cata"], 'english': ["budgerigar"]}), ignore_index = True)
df_auxiliar = df_auxiliar.append(pd.DataFrame({'spanish': ["bonito"], 'english': ["atlantic_bonito"]}), ignore_index = True)
df_auxiliar = df_auxiliar.append(pd.DataFrame({'spanish': ["cachalote"], 'english': ["sperm_whale"]}), ignore_index = True)
df_auxiliar = df_auxiliar.append(pd.DataFrame({'spanish': ["morena"], 'english': ["moray_eels"]}), ignore_index = True)
df_auxiliar = df_auxiliar.append(pd.DataFrame({'spanish': ["jaiba"], 'english': ["callinectes_sapidus"]}), ignore_index = True)
df_auxiliar = df_auxiliar.append(pd.DataFrame({'spanish': ["cervatillo"], 'english': ["fawn"]}), ignore_index = True)
df_auxiliar = df_auxiliar.append(pd.DataFrame({'spanish': ["mulita"], 'english': ["nine-banded_armadillo"]}), ignore_index = True)
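# --- editor's note (added; hedged) ---
# DataFrame.append, used for every row above, is deprecated since pandas 1.4 and removed in
# pandas 2.0; each append could instead be written with pd.concat, e.g.:
# df_auxiliar = pd.concat(
#     [df_auxiliar, pd.DataFrame({'spanish': ["mulita"], 'english': ["nine-banded_armadillo"]})],
#     ignore_index=True)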
from collections import OrderedDict
from datetime import timedelta
import numpy as np
import pytest
from pandas.core.dtypes.dtypes import CategoricalDtype, DatetimeTZDtype
import pandas as pd
from pandas import (
Categorical,
DataFrame,
Series,
Timedelta,
Timestamp,
_np_version_under1p14,
concat,
date_range,
option_context,
)
from pandas.core.arrays import integer_array
import pandas.util.testing as tm
def _check_cast(df, v):
"""
Check if all dtypes of df are equal to v
"""
assert all(s.dtype.name == v for _, s in df.items())
class TestDataFrameDataTypes:
def test_concat_empty_dataframe_dtypes(self):
df = DataFrame(columns=list("abc"))
df["a"] = df["a"].astype(np.bool_)
df["b"] = df["b"].astype(np.int32)
df["c"] = df["c"].astype(np.float64)
result = pd.concat([df, df])
assert result["a"].dtype == np.bool_
assert result["b"].dtype == np.int32
assert result["c"].dtype == np.float64
result = pd.concat([df, df.astype(np.float64)])
assert result["a"].dtype == np.object_
assert result["b"].dtype == np.float64
assert result["c"].dtype == np.float64
def test_empty_frame_dtypes_ftypes(self):
empty_df = pd.DataFrame()
tm.assert_series_equal(empty_df.dtypes, pd.Series(dtype=np.object))
# GH 26705 - Assert .ftypes is deprecated
with tm.assert_produces_warning(FutureWarning):
tm.assert_series_equal(empty_df.ftypes, pd.Series(dtype=np.object))
nocols_df = pd.DataFrame(index=[1, 2, 3])
tm.assert_series_equal(nocols_df.dtypes, pd.Series(dtype=np.object))
# GH 26705 - Assert .ftypes is deprecated
with tm.assert_produces_warning(FutureWarning):
tm.assert_series_equal(nocols_df.ftypes, pd.Series(dtype=np.object))
norows_df = pd.DataFrame(columns=list("abc"))
tm.assert_series_equal(
norows_df.dtypes, pd.Series(np.object, index=list("abc"))
)
# GH 26705 - Assert .ftypes is deprecated
with tm.assert_produces_warning(FutureWarning):
tm.assert_series_equal(
norows_df.ftypes, pd.Series("object:dense", index=list("abc"))
)
norows_int_df = pd.DataFrame(columns=list("abc")).astype(np.int32)
tm.assert_series_equal(
norows_int_df.dtypes, pd.Series(np.dtype("int32"), index=list("abc"))
)
# GH 26705 - Assert .ftypes is deprecated
with tm.assert_produces_warning(FutureWarning):
tm.assert_series_equal(
norows_int_df.ftypes, pd.Series("int32:dense", index=list("abc"))
)
odict = OrderedDict
df = pd.DataFrame(odict([("a", 1), ("b", True), ("c", 1.0)]), index=[1, 2, 3])
ex_dtypes = pd.Series(
odict([("a", np.int64), ("b", np.bool), ("c", np.float64)])
)
ex_ftypes = pd.Series(
odict([("a", "int64:dense"), ("b", "bool:dense"), ("c", "float64:dense")])
)
tm.assert_series_equal(df.dtypes, ex_dtypes)
# GH 26705 - Assert .ftypes is deprecated
with tm.assert_produces_warning(FutureWarning):
tm.assert_series_equal(df.ftypes, ex_ftypes)
# same but for empty slice of df
tm.assert_series_equal(df[:0].dtypes, ex_dtypes)
# GH 26705 - Assert .ftypes is deprecated
with tm.assert_produces_warning(FutureWarning):
tm.assert_series_equal(df[:0].ftypes, ex_ftypes)
def test_datetime_with_tz_dtypes(self):
tzframe = DataFrame(
{
"A": date_range("20130101", periods=3),
"B": date_range("20130101", periods=3, tz="US/Eastern"),
"C": date_range("20130101", periods=3, tz="CET"),
}
)
tzframe.iloc[1, 1] = pd.NaT
tzframe.iloc[1, 2] = pd.NaT
result = tzframe.dtypes.sort_index()
expected = Series(
[
np.dtype("datetime64[ns]"),
DatetimeTZDtype("ns", "US/Eastern"),
DatetimeTZDtype("ns", "CET"),
],
["A", "B", "C"],
)
tm.assert_series_equal(result, expected)
def test_dtypes_are_correct_after_column_slice(self):
# GH6525
df = pd.DataFrame(index=range(5), columns=list("abc"), dtype=np.float_)
odict = OrderedDict
tm.assert_series_equal(
df.dtypes,
pd.Series(odict([("a", np.float_), ("b", np.float_), ("c", np.float_)])),
)
tm.assert_series_equal(
df.iloc[:, 2:].dtypes, pd.Series(odict([("c", np.float_)]))
)
tm.assert_series_equal(
df.dtypes,
pd.Series(odict([("a", np.float_), ("b", np.float_), ("c", np.float_)])),
)
def test_select_dtypes_include_using_list_like(self):
df = DataFrame(
{
"a": list("abc"),
"b": list(range(1, 4)),
"c": np.arange(3, 6).astype("u1"),
"d": np.arange(4.0, 7.0, dtype="float64"),
"e": [True, False, True],
"f": pd.Categorical(list("abc")),
"g": pd.date_range("20130101", periods=3),
"h": pd.date_range("20130101", periods=3, tz="US/Eastern"),
"i": pd.date_range("20130101", periods=3, tz="CET"),
"j": pd.period_range("2013-01", periods=3, freq="M"),
"k": pd.timedelta_range("1 day", periods=3),
}
)
ri = df.select_dtypes(include=[np.number])
ei = df[["b", "c", "d", "k"]]
tm.assert_frame_equal(ri, ei)
ri = df.select_dtypes(include=[np.number], exclude=["timedelta"])
ei = df[["b", "c", "d"]]
tm.assert_frame_equal(ri, ei)
ri = df.select_dtypes(include=[np.number, "category"], exclude=["timedelta"])
ei = df[["b", "c", "d", "f"]]
tm.assert_frame_equal(ri, ei)
ri = df.select_dtypes(include=["datetime"])
ei = df[["g"]]
tm.assert_frame_equal(ri, ei)
ri = df.select_dtypes(include=["datetime64"])
ei = df[["g"]]
tm.assert_frame_equal(ri, ei)
ri = df.select_dtypes(include=["datetimetz"])
ei = df[["h", "i"]]
tm.assert_frame_equal(ri, ei)
with pytest.raises(NotImplementedError, match=r"^$"):
df.select_dtypes(include=["period"])
def test_select_dtypes_exclude_using_list_like(self):
df = DataFrame(
{
"a": list("abc"),
"b": list(range(1, 4)),
"c": np.arange(3, 6).astype("u1"),
"d": np.arange(4.0, 7.0, dtype="float64"),
"e": [True, False, True],
}
)
re = df.select_dtypes(exclude=[np.number])
ee = df[["a", "e"]]
tm.assert_frame_equal(re, ee)
def test_select_dtypes_exclude_include_using_list_like(self):
df = DataFrame(
{
"a": list("abc"),
"b": list(range(1, 4)),
"c": np.arange(3, 6).astype("u1"),
"d": np.arange(4.0, 7.0, dtype="float64"),
"e": [True, False, True],
"f": pd.date_range("now", periods=3).values,
}
)
exclude = (np.datetime64,)
include = np.bool_, "integer"
r = df.select_dtypes(include=include, exclude=exclude)
e = df[["b", "c", "e"]]
tm.assert_frame_equal(r, e)
exclude = ("datetime",)
include = "bool", "int64", "int32"
r = df.select_dtypes(include=include, exclude=exclude)
e = df[["b", "e"]]
tm.assert_frame_equal(r, e)
def test_select_dtypes_include_using_scalars(self):
df = DataFrame(
{
"a": list("abc"),
"b": list(range(1, 4)),
"c": np.arange(3, 6).astype("u1"),
"d": np.arange(4.0, 7.0, dtype="float64"),
"e": [True, False, True],
"f": pd.Categorical(list("abc")),
"g": pd.date_range("20130101", periods=3),
"h": pd.date_range("20130101", periods=3, tz="US/Eastern"),
"i": pd.date_range("20130101", periods=3, tz="CET"),
"j": pd.period_range("2013-01", periods=3, freq="M"),
"k": pd.timedelta_range("1 day", periods=3),
}
)
ri = df.select_dtypes(include=np.number)
ei = df[["b", "c", "d", "k"]]
tm.assert_frame_equal(ri, ei)
ri = df.select_dtypes(include="datetime")
ei = df[["g"]]
tm.assert_frame_equal(ri, ei)
ri = df.select_dtypes(include="datetime64")
ei = df[["g"]]
tm.assert_frame_equal(ri, ei)
ri = df.select_dtypes(include="category")
ei = df[["f"]]
tm.assert_frame_equal(ri, ei)
with pytest.raises(NotImplementedError, match=r"^$"):
df.select_dtypes(include="period")
def test_select_dtypes_exclude_using_scalars(self):
df = DataFrame(
{
"a": list("abc"),
"b": list(range(1, 4)),
"c": np.arange(3, 6).astype("u1"),
"d": np.arange(4.0, 7.0, dtype="float64"),
"e": [True, False, True],
"f": pd.Categorical(list("abc")),
"g": pd.date_range("20130101", periods=3),
"h": pd.date_range("20130101", periods=3, tz="US/Eastern"),
"i": pd.date_range("20130101", periods=3, tz="CET"),
"j": pd.period_range("2013-01", periods=3, freq="M"),
"k": pd.timedelta_range("1 day", periods=3),
}
)
ri = df.select_dtypes(exclude=np.number)
ei = df[["a", "e", "f", "g", "h", "i", "j"]]
tm.assert_frame_equal(ri, ei)
ri = df.select_dtypes(exclude="category")
ei = df[["a", "b", "c", "d", "e", "g", "h", "i", "j", "k"]]
tm.assert_frame_equal(ri, ei)
with pytest.raises(NotImplementedError, match=r"^$"):
df.select_dtypes(exclude="period")
def test_select_dtypes_include_exclude_using_scalars(self):
df = DataFrame(
{
"a": list("abc"),
"b": list(range(1, 4)),
"c": np.arange(3, 6).astype("u1"),
"d": np.arange(4.0, 7.0, dtype="float64"),
"e": [True, False, True],
"f": pd.Categorical(list("abc")),
"g": pd.date_range("20130101", periods=3),
"h": pd.date_range("20130101", periods=3, tz="US/Eastern"),
"i": pd.date_range("20130101", periods=3, tz="CET"),
"j": pd.period_range("2013-01", periods=3, freq="M"),
"k": pd.timedelta_range("1 day", periods=3),
}
)
ri = df.select_dtypes(include=np.number, exclude="floating")
ei = df[["b", "c", "k"]]
tm.assert_frame_equal(ri, ei)
def test_select_dtypes_include_exclude_mixed_scalars_lists(self):
df = DataFrame(
{
"a": list("abc"),
"b": list(range(1, 4)),
"c": np.arange(3, 6).astype("u1"),
"d": np.arange(4.0, 7.0, dtype="float64"),
"e": [True, False, True],
"f": pd.Categorical(list("abc")),
"g": pd.date_range("20130101", periods=3),
"h": pd.date_range("20130101", periods=3, tz="US/Eastern"),
"i": pd.date_range("20130101", periods=3, tz="CET"),
"j": pd.period_range("2013-01", periods=3, freq="M"),
"k": pd.timedelta_range("1 day", periods=3),
}
)
ri = df.select_dtypes(include=np.number, exclude=["floating", "timedelta"])
ei = df[["b", "c"]]
tm.assert_frame_equal(ri, ei)
ri = df.select_dtypes(include=[np.number, "category"], exclude="floating")
ei = df[["b", "c", "f", "k"]]
tm.assert_frame_equal(ri, ei)
def test_select_dtypes_duplicate_columns(self):
# GH20839
odict = OrderedDict
df = DataFrame(
odict(
[
("a", list("abc")),
("b", list(range(1, 4))),
("c", np.arange(3, 6).astype("u1")),
("d", np.arange(4.0, 7.0, dtype="float64")),
("e", [True, False, True]),
("f", pd.date_range("now", periods=3).values),
]
)
)
df.columns = ["a", "a", "b", "b", "b", "c"]
expected = DataFrame(
{"a": list(range(1, 4)), "b": np.arange(3, 6).astype("u1")}
)
result = df.select_dtypes(include=[np.number], exclude=["floating"])
tm.assert_frame_equal(result, expected)
def test_select_dtypes_not_an_attr_but_still_valid_dtype(self):
df = DataFrame(
{
"a": list("abc"),
"b": list(range(1, 4)),
"c": np.arange(3, 6).astype("u1"),
"d": np.arange(4.0, 7.0, dtype="float64"),
"e": [True, False, True],
"f": pd.date_range("now", periods=3).values,
}
)
df["g"] = df.f.diff()
assert not hasattr(np, "u8")
r = df.select_dtypes(include=["i8", "O"], exclude=["timedelta"])
e = df[["a", "b"]]
tm.assert_frame_equal(r, e)
r = df.select_dtypes(include=["i8", "O", "timedelta64[ns]"])
e = df[["a", "b", "g"]]
tm.assert_frame_equal(r, e)
def test_select_dtypes_empty(self):
df = DataFrame({"a": list("abc"), "b": list(range(1, 4))})
msg = "at least one of include or exclude must be nonempty"
with pytest.raises(ValueError, match=msg):
df.select_dtypes()
def test_select_dtypes_bad_datetime64(self):
df = DataFrame(
{
"a": list("abc"),
"b": list(range(1, 4)),
"c": np.arange(3, 6).astype("u1"),
"d": np.arange(4.0, 7.0, dtype="float64"),
"e": [True, False, True],
"f": pd.date_range("now", periods=3).values,
}
)
with pytest.raises(ValueError, match=".+ is too specific"):
df.select_dtypes(include=["datetime64[D]"])
with pytest.raises(ValueError, match=".+ is too specific"):
df.select_dtypes(exclude=["datetime64[as]"])
def test_select_dtypes_datetime_with_tz(self):
df2 = DataFrame(
dict(
A=Timestamp("20130102", tz="US/Eastern"),
B=Timestamp("20130603", tz="CET"),
),
index=range(5),
)
df3 = pd.concat([df2.A.to_frame(), df2.B.to_frame()], axis=1)
result = df3.select_dtypes(include=["datetime64[ns]"])
expected = df3.reindex(columns=[])
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"dtype", [str, "str", np.string_, "S1", "unicode", np.unicode_, "U1"]
)
@pytest.mark.parametrize("arg", ["include", "exclude"])
def test_select_dtypes_str_raises(self, dtype, arg):
df = DataFrame(
{
"a": list("abc"),
"g": list("abc"),
"b": list(range(1, 4)),
"c": np.arange(3, 6).astype("u1"),
"d": np.arange(4.0, 7.0, dtype="float64"),
"e": [True, False, True],
"f": pd.date_range("now", periods=3).values,
}
)
msg = "string dtypes are not allowed"
kwargs = {arg: [dtype]}
with pytest.raises(TypeError, match=msg):
df.select_dtypes(**kwargs)
def test_select_dtypes_bad_arg_raises(self):
df = DataFrame(
{
"a": list("abc"),
"g": list("abc"),
"b": list(range(1, 4)),
"c": np.arange(3, 6).astype("u1"),
"d": np.arange(4.0, 7.0, dtype="float64"),
"e": [True, False, True],
"f": pd.date_range("now", periods=3).values,
}
)
msg = "data type.*not understood"
with pytest.raises(TypeError, match=msg):
df.select_dtypes(["blargy, blarg, blarg"])
def test_select_dtypes_typecodes(self):
# GH 11990
df = tm.makeCustomDataframe(30, 3, data_gen_f=lambda x, y: np.random.random())
expected = df
FLOAT_TYPES = list(np.typecodes["AllFloat"])
tm.assert_frame_equal(df.select_dtypes(FLOAT_TYPES), expected)
def test_dtypes_gh8722(self, float_string_frame):
float_string_frame["bool"] = float_string_frame["A"] > 0
result = float_string_frame.dtypes
expected = Series(
{k: v.dtype for k, v in float_string_frame.items()}, index=result.index
)
tm.assert_series_equal(result, expected)
# compat, GH 8722
with option_context("use_inf_as_na", True):
df = DataFrame([[1]])
result = df.dtypes
tm.assert_series_equal(result, Series({0: np.dtype("int64")}))
def test_ftypes(self, mixed_float_frame):
frame = mixed_float_frame
expected = Series(
dict(
A="float32:dense",
B="float32:dense",
C="float16:dense",
D="float64:dense",
)
).sort_values()
# GH 26705 - Assert .ftypes is deprecated
with tm.assert_produces_warning(FutureWarning):
result = frame.ftypes.sort_values()
tm.assert_series_equal(result, expected)
def test_astype_float(self, float_frame):
casted = float_frame.astype(int)
expected = DataFrame(
float_frame.values.astype(int),
index=float_frame.index,
columns=float_frame.columns,
)
tm.assert_frame_equal(casted, expected)
casted = float_frame.astype(np.int32)
expected = DataFrame(
float_frame.values.astype(np.int32),
index=float_frame.index,
columns=float_frame.columns,
)
tm.assert_frame_equal(casted, expected)
float_frame["foo"] = "5"
casted = float_frame.astype(int)
expected = DataFrame(
float_frame.values.astype(int),
index=float_frame.index,
columns=float_frame.columns,
)
tm.assert_frame_equal(casted, expected)
def test_astype_mixed_float(self, mixed_float_frame):
# mixed casting
casted = mixed_float_frame.reindex(columns=["A", "B"]).astype("float32")
_check_cast(casted, "float32")
casted = mixed_float_frame.reindex(columns=["A", "B"]).astype("float16")
_check_cast(casted, "float16")
def test_astype_mixed_type(self, mixed_type_frame):
# mixed casting
mn = mixed_type_frame._get_numeric_data().copy()
mn["little_float"] = np.array(12345.0, dtype="float16")
mn["big_float"] = np.array(123456789101112.0, dtype="float64")
casted = mn.astype("float64")
_check_cast(casted, "float64")
casted = mn.astype("int64")
_check_cast(casted, "int64")
casted = mn.reindex(columns=["little_float"]).astype("float16")
_check_cast(casted, "float16")
casted = mn.astype("float32")
_check_cast(casted, "float32")
casted = mn.astype("int32")
_check_cast(casted, "int32")
# to object
casted = mn.astype("O")
_check_cast(casted, "object")
def test_astype_with_exclude_string(self, float_frame):
df = float_frame.copy()
expected = float_frame.astype(int)
df["string"] = "foo"
casted = df.astype(int, errors="ignore")
expected["string"] = "foo"
tm.assert_frame_equal(casted, expected)
df = float_frame.copy()
expected = float_frame.astype(np.int32)
df["string"] = "foo"
casted = df.astype(np.int32, errors="ignore")
expected["string"] = "foo"
tm.assert_frame_equal(casted, expected)
def test_astype_with_view_float(self, float_frame):
# this is the only real reason to do it this way
tf = np.round(float_frame).astype(np.int32)
casted = tf.astype(np.float32, copy=False)
# TODO(wesm): verification?
tf = float_frame.astype(np.float64)
casted = tf.astype(np.int64, copy=False) # noqa
def test_astype_with_view_mixed_float(self, mixed_float_frame):
tf = mixed_float_frame.reindex(columns=["A", "B", "C"])
casted = tf.astype(np.int64)
casted = tf.astype(np.float32) # noqa
@pytest.mark.parametrize("dtype", [np.int32, np.int64])
@pytest.mark.parametrize("val", [np.nan, np.inf])
def test_astype_cast_nan_inf_int(self, val, dtype):
# see gh-14265
#
# Check NaN and inf --> raise error when converting to int.
msg = "Cannot convert non-finite values \\(NA or inf\\) to integer"
df = DataFrame([val])
with pytest.raises(ValueError, match=msg):
df.astype(dtype)
def test_astype_str(self):
# see gh-9757
a = Series(date_range("2010-01-04", periods=5))
b = Series(date_range("3/6/2012 00:00", periods=5, tz="US/Eastern"))
c = Series([Timedelta(x, unit="d") for x in range(5)])
d = Series(range(5))
e = Series([0.0, 0.2, 0.4, 0.6, 0.8])
df = DataFrame({"a": a, "b": b, "c": c, "d": d, "e": e})
# Datetime-like
result = df.astype(str)
expected = DataFrame(
{
"a": list(map(str, map(lambda x: Timestamp(x)._date_repr, a._values))),
"b": list(map(str, map(Timestamp, b._values))),
"c": list(
map(
str,
map(lambda x: Timedelta(x)._repr_base(format="all"), c._values),
)
),
"d": list(map(str, d._values)),
"e": list(map(str, e._values)),
}
)
tm.assert_frame_equal(result, expected)
def test_astype_str_float(self):
# see gh-11302
result = DataFrame([np.NaN]).astype(str)
expected = DataFrame(["nan"])
tm.assert_frame_equal(result, expected)
result = DataFrame([1.12345678901234567890]).astype(str)
# < 1.14 truncates
# >= 1.14 preserves the full repr
val = "1.12345678901" if _np_version_under1p14 else "1.1234567890123457"
expected = DataFrame([val])
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("dtype_class", [dict, Series])
def test_astype_dict_like(self, dtype_class):
# GH7271 & GH16717
a = Series(date_range("2010-01-04", periods=5))
b = Series(range(5))
c = Series([0.0, 0.2, 0.4, 0.6, 0.8])
d = Series(["1.0", "2", "3.14", "4", "5.4"])
df = DataFrame({"a": a, "b": b, "c": c, "d": d})
original = df.copy(deep=True)
# change type of a subset of columns
dt1 = dtype_class({"b": "str", "d": "float32"})
result = df.astype(dt1)
expected = DataFrame(
{
"a": a,
"b": Series(["0", "1", "2", "3", "4"]),
"c": c,
"d": Series([1.0, 2.0, 3.14, 4.0, 5.4], dtype="float32"),
}
)
tm.assert_frame_equal(result, expected)
tm.assert_frame_equal(df, original)
dt2 = dtype_class({"b": np.float32, "c": "float32", "d": np.float64})
result = df.astype(dt2)
expected = DataFrame(
{
"a": a,
"b": Series([0.0, 1.0, 2.0, 3.0, 4.0], dtype="float32"),
"c": Series([0.0, 0.2, 0.4, 0.6, 0.8], dtype="float32"),
"d": Series([1.0, 2.0, 3.14, 4.0, 5.4], dtype="float64"),
}
)
tm.assert_frame_equal(result, expected)
tm.assert_frame_equal(df, original)
# change all columns
dt3 = dtype_class({"a": str, "b": str, "c": str, "d": str})
tm.assert_frame_equal(df.astype(dt3), df.astype(str))
tm.assert_frame_equal(df, original)
# error should be raised when using something other than column labels
# in the keys of the dtype dict
dt4 = dtype_class({"b": str, 2: str})
dt5 = dtype_class({"e": str})
msg = "Only a column name can be used for the key in a dtype mappings argument"
with pytest.raises(KeyError, match=msg):
df.astype(dt4)
with pytest.raises(KeyError, match=msg):
df.astype(dt5)
tm.assert_frame_equal(df, original)
# if the dtypes provided are the same as the original dtypes, the
# resulting DataFrame should be the same as the original DataFrame
dt6 = dtype_class({col: df[col].dtype for col in df.columns})
equiv = df.astype(dt6)
tm.assert_frame_equal(df, equiv)
tm.assert_frame_equal(df, original)
# GH 16717
# if dtypes provided is empty, the resulting DataFrame
# should be the same as the original DataFrame
dt7 = dtype_class({})
result = df.astype(dt7)
tm.assert_frame_equal(df, equiv)
tm.assert_frame_equal(df, original)
def test_astype_duplicate_col(self):
a1 = Series([1, 2, 3, 4, 5], name="a")
b = Series([0.1, 0.2, 0.4, 0.6, 0.8], name="b")
a2 = Series([0, 1, 2, 3, 4], name="a")
df = concat([a1, b, a2], axis=1)
result = df.astype(str)
a1_str = Series(["1", "2", "3", "4", "5"], dtype="str", name="a")
b_str = Series(["0.1", "0.2", "0.4", "0.6", "0.8"], dtype=str, name="b")
a2_str = Series(["0", "1", "2", "3", "4"], dtype="str", name="a")
expected = concat([a1_str, b_str, a2_str], axis=1)
tm.assert_frame_equal(result, expected)
result = df.astype({"a": "str"})
expected = concat([a1_str, b, a2_str], axis=1)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"dtype",
[
"category",
CategoricalDtype(),
CategoricalDtype(ordered=True),
CategoricalDtype(ordered=False),
CategoricalDtype(categories=list("abcdef")),
CategoricalDtype(categories=list("edba"), ordered=False),
CategoricalDtype(categories=list("edcb"), ordered=True),
],
ids=repr,
)
def test_astype_categorical(self, dtype):
# GH 18099
d = {"A": list("abbc"), "B": list("bccd"), "C": list("cdde")}
df = DataFrame(d)
result = df.astype(dtype)
expected = DataFrame({k: Categorical(d[k], dtype=dtype) for k in d})
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"cls",
[
pd.api.types.CategoricalDtype,
pd.api.types.DatetimeTZDtype,
pd.api.types.IntervalDtype,
],
)
def test_astype_categoricaldtype_class_raises(self, cls):
df = DataFrame({"A": ["a", "a", "b", "c"]})
xpr = "Expected an instance of {}".format(cls.__name__)
with pytest.raises(TypeError, match=xpr):
df.astype({"A": cls})
with pytest.raises(TypeError, match=xpr):
df["A"].astype(cls)
@pytest.mark.parametrize("dtype", ["Int64", "Int32", "Int16"])
def test_astype_extension_dtypes(self, dtype):
# GH 22578
df = pd.DataFrame([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], columns=["a", "b"])
expected1 = pd.DataFrame(
{
"a": integer_array([1, 3, 5], dtype=dtype),
"b": integer_array([2, 4, 6], dtype=dtype),
}
)
tm.assert_frame_equal(df.astype(dtype), expected1)
tm.assert_frame_equal(df.astype("int64").astype(dtype), expected1)
tm.assert_frame_equal(df.astype(dtype).astype("float64"), df)
df = pd.DataFrame([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], columns=["a", "b"])
df["b"] = df["b"].astype(dtype)
expected2 = pd.DataFrame(
{"a": [1.0, 3.0, 5.0], "b": integer_array([2, 4, 6], dtype=dtype)}
)
tm.assert_frame_equal(df, expected2)
tm.assert_frame_equal(df.astype(dtype), expected1)
tm.assert_frame_equal(df.astype("int64").astype(dtype), expected1)
@pytest.mark.parametrize("dtype", ["Int64", "Int32", "Int16"])
def test_astype_extension_dtypes_1d(self, dtype):
# GH 22578
df = pd.DataFrame({"a": [1.0, 2.0, 3.0]})
expected1 = pd.DataFrame({"a": integer_array([1, 2, 3], dtype=dtype)})
tm.assert_frame_equal(df.astype(dtype), expected1)
tm.assert_frame_equal(df.astype("int64").astype(dtype), expected1)
df = pd.DataFrame({"a": [1.0, 2.0, 3.0]})
df["a"] = df["a"].astype(dtype)
expected2 = pd.DataFrame({"a": integer_array([1, 2, 3], dtype=dtype)})
tm.assert_frame_equal(df, expected2)
tm.assert_frame_equal(df.astype(dtype), expected1)
tm.assert_frame_equal(df.astype("int64").astype(dtype), expected1)
@pytest.mark.parametrize("dtype", ["category", "Int64"])
def test_astype_extension_dtypes_duplicate_col(self, dtype):
# GH 24704
a1 = Series([0, np.nan, 4], name="a")
a2 = Series([np.nan, 3, 5], name="a")
df = concat([a1, a2], axis=1)
result = df.astype(dtype)
expected = concat([a1.astype(dtype), a2.astype(dtype)], axis=1)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"dtype", [{100: "float64", 200: "uint64"}, "category", "float64"]
)
def test_astype_column_metadata(self, dtype):
# GH 19920
columns =
|
pd.UInt64Index([100, 200, 300], name="foo")
|
pandas.UInt64Index
|
import unittest
from setup.settings import *
from numpy.testing import *
from pandas.util.testing import *
import numpy as np
import dolphindb_numpy as dnp
import pandas as pd
import orca
class FunctionLeftshiftTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
# connect to a DolphinDB server
orca.connect(HOST, PORT, "admin", "123456")
def test_function_math_binary_left_shift_scalar(self):
self.assertEqual(dnp.left_shift(1, 4), np.left_shift(1, 4))
self.assertEqual(dnp.left_shift(1, -5), np.left_shift(1, -5))
self.assertEqual(dnp.left_shift(0, 9), np.left_shift(0, 9))
def test_function_math_binary_left_shift_list(self):
lst1 = [1, 2, 3]
lst2 = [4, 6, 9]
assert_array_equal(dnp.left_shift(lst1, lst2), np.left_shift(lst1, lst2))
def test_function_math_binary_left_shift_array_with_scalar(self):
npa = np.array([1, 2, 3])
dnpa = dnp.array([1, 2, 3])
assert_array_equal(dnp.left_shift(dnpa, 1), np.left_shift(npa, 1))
assert_array_equal(dnp.left_shift(1, dnpa), np.left_shift(1, npa))
def test_function_math_binary_left_shift_array_with_array(self):
npa1 = np.array([1, 2, 3])
npa2 = np.array([4, 6, 9])
dnpa1 = dnp.array([1, 2, 3])
dnpa2 = dnp.array([4, 6, 9])
assert_array_equal(dnp.left_shift(dnpa1, dnpa2), np.left_shift(npa1, npa2))
def test_function_math_binary_left_shift_array_with_array_param_out(self):
npa1 = np.array([1, 2, 3])
npa2 = np.array([4, 6, 9])
npa = np.zeros(shape=(1, 3))
dnpa1 = dnp.array([1, 2, 3])
dnpa2 = dnp.array([4, 6, 9])
dnpa = dnp.zeros(shape=(1, 3))
np.left_shift(npa1, npa2, out=npa)
dnp.left_shift(dnpa1, dnpa2, out=dnpa)
assert_array_equal(dnpa, npa)
def test_function_math_binary_left_shift_array_with_series(self):
npa = np.array([1, 2, 3])
dnpa = dnp.array([1, 2, 3])
ps =
|
pd.Series([4, 6, 9])
|
pandas.Series
|
import numpy as np
import pandas as pd
def combined_data(train, test):
"""
Get the combined data
:param train pandas.dataframe:
:param test pandas.dataframe:
:return pandas.dataframe:
"""
A = set(train.columns.values)
B = set(test.columns.values)
colToDel = A.difference(B)
total_df = pd.concat([train.drop(colToDel, axis=1), test], axis=0)
return total_df
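# Illustrative sketch (not part of the original module): combined_data drops the
# columns that appear only in the train frame (typically the target) before stacking
# train and test row-wise. The column names and values below are made up.
def _combined_data_example():
    train = pd.DataFrame({"f1": [1, 2], "target": [0, 1]})
    test = pd.DataFrame({"f1": [3, 4]})
    total = combined_data(train, test)
    # 'target' is dropped, so total has the single shared column 'f1' and 4 rows.
    return total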
def remove_duplicate_columns(total_df):
"""
Removing duplicate columns
"""
colsToRemove = []
columns = total_df.columns
for i in range(len(columns) - 1):
v = total_df[columns[i]].values
for j in range(i + 1, len(columns)):
if np.array_equal(v, total_df[columns[j]].values):
colsToRemove.append(columns[j])
colsToRemove = list(set(colsToRemove))
total_df.drop(colsToRemove, axis=1, inplace=True)
print(f">> Dropped {len(colsToRemove)} duplicate columns")
return total_df
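# Illustrative sketch (made-up frame): byte-identical columns collapse to the first
# occurrence; here 'b' duplicates 'a' and is dropped.
def _remove_duplicate_columns_example():
    demo = pd.DataFrame({"a": [1, 2], "b": [1, 2], "c": [3, 4]})
    return remove_duplicate_columns(demo)  # keeps 'a' and 'c'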
def merge_data():
files_to_use = ['bitcoin_price.csv', 'ethereum_price.csv', 'ripple_price.csv', 'litecoin_price.csv']
cols_to_use = []
for ind, file_name in enumerate(files_to_use):
currency_name = file_name.split('_')[0]
if ind == 0:
df = pd.read_csv('../input/' + file_name, usecols=['Date', 'Close'], parse_dates=['Date'])
df.columns = ['Date', currency_name]
else:
temp_df = pd.read_csv('../input/' + file_name, usecols=['Date', 'Close'], parse_dates=['Date'])
temp_df.columns = ['Date', currency_name]
df =
|
pd.merge(df, temp_df, on='Date')
|
pandas.merge
|
import re
import requests
import pandas as pd
import numpy as np
from urllib.request import urlopen
from bs4 import BeautifulSoup
from datetime import datetime,timedelta
headers = {'User-Agent': 'Chrome/39.0.2171.95'}
def parse_option(ticker,stamp_list):
"""Parse option data from 'finance.yahoo.com'
Input: ticker symbol and a list of option maturity timestamps
Output: call and put data (strike, price, implied volatility) for each maturity
"""
call={}
put={}
dates = [str(datetime.fromtimestamp(int(i)).date()+timedelta(days=1)) for i in stamp_list]
# get options in each maturity
for i,timestamp in enumerate(stamp_list):
print(f'Parsing options expiring at {dates[i]}')
the_url = f'https://finance.yahoo.com/quote/{ticker}/options?p=AAPL&date={timestamp}'
response = requests.get(the_url, headers=headers)
soup = BeautifulSoup(response.text, features='lxml')
Strike= soup.find_all('td', {'class': re.compile('data-col2')})
Price=soup.find_all('td', {'class': re.compile('data-col3')})
iVol= soup.find_all('td', {'class': re.compile('data-col10')})
callPrice=[Price[0].get_text()]
putPrice=[]
strike=[Strike[0].get_text()]
iV=[iVol[0].get_text()]
flag=0
for i in range(1,len(Price)):
iV.append(iVol[i].get_text())
strike.append(Strike[i].get_text())
if float(strike[i].replace(',',''))<=float(strike[i-1].replace(',','')):  #### begin to record put option prices
flag=1
if flag==0:
callPrice.append(Price[i].get_text())
else:
putPrice.append(Price[i].get_text())
#### split the strikes into call and put; remove any ',' in the number strings
callStrike= [float(x.replace(',','')) for x in strike[:len(callPrice)]]
putStrike= [float(x.replace(',','')) for x in strike[len(callPrice):]]
callPrice = [float(x.replace(',','')) for x in callPrice]
putPrice = [float(x.replace(',','')) for x in putPrice]
callIV= [float(x[:-1].replace(',','')) for x in iV[:len(callPrice)]]
putIV=[float(x[:-1].replace(',','')) for x in iV[len(callPrice):]]
maturity = str(datetime.fromtimestamp(int(timestamp)).date())
call[maturity] =
|
pd.DataFrame([callPrice,callIV],columns=callStrike,index=['price','iv'])
|
pandas.DataFrame
|
from datetime import datetime
import numpy as np
import pandas as pd
from evidently import ColumnMapping
from evidently.analyzers.data_quality_analyzer import DataQualityAnalyzer
from evidently.analyzers.data_quality_analyzer import FeatureQualityStats
from evidently.analyzers.utils import process_columns
import pytest
@pytest.mark.parametrize(
"dataset, expected_metrics",
[
(
pd.DataFrame({"numerical_feature": []}),
FeatureQualityStats(
feature_type="num",
count=0,
percentile_25=None,
percentile_50=None,
percentile_75=None,
infinite_count=None,
infinite_percentage=None,
max=None,
min=None,
mean=None,
missing_count=None,
missing_percentage=None,
most_common_value=None,
most_common_value_percentage=None,
std=None,
unique_count=None,
unique_percentage=None,
most_common_not_null_value=None,
most_common_not_null_value_percentage=None,
),
),
(
pd.DataFrame({"numerical_feature": [np.nan, np.nan, np.nan, np.nan]}),
FeatureQualityStats(
feature_type="num",
count=0,
percentile_25=np.nan,
percentile_50=np.nan,
percentile_75=np.nan,
infinite_count=0,
infinite_percentage=0,
max=np.nan,
min=np.nan,
mean=np.nan,
missing_count=4,
missing_percentage=100,
most_common_value=np.nan,
most_common_value_percentage=100,
std=np.nan,
unique_count=0,
unique_percentage=0,
most_common_not_null_value=None,
most_common_not_null_value_percentage=None,
),
),
(
pd.DataFrame({"numerical_feature": [np.nan, 2, 2, 432]}),
FeatureQualityStats(
feature_type="num",
count=3,
infinite_count=0,
infinite_percentage=0.0,
missing_count=1,
missing_percentage=25,
unique_count=2,
unique_percentage=50,
percentile_25=2.0,
percentile_50=2.0,
percentile_75=217.0,
max=432.0,
min=2.0,
mean=145.33,
most_common_value=2,
most_common_value_percentage=50,
std=248.26,
most_common_not_null_value=None,
most_common_not_null_value_percentage=None,
),
),
],
)
def test_data_profile_analyzer_num_features(dataset: pd.DataFrame, expected_metrics: FeatureQualityStats) -> None:
data_profile_analyzer = DataQualityAnalyzer()
data_mapping = ColumnMapping(
numerical_features=["numerical_feature"],
)
result = data_profile_analyzer.calculate(dataset, None, data_mapping)
assert result.reference_features_stats is not None
assert result.reference_features_stats.num_features_stats is not None
assert "numerical_feature" in result.reference_features_stats.num_features_stats
metrics = result.reference_features_stats.num_features_stats["numerical_feature"]
assert metrics == expected_metrics
@pytest.mark.parametrize(
"dataset, expected_metrics",
[
(
pd.DataFrame({"category_feature": []}),
FeatureQualityStats(
feature_type="cat",
count=0,
percentile_25=None,
percentile_50=None,
percentile_75=None,
infinite_count=None,
infinite_percentage=None,
max=None,
min=None,
mean=None,
missing_count=None,
missing_percentage=None,
most_common_value=None,
most_common_value_percentage=None,
std=None,
unique_count=None,
unique_percentage=None,
most_common_not_null_value=None,
most_common_not_null_value_percentage=None,
),
),
(
pd.DataFrame({"category_feature": [None, None, None, None]}),
FeatureQualityStats(
feature_type="cat",
count=0,
infinite_count=None,
infinite_percentage=None,
missing_count=4,
missing_percentage=100.0,
unique_count=0,
unique_percentage=0.0,
percentile_25=None,
percentile_50=None,
percentile_75=None,
max=None,
min=None,
mean=None,
most_common_value=np.nan,
most_common_value_percentage=100.0,
std=None,
most_common_not_null_value=None,
most_common_not_null_value_percentage=None,
new_in_current_values_count=None,
unused_in_current_values_count=None,
),
),
(
pd.DataFrame({"category_feature": [np.nan, 2, 2, 1]}),
FeatureQualityStats(
feature_type="cat",
count=3,
infinite_count=None,
infinite_percentage=None,
missing_count=1,
missing_percentage=25,
unique_count=2,
unique_percentage=50,
percentile_25=None,
percentile_50=None,
percentile_75=None,
max=None,
min=None,
mean=None,
most_common_value=2,
most_common_value_percentage=50,
std=None,
most_common_not_null_value=None,
most_common_not_null_value_percentage=None,
),
),
(
pd.DataFrame({"category_feature": ["y", "n", "n/a", "n"]}),
FeatureQualityStats(
feature_type="cat",
count=4,
infinite_count=None,
infinite_percentage=None,
missing_count=0,
missing_percentage=0,
unique_count=3,
unique_percentage=75,
percentile_25=None,
percentile_50=None,
percentile_75=None,
max=None,
min=None,
mean=None,
most_common_value="n",
most_common_value_percentage=50,
std=None,
most_common_not_null_value=None,
most_common_not_null_value_percentage=None,
),
),
(
pd.DataFrame({"category_feature": ["n", "d", "p", "n"]}),
FeatureQualityStats(
feature_type="cat",
count=4,
infinite_count=None,
infinite_percentage=None,
missing_count=0,
missing_percentage=0,
unique_count=3,
unique_percentage=75,
percentile_25=None,
percentile_50=None,
percentile_75=None,
max=None,
min=None,
mean=None,
most_common_value="n",
most_common_value_percentage=50,
std=None,
most_common_not_null_value=None,
most_common_not_null_value_percentage=None,
),
),
],
)
def test_data_profile_analyzer_cat_features(dataset: pd.DataFrame, expected_metrics: FeatureQualityStats) -> None:
data_profile_analyzer = DataQualityAnalyzer()
for task_type in (None, "regression", "classification"):
result = data_profile_analyzer.calculate(
dataset, None, ColumnMapping(categorical_features=["category_feature"], task=task_type)
)
assert result.reference_features_stats is not None
assert result.reference_features_stats.cat_features_stats is not None
assert "category_feature" in result.reference_features_stats.cat_features_stats
metrics = result.reference_features_stats.cat_features_stats["category_feature"]
assert metrics == expected_metrics
def test_data_profile_analyzer_classification_with_target() -> None:
reference_data = pd.DataFrame(
{
"target": ["cat_1", "cat_1", "cat_2", "cat_3", "cat_1"],
"prediction": ["cat_2", "cat_1", "cat_1", "cat_3", "cat_1"],
}
)
current_data = pd.DataFrame(
{
"target": ["cat_1", "cat_6", "cat_2", None, "cat_1"],
"prediction": ["cat_5", "cat_1", "cat_1", "cat_3", np.nan],
}
)
data_profile_analyzer = DataQualityAnalyzer()
data_mapping = ColumnMapping(task="classification")
result = data_profile_analyzer.calculate(reference_data, current_data, data_mapping)
assert result.reference_features_stats is not None
assert result.reference_features_stats.target_stats is not None
assert result.reference_features_stats.target_stats["target"] == FeatureQualityStats(
feature_type="cat",
count=5,
infinite_count=None,
infinite_percentage=None,
missing_count=0,
missing_percentage=0.0,
unique_count=3,
unique_percentage=60.0,
percentile_25=None,
percentile_50=None,
percentile_75=None,
max=None,
min=None,
mean=None,
most_common_value="cat_1",
most_common_value_percentage=60.0,
std=None,
most_common_not_null_value=None,
most_common_not_null_value_percentage=None,
new_in_current_values_count=None,
unused_in_current_values_count=None,
)
assert result.current_features_stats is not None
assert result.current_features_stats.target_stats is not None
assert result.current_features_stats.target_stats["target"] == FeatureQualityStats(
feature_type="cat",
count=4,
infinite_count=None,
infinite_percentage=None,
missing_count=1,
missing_percentage=20.0,
unique_count=3,
unique_percentage=60.0,
percentile_25=None,
percentile_50=None,
percentile_75=None,
max=None,
min=None,
mean=None,
most_common_value="cat_1",
most_common_value_percentage=40.0,
std=None,
most_common_not_null_value=None,
most_common_not_null_value_percentage=None,
new_in_current_values_count=2,
unused_in_current_values_count=1,
)
@pytest.mark.parametrize(
"reference_dataset, current_dataset, expected_new, expected_unused",
[
(
|
pd.DataFrame({"category_feature": ["", "a", "b"]})
|
pandas.DataFrame
|
import snowflake.connector as sf
import pandas as pd
import matplotlib.pyplot as plt
from config import config
import numpy as np
# Connection String
conn = sf.connect(
user=config.username,
password=config.password,
account=config.account
)
def test_connection(connect, query):
cursor = connect.cursor()
cursor.execute(query)
cursor.close()
sql5 = """
SELECT * FROM "MI_XPRESSCLOUD"."XPRESSFEED"."SP500";
"""
df500 = pd.read_sql(sql5, conn)
sql1 = """
SELECT companyId, proId, personId FROM "MI_XPRESSCLOUD"."XPRESSFEED"."CIQPROFESSIONAL";
"""
sql2 = """
SELECT proId, proFunctionId FROM "MI_XPRESSCLOUD"."XPRESSFEED"."CIQPROTOPROFUNCTION";
"""
sql3 = """
SELECT proFunctionId, proFunctionName FROM "MI_XPRESSCLOUD"."XPRESSFEED"."CIQPROFUNCTION";
"""
sql4 = """
SELECT proId, compensationValue FROM "MI_XPRESSCLOUD"."XPRESSFEED"."CIQCOMPENSATION";
"""
sql6 = """
SELECT personId, prefix FROM "MI_XPRESSCLOUD"."XPRESSFEED"."CIQPERSON";
"""
df1 =
|
pd.read_sql(sql1, conn)
|
pandas.read_sql
|
from sklearn.preprocessing import MultiLabelBinarizer
import numpy as np
import pandas as pd
import pickle
from tqdm import tqdm
tqdm.pandas()
class DataframePreprocessing:
DEFAULT_TARGET_THEMES = [
5,
6,
26,
33,
139,
163,
232,
313,
339,
350,
406,
409,
555,
589,
597,
634,
660,
695,
729,
766,
773,
793,
800,
810,
852,
895,
951,
975,
]
OTHER_THEMES_VALUE = 4242
def __init__(
self,
df=pd.DataFrame(),
group_processes=True,
x_column_name="page_text_extract",
y_column_name="tema",
target_themes=DEFAULT_TARGET_THEMES,
other_themes_value=OTHER_THEMES_VALUE,
is_incremental_training=False,
remove_processes_without_theme=True,
is_parquet=False,
labels_freq={},
vocab_path="",
):
self.is_incremental_training = is_incremental_training
self.remove_processes_without_theme = remove_processes_without_theme
self.is_parquet = is_parquet
self.x_column_name = x_column_name
self.y_column_name = y_column_name
self.other_themes_value = other_themes_value
self.target_themes = target_themes
self.group_processes = group_processes
self.vocab_path = vocab_path
self.distinct_themes = target_themes + [other_themes_value]
if not df.empty:
if remove_processes_without_theme:
df = df[
df[self.y_column_name].progress_apply(
self._remove_processes_without_theme
)
].copy()
df[self.y_column_name] = (
df[self.y_column_name]
.progress_apply(self._remove_with_without_theme_mixture)
.copy()
)
self._generate_themes_column(df.copy())
self.target_themes.sort()
self.labels_freq = {}
if not labels_freq:
self._set_labels_frequency()
else:
self.labels_freq = labels_freq
self.processed_df = self._process_dataframe()
def _remove_processes_without_theme(self, series):
if 0 in series and len(series) == 1:
return False
return True
def _remove_with_without_theme_mixture(self, series):
if 0 in series and len(series) > 1:
return np.array([v for v in series if v != 0])
return series
def _generate_themes_column(self, df):
if self.group_processes:
self._group_samples_by_process(df)
else:
if not self.is_parquet:
self.df = df
# self.df[self.y_column_name] = self.df[self.y_column_name].apply(lambda x: np.array(x))
# self._transform_array_column(df)
else:
self.df = df
self.df[self.y_column_name] = self.df[self.y_column_name].apply(
lambda x: np.array(x)
)
def _transform_array_column(self, df):
print("Tranforming themes strings to arrays...")
df[self.y_column_name] = df[self.y_column_name].apply(
lambda l: np.fromstring(l[1:-1], sep=" ")
)
self.df = df
def _group_samples_by_process(self, df):
print("Grouping processes...")
self.df = df.groupby("process_id").apply(self._aggregate)
def _aggregate(self, series):
reduced = {}
series[self.x_column_name].drop_duplicates(inplace=True)
reduced[self.x_column_name] = " ".join(
str(x) for x in series[self.x_column_name].values
)
temas = np.unique(series[self.y_column_name].values)
reduced[self.y_column_name] = temas[~np.isnan(temas)]
return pd.Series(reduced, index=[self.x_column_name, *[self.y_column_name]])
def _remove_rare_samples(self, series, threshold=5):
if self.labels_freq.get(tuple(series.tolist())) < threshold:
return False
return True
def _switch_other_themes_values(self, actual_label):
"""
Replace themes that are not in the target themes
with a single placeholder value
"""
modified_label = set()
for theme in actual_label:
if theme not in self.target_themes:
modified_label.add(self.other_themes_value)
elif theme != 0:
modified_label.add(theme)
return sorted(modified_label)
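# Illustration: with target_themes == [5, 6] and other_themes_value == 4242 (made-up
# numbers), the label [5, 7, 9] normalizes to [5, 4242]: target themes pass through,
# while every non-target theme collapses into the single "other" value.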
def _normalize_labels(self, series):
return np.asarray(self._switch_other_themes_values(series.tolist()))
def _set_labels_frequency(self):
print("Setting labels frequency...")
for label in self.df[self.y_column_name]:
normalized_label = tuple(self._switch_other_themes_values(label))
if not self.labels_freq.get(normalized_label):
self.labels_freq[normalized_label] = 1
else:
self.labels_freq[normalized_label] += 1
def get_labels_frequency(self, series):
print("Setting labels frequency...")
labels_freq = {}
for label in series:
normalized_label = tuple(self._switch_other_themes_values(label))
if not labels_freq.get(normalized_label):
labels_freq[normalized_label] = 1
else:
labels_freq[normalized_label] += 1
return labels_freq
def _get_distinct_themes(self):
if self.is_incremental_training:
self.distinct_themes = self.target_themes + [self.other_themes_value]
else:
distinct_themes = set()
for label in self.df["labels_with_others"]:
for theme in label:
distinct_themes.add(theme)
self.distinct_themes = list(sorted(distinct_themes))
# TODO: Include case when themes are not grouped
def get_unique_binarized_labels(self, df_path, y_column_name, is_parquet=False):
print("Generating set of binarized labels...")
if not is_parquet:
themes = pd.read_csv(df_path, usecols=[y_column_name])
themes[y_column_name] = themes[y_column_name].apply(
lambda l: np.fromstring(l[1:-1], sep=" ")
)
else:
themes = pd.read_parquet(df_path, columns=[y_column_name])
themes[self.y_column_name] = themes[self.y_column_name].apply(
lambda x: np.array(x)
)
themes[self.y_column_name] = themes[self.y_column_name].apply(
self._remove_with_without_theme_mixture
)
if self.remove_processes_without_theme:
themes = themes[
themes[self.y_column_name].progress_apply(
self._remove_processes_without_theme
)
]
labels_freq = self.get_labels_frequency(themes[self.y_column_name])
themes["labels_with_others"] = themes[y_column_name].apply(
self._normalize_labels
)
mlb = MultiLabelBinarizer()
binarized_columns = mlb.fit_transform(themes["labels_with_others"].to_numpy())
unique_labels = []
for bin_label in binarized_columns:
if bin_label.tolist() not in unique_labels:
unique_labels.append(bin_label.tolist())
return unique_labels, labels_freq
def _binarize_labels(self):
print("Binarizing labels...")
self._get_distinct_themes()
mlb = MultiLabelBinarizer(classes=self.distinct_themes)
binarized_columns = mlb.fit_transform(self.df["labels_with_others"].to_numpy())
columns_names = {
ix: binarized_columns[:, i] for i, ix in enumerate(self.distinct_themes)
}
return pd.concat(
[self.df.reset_index(drop=True), pd.DataFrame(columns_names)], axis=1,
)
def _clean_text(self, text, vocab):
text = text.split()
text = [x for x in text if x in vocab]
text = " ".join(text)
return text
def _select_vocab(self, vocab_path):
vocab = pickle.load(open(vocab_path, "rb"))
self.df[self.x_column_name] = self.df[self.x_column_name].progress_apply(
self._clean_text, vocab=vocab
)
def _process_dataframe(self):
self.df = self.df[~
|
pd.isnull(self.df[self.x_column_name])
|
pandas.isnull
|
import dash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output, State
from django_plotly_dash import DjangoDash
import dash_bootstrap_components as dbc
import plotly.graph_objs as go
import random
import json
import pandas as pd
import numpy as np
import datetime
from fall.models import SignupData, Survey, SurveyObs
import plotly.express as px
import re
import eb_passwords
from collections import Counter
DEMO_MODE = True
app = DjangoDash(
'ThreeTeams',
add_bootstrap_links=True,
) # replaces dash.Dash
# prevent setup complex map twice
def empty_map():
fig = go.Figure(go.Scattermapbox(lat=['38.91427',],lon=['-77.02827',]))
fig.update_layout(
mapbox=dict(
center=dict(lat=23.973793,lon=120.979703),
zoom=8,
style='white-bg')
)
return fig
def draw_area_map():
with open('../helper_files/TaiwanCounties_simple.geojson') as f:
geoj = json.load(f)
data =
|
pd.DataFrame()
|
pandas.DataFrame
|
#!/usr/bin/env python3
# coding: utf-8
# @Author: ArthurBernard
# @Email: <EMAIL>
# @Date: 2019-08-30 09:25:01
# @Last modified by: ArthurBernard
# @Last modified time: 2019-09-03 21:56:51
""" Base object to download historical data from REST API.
Notes
-----
The following object is designed to download data from crypto-currency exchanges
(currently only Binance, GDax, Kraken and Poloniex).
"""
# Import built-in packages
import os
import pathlib
import time
# Import extern packages
import pandas as pd
# Import local packages
from dccd.tools.date_time import date_to_TS, TS_to_date
from dccd.tools.date_time import str_to_span, span_to_str
__all__ = ['ImportDataCryptoCurrencies']
class ImportDataCryptoCurrencies:
""" Base class to import data about crypto-currencies from some exchanges.
Parameters
----------
path : str
The path where data will be saved.
crypto : str
The abbreviation of the crypto-currency.
span : {int, 'weekly', 'daily', 'hourly'}
- If str, periodicity of observation.
- If int, number of seconds between each observation; minimal span\
is 60 seconds.
platform : str
The platform of your choice: 'Kraken', 'Poloniex'.
fiat : str
A fiat currency or a crypto-currency.
form : {'xlsx', 'csv'}
Your favorite format. Only 'xlsx' and 'csv' are supported at the moment.
Notes
-----
Don't use this class directly; use the respective class for each exchange.
See Also
--------
FromBinance, FromKraken, FromGDax, FromPoloniex
Attributes
----------
pair : str
Pair symbol, `crypto + fiat`.
start, end : int
Timestamps at which the data download starts and ends.
span : int
Number of seconds between observations.
full_path : str
Path to save data.
form : str
Format to save data.
Methods
-------
import_data
save
get_data
"""
def __init__(self, path, crypto, span, platform, fiat='EUR', form='xlsx'):
""" Initialize object. """
self.path = path
self.crypto = crypto
self.span, self.per = self._period(span)
self.fiat = fiat
self.pair = str(crypto + fiat)
self.full_path = self.path + '/' + platform + '/Data/Clean_Data/'
self.full_path += str(self.per) + '/' + self.pair
self.last_df =
|
pd.DataFrame()
|
pandas.DataFrame
|
"""
assign cell identity based on SNR and UMI_min
"""
from celescope.__init__ import ROOT_PATH
from celescope.tools.step import Step, s_common
import celescope.tools.utils as utils
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import subprocess
import matplotlib
matplotlib.use('Agg')
def get_opts_count_tag(parser, sub_program):
parser.add_argument(
"--UMI_min",
help="Default='auto'. Minimum UMI threshold. Cell barcodes with valid UMI < UMI_min are classified as *undeterminded*.",
default="auto"
)
parser.add_argument(
"--dim",
help="Default=1. Tag dimentions. Usually we use 1-dimentional tag.",
default=1
)
parser.add_argument(
"--SNR_min",
help="""Default='auto'. Minimum signal-to-noise ratio.
Cell barcodes with UMI >= UMI_min and SNR < SNR_min are classified as *multiplet*. """,
default="auto"
)
parser.add_argument("--combine_cluster",
help="Conbine cluster tsv file.", default=None)
parser.add_argument(
"--coefficient",
help="""Default=0.1. If `SNR_min` is 'auto', minimum signal-to-noise ratio is calulated as
`SNR_min = max(median(SNRs) * coefficient, 2)`.
Smaller `coefficient` will cause less *multiplet* in the tag assignment.""",
default=0.1
)
if sub_program:
parser.add_argument("--read_count_file", help="Tag read count file.", required=True)
parser.add_argument("--match_dir", help="Match celescope scRNA-Seq directory.")
parser.add_argument("--matrix_dir", help="Match celescope scRNA-Seq matrix directory.")
parser.add_argument("--tsne_file", help="t-SNE coord file.")
s_common(parser)
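# Worked illustration of the 'auto' threshold described in the help texts above
# (numbers are made up): with per-cell signal-to-noise ratios [30, 45, 12, 80] and
# coefficient 0.1, SNR_min = max(median([30, 45, 12, 80]) * 0.1, 2) = max(3.75, 2) = 3.75.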
def count_tag(args):
step_name = "count_tag"
runner = Count_tag(args, step_name)
runner.run()
class Count_tag(Step):
"""
Features
- Assign tag to each cell barcode and summarize.
Output
- `{sample}_umi_tag.tsv`
`first column` cell barcode
`last column` assigned tag
`columns between first and last` UMI count for each tag
- `{sample}_tsne_tag.tsv` the same as `{sample}_umi_tag.tsv`, plus t-SNE coordinates, gene_counts and cluster information
- `{sample}_cluster_count.tsv` the number of cell barcodes assigned to *undetermined*, *multiplet* and *each tag*
"""
def __init__(self, args, step_name):
Step.__init__(self, args, step_name)
self.read_count_file = args.read_count_file
self.UMI_min = args.UMI_min
self.SNR_min = args.SNR_min
self.combine_cluster = args.combine_cluster
self.dim = int(args.dim)
self.coefficient = float(args.coefficient)
# read
self.df_read_count =
|
pd.read_csv(self.read_count_file, sep="\t", index_col=0)
|
pandas.read_csv
|
# -*- coding: utf-8 -*-
from datetime import timedelta, time
import numpy as np
from pandas import (DatetimeIndex, Float64Index, Index, Int64Index,
NaT, Period, PeriodIndex, Series, Timedelta,
TimedeltaIndex, date_range, period_range,
timedelta_range, notnull)
import pandas.util.testing as tm
import pandas as pd
from pandas.lib import Timestamp
from .common import Base
class DatetimeLike(Base):
def test_shift_identity(self):
idx = self.create_index()
self.assert_index_equal(idx, idx.shift(0))
def test_str(self):
# test the string repr
idx = self.create_index()
idx.name = 'foo'
self.assertFalse("length=%s" % len(idx) in str(idx))
self.assertTrue("'foo'" in str(idx))
self.assertTrue(idx.__class__.__name__ in str(idx))
if hasattr(idx, 'tz'):
if idx.tz is not None:
self.assertTrue(idx.tz in str(idx))
if hasattr(idx, 'freq'):
self.assertTrue("freq='%s'" % idx.freqstr in str(idx))
def test_view(self):
super(DatetimeLike, self).test_view()
i = self.create_index()
i_view = i.view('i8')
result = self._holder(i)
tm.assert_index_equal(result, i)
i_view = i.view(self._holder)
result = self._holder(i)
tm.assert_index_equal(result, i_view)
class TestDatetimeIndex(DatetimeLike, tm.TestCase):
_holder = DatetimeIndex
_multiprocess_can_split_ = True
def setUp(self):
self.indices = dict(index=tm.makeDateIndex(10))
self.setup_indices()
def create_index(self):
return date_range('20130101', periods=5)
def test_shift(self):
# test shift for datetimeIndex and non datetimeIndex
# GH8083
drange = self.create_index()
result = drange.shift(1)
expected = DatetimeIndex(['2013-01-02', '2013-01-03', '2013-01-04',
'2013-01-05',
'2013-01-06'], freq='D')
self.assert_index_equal(result, expected)
result = drange.shift(-1)
expected = DatetimeIndex(['2012-12-31', '2013-01-01', '2013-01-02',
'2013-01-03', '2013-01-04'],
freq='D')
self.assert_index_equal(result, expected)
result = drange.shift(3, freq='2D')
expected = DatetimeIndex(['2013-01-07', '2013-01-08', '2013-01-09',
'2013-01-10',
'2013-01-11'], freq='D')
self.assert_index_equal(result, expected)
def test_construction_with_alt(self):
i = pd.date_range('20130101', periods=5, freq='H', tz='US/Eastern')
i2 = DatetimeIndex(i, dtype=i.dtype)
self.assert_index_equal(i, i2)
i2 = DatetimeIndex(i.tz_localize(None).asi8, tz=i.dtype.tz)
self.assert_index_equal(i, i2)
i2 = DatetimeIndex(i.tz_localize(None).asi8, dtype=i.dtype)
self.assert_index_equal(i, i2)
i2 = DatetimeIndex(
i.tz_localize(None).asi8, dtype=i.dtype, tz=i.dtype.tz)
self.assert_index_equal(i, i2)
# localize into the provided tz
i2 = DatetimeIndex(i.tz_localize(None).asi8, tz='UTC')
expected = i.tz_localize(None).tz_localize('UTC')
self.assert_index_equal(i2, expected)
# incompat tz/dtype
self.assertRaises(ValueError, lambda: DatetimeIndex(
i.tz_localize(None).asi8, dtype=i.dtype, tz='US/Pacific'))
def test_pickle_compat_construction(self):
pass
def test_construction_index_with_mixed_timezones(self):
# GH 11488
# no tz results in DatetimeIndex
result = Index(
[Timestamp('2011-01-01'), Timestamp('2011-01-02')], name='idx')
exp = DatetimeIndex(
[Timestamp('2011-01-01'), Timestamp('2011-01-02')], name='idx')
self.assert_index_equal(result, exp, exact=True)
self.assertTrue(isinstance(result, DatetimeIndex))
self.assertIsNone(result.tz)
# same tz results in DatetimeIndex
result = Index([Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
Timestamp('2011-01-02 10:00', tz='Asia/Tokyo')],
name='idx')
exp = DatetimeIndex(
[Timestamp('2011-01-01 10:00'), Timestamp('2011-01-02 10:00')
], tz='Asia/Tokyo', name='idx')
self.assert_index_equal(result, exp, exact=True)
self.assertTrue(isinstance(result, DatetimeIndex))
self.assertIsNotNone(result.tz)
self.assertEqual(result.tz, exp.tz)
# same tz results in DatetimeIndex (DST)
result = Index([Timestamp('2011-01-01 10:00', tz='US/Eastern'),
Timestamp('2011-08-01 10:00', tz='US/Eastern')],
name='idx')
exp = DatetimeIndex([Timestamp('2011-01-01 10:00'),
Timestamp('2011-08-01 10:00')],
tz='US/Eastern', name='idx')
self.assert_index_equal(result, exp, exact=True)
self.assertTrue(isinstance(result, DatetimeIndex))
self.assertIsNotNone(result.tz)
self.assertEqual(result.tz, exp.tz)
# different tz results in Index(dtype=object)
result = Index([Timestamp('2011-01-01 10:00'),
Timestamp('2011-01-02 10:00', tz='US/Eastern')],
name='idx')
exp = Index([Timestamp('2011-01-01 10:00'),
Timestamp('2011-01-02 10:00', tz='US/Eastern')],
dtype='object', name='idx')
self.assert_index_equal(result, exp, exact=True)
self.assertFalse(isinstance(result, DatetimeIndex))
result = Index([Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
Timestamp('2011-01-02 10:00', tz='US/Eastern')],
name='idx')
exp = Index([Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
Timestamp('2011-01-02 10:00', tz='US/Eastern')],
dtype='object', name='idx')
self.assert_index_equal(result, exp, exact=True)
self.assertFalse(isinstance(result, DatetimeIndex))
# passing tz results in DatetimeIndex
result = Index([Timestamp('2011-01-01 10:00'),
Timestamp('2011-01-02 10:00', tz='US/Eastern')],
tz='Asia/Tokyo', name='idx')
exp = DatetimeIndex([Timestamp('2011-01-01 19:00'),
Timestamp('2011-01-03 00:00')],
tz='Asia/Tokyo', name='idx')
self.assert_index_equal(result, exp, exact=True)
self.assertTrue(isinstance(result, DatetimeIndex))
# length = 1
result = Index([Timestamp('2011-01-01')], name='idx')
exp = DatetimeIndex([Timestamp('2011-01-01')], name='idx')
self.assert_index_equal(result, exp, exact=True)
self.assertTrue(isinstance(result, DatetimeIndex))
self.assertIsNone(result.tz)
# length = 1 with tz
result = Index(
[Timestamp('2011-01-01 10:00', tz='Asia/Tokyo')], name='idx')
exp = DatetimeIndex([Timestamp('2011-01-01 10:00')], tz='Asia/Tokyo',
name='idx')
self.assert_index_equal(result, exp, exact=True)
self.assertTrue(isinstance(result, DatetimeIndex))
self.assertIsNotNone(result.tz)
self.assertEqual(result.tz, exp.tz)
def test_construction_index_with_mixed_timezones_with_NaT(self):
# GH 11488
result = Index([pd.NaT,
|
Timestamp('2011-01-01')
|
pandas.lib.Timestamp
|
# python 2/3 compatibility
from __future__ import division, print_function
# global imports
import json
import copy
import pandas
import numpy
from .information_block import InformationBlock
class ParameterBlock(InformationBlock):
"""
Class holding data from model-simulations.
"""
def addEntries(self, Dict):
for i in Dict.keys():
self.Elements[i] = Dict[i]
def fromDict(self, Dict):
self.Elements = Dict
def JSONize(self):
Block = self.Elements
block2 = copy.deepcopy(Block)
for i in list(Block.keys()):
if type(Block[i]) is dict:
for j in list(Block[i].keys()):
block2[i][j] = json.dumps(Block[i][j], default=JSON_Int64_compensation)
else:
block2[i] = json.dumps(Block[i], default=JSON_Int64_compensation)
return(block2)
def toDataFrame(self, Col_list=None):
Block = self.Elements
if len(list(Block.keys())) > 0:
if Col_list is None:
fields = list(Block[list(Block.keys())[0]].keys())
else:
fields = Col_list
TableOut = pandas.DataFrame(index=list(Block.keys()), columns=fields)
for i in list(Block.keys()):
for j in fields:
try:
TableOut.loc[i, j] = abs(round(Block[i][j], 5))
except:
TableOut.loc[i, j] = Block[i][j]
return TableOut
else:
return pandas.DataFrame()
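# Illustration (assumption based on the loops above): self.Elements is expected to be
# a dict of dicts, {row_id: {column_name: value}}, e.g.
#     {"R1": {"flux": 0.123456, "note": "ok"}}
# toDataFrame() then returns a frame indexed by row_id with one column per field,
# storing abs(round(value, 5)) for numeric entries and the raw value otherwise.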
def toDataFrame_SBtabCompatibility(self, NameList=None, Col_list=None):
Block = self.Elements
if len(list(Block.keys())) > 0:
if Col_list is None:
fields = list(Block[list(Block.keys())[0]].keys())
else:
fields = Col_list
if NameList is not None:
if len(fields) == len(NameList):
colNames = NameList
else:
colNames = fields
else:
colNames = fields
TableOut =
|
pandas.DataFrame(columns=fields)
|
pandas.DataFrame
|
import pandas as pd
import numpy as np
# import lightgbm as lgb
import loadsave as ls
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import StandardScaler
from sklearn import metrics
from keras import backend as K
from sklearn import preprocessing
from sklearn.utils import class_weight
from collections import Counter
# import tensorflow as tf
# from keras.backend.tensorflow_backend import set_session
# config = tf.ConfigProto()
# config.gpu_options.per_process_gpu_memory_fraction = 0.5
# config.gpu_options.visible_device_list = "0"
# set_session(tf.Session(config=config))
def setDayTime(row):
row['isWeekend'] = np.where(((row['tweekday'] == 5) | (row['tweekday'] == 6)),1,0)
row['isLateNight'] = np.where(((row['thour'] <= 7) | (row['thour'] >= 22)),1,0)
row['isNight'] = np.where(((row['thour'] <= 3) | (row['thour'] >= 19)),1,0)
row['isEarlyMorn'] = np.where(((row['thour'] >= 7) & (row['thour'] <= 12)),1,0)
row['isDay'] = np.where(((row['thour'] >= 10) & (row['thour'] <= 17)),1,0)
row['isNoon'] = np.where(((row['thour'] >= 15) & (row['thour'] <= 21)),1,0)
def isWeekend(row):
if row['tweekday'] == 5 or row['tweekday'] == 6:
return 1
else:
return 0
def isLateNight(row):
if row['thour'] <= 7 or row['thour'] >= 22:
return 1
else:
return 0
def isNight(row):
if row['thour'] <= 3 or row['thour'] >= 19:
return 1
else:
return 0
def isEarlyMorn(row):
if row['thour'] >= 7 and row['thour'] <= 12:
return 1
else:
return 0
def printConfMat(y_true, y_pred):
confMat=(metrics.confusion_matrix(y_true, y_pred))
print(" ")
print(confMat)
print(0,confMat[0][0]/(confMat[0][0]+confMat[0][1]))
print(1,confMat[1][1]/(confMat[1][1]+confMat[1][0]))
def isDay(row):
if row['thour'] >= 10 and row['thour'] <= 17:
return 1
else:
return 0
def isNoon(row):
if row['thour'] >= 15 and row['thour'] <= 21:
return 1
else:
return 0
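# Illustrative sketch (not part of the original script): setDayTime expects integer
# 'tweekday' (apparently Monday=0, given the weekend check on 5 and 6) and 'thour'
# (0-23) columns, and adds the six flag columns in place. The rows below are made up.
def _set_day_time_example():
    demo = pd.DataFrame({"tweekday": [5, 2], "thour": [23, 11]})
    setDayTime(demo)
    return demo  # now also has isWeekend, isLateNight, isNight, isEarlyMorn, isDay, isNoon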
isPreprocess = False
if (isPreprocess):
print("Reading data.....")
train = pd.read_csv("input/train.csv")
test =
|
pd.read_csv("input/test.csv")
|
pandas.read_csv
|
# -*- coding: utf-8 -*-
# Copyright (c) 2021, libracore AG and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
import pandas as pd
from frappe.utils.data import add_days, getdate, get_datetime, now_datetime
# Header mapping (ERPNext <> MVD)
hm = {
'mitglied_nr': 'mitglied_nr',
'mitglied_id': 'mitglied_id',
'status_c': 'status_c',
'sektion_id': 'sektion_id',
'zuzug_sektion': 'sektion_zq_id',
'mitgliedtyp_c': 'mitgliedtyp_c',
'mitglied_c': 'mitglied_c',
'wichtig': 'wichtig',
'eintritt': 'datum_eintritt',
'austritt': 'datum_austritt',
'wegzug': 'datum_wegzug',
'zuzug': 'datum_zuzug',
'kuendigung': 'datum_kuend_per',
'adresstyp_c': 'adresstyp_c',
'adress_id': 'adress_id',
'firma': 'firma',
'zusatz_firma': 'zusatz_firma',
'anrede_c': 'anrede_c',
'nachname_1': 'nachname_1',
'vorname_1': 'vorname_1',
'tel_p_1': 'tel_p_1',
'tel_m_1': 'tel_m_1',
'tel_g_1': 'tel_g_1',
'e_mail_1': 'e_mail_1',
'zusatz_adresse': 'zusatz_adresse',
'strasse': 'strasse',
'nummer': 'nummer',
'nummer_zu': 'nummer_zu',
'postfach': 'postfach',
'postfach_nummer': 'postfach_nummer',
'plz': 'plz',
'ort': 'ort',
'nachname_2': 'nachname_2',
'vorname_2': 'vorname_2',
'tel_p_2': 'tel_p_2',
'tel_m_2': 'tel_m_2',
'tel_g_2': 'tel_g_2',
'e_mail_2': 'e_mail_2',
'datum': 'datum',
'jahr': 'jahr',
'offen': 'offen',
'ref_nr_five_1': 'ref_nr_five_1',
'kz_1': 'kz_1',
'tkategorie_d': 'tkategorie_d',
'pers_name': 'pers_name',
'datum_von': 'datum_von',
'datum_bis': 'datum_bis',
'datum_erinnerung': 'datum_erinnerung',
'notiz_termin': 'notiz_termin',
'erledigt': 'erledigt',
'nkategorie_d': 'nkategorie_d',
'notiz': 'notiz',
'weitere_kontaktinfos': 'weitere_kontaktinfos',
'mkategorie_d': 'mkategorie_d',
'benutzer_name': 'benutzer_name',
'jahr_bez_mitgl': 'jahr_bez_mitgl',
'objekt_hausnummer': 'objekt_hausnummer',
'nummer_zu': 'nummer_zu',
'objekt_nummer_zu': 'objekt_nummer_zu',
'rg_nummer_zu': 'rg_nummer_zu',
'buchungen': 'buchungen',
'online_haftpflicht': 'online_haftpflicht',
'online_gutschrift': 'online_gutschrift',
'online_betrag': 'online_betrag',
'datum_online_verbucht': 'datum_online_verbucht',
'datum_online_gutschrift': 'datum_online_gutschrift',
'online_payment_method': 'online_payment_method',
'online_payment_id': 'online_payment_id'
}
def read_csv(site_name, file_name, limit=False):
# display all columns for error handling
pd.set_option('display.max_rows', None, 'display.max_columns', None)
# read csv
df = pd.read_csv('/home/frappe/frappe-bench/sites/{site_name}/private/files/{file_name}'.format(site_name=site_name, file_name=file_name))
# loop through rows
count = 1
max_loop = limit
if not limit:
index = df.index
max_loop = len(index)
for index, row in df.iterrows():
if count <= max_loop:
if not migliedschaft_existiert(str(get_value(row, 'mitglied_id'))):
if get_value(row, 'adresstyp_c') == 'MITGL':
create_mitgliedschaft(row)
else:
frappe.log_error("{0}".format(row), 'Adresse != MITGL, aber ID noch nicht erfasst')
else:
update_mitgliedschaft(row)
print("{count} of {max_loop} --> {percent}".format(count=count, max_loop=max_loop, percent=((100 / max_loop) * count)))
count += 1
else:
break
def create_mitgliedschaft(data):
try:
if get_value(data, 'vorname_2') or get_value(data, 'nachname_2'):
hat_solidarmitglied = 1
else:
hat_solidarmitglied = 0
strasse = get_value(data, 'strasse')
postfach = check_postfach(data, 'postfach')
if postfach == 1:
strasse = 'Postfach'
else:
if get_value(data, 'postfach_nummer') and not strasse:
strasse = 'Postfach'
postfach = 1
kundentyp = 'Einzelperson'
if get_value(data, 'mitgliedtyp_c') == 'GESCH':
kundentyp = 'Unternehmen'
zuzug = get_formatted_datum(get_value(data, 'zuzug'))
if zuzug:
zuzug_von = get_sektion(get_value(data, 'zuzug_sektion'))
else:
zuzug_von = ''
new_mitgliedschaft = frappe.get_doc({
'doctype': 'MV Mitgliedschaft',
'mitglied_nr': str(get_value(data, 'mitglied_nr')).zfill(8),
'mitglied_id': str(get_value(data, 'mitglied_id')),
'status_c': get_status_c(get_value(data, 'status_c')),
'sektion_id': get_sektion(get_value(data, 'sektion_id')),
'mitgliedtyp_c': get_mitgliedtyp_c(get_value(data, 'mitgliedtyp_c')),
'mitglied_c': get_mitglied_c(get_value(data, 'mitglied_c')),
#'wichtig': get_value(data, 'wichtig'),
'eintritt': get_formatted_datum(get_value(data, 'eintritt')),
'austritt': get_formatted_datum(get_value(data, 'austritt')),
'wegzug': get_formatted_datum(get_value(data, 'wegzug')),
#'wegzug_zu': '', --> where does this info come from?
'zuzug': zuzug,
'zuzug_von': zuzug_von,
'kuendigung': get_formatted_datum(get_value(data, 'kuendigung')),
'kundentyp': kundentyp,
'firma': get_value(data, 'firma'),
'zusatz_firma': get_value(data, 'zusatz_firma'),
'anrede_c': get_anrede_c(get_value(data, 'anrede_c')),
'nachname_1': get_value(data, 'nachname_1'),
'vorname_1': get_value(data, 'vorname_1'),
'tel_p_1': str(get_value(data, 'tel_p_1')),
'tel_m_1': str(get_value(data, 'tel_m_1')),
'tel_g_1': str(get_value(data, 'tel_g_1')),
'e_mail_1': get_value(data, 'e_mail_1'),
'zusatz_adresse': get_value(data, 'zusatz_adresse'),
'strasse': strasse,
'objekt_strasse': strasse, # fallback
'objekt_ort': get_value(data, 'ort'), # fallback
'nummer': get_value(data, 'nummer'),
'nummer_zu': get_value(data, 'nummer_zu'),
'postfach': postfach,
'postfach_nummer': get_value(data, 'postfach_nummer'),
'plz': get_value(data, 'plz'),
'ort': get_value(data, 'ort'),
'hat_solidarmitglied': hat_solidarmitglied,
'nachname_2': get_value(data, 'nachname_2'),
'vorname_2': get_value(data, 'vorname_2'),
'tel_p_2': str(get_value(data, 'tel_p_2')),
#'tel_m_2': str(get_value(data, 'tel_m_2')),
'tel_g_2': str(get_value(data, 'tel_g_2')),
'e_mail_2': str(get_value(data, 'e_mail_2'))
})
new_mitgliedschaft.insert()
frappe.db.commit()
return
except Exception as err:
frappe.log_error("{0}\n---\n{1}".format(err, data), 'create_mitgliedschaft')
return
def update_mitgliedschaft(data):
try:
mitgliedschaft = frappe.get_doc("MV Mitgliedschaft", str(get_value(data, 'mitglied_id')))
if get_value(data, 'adresstyp_c') == 'MITGL':
# Member (incl. solidarity member)
if get_value(data, 'vorname_2') or get_value(data, 'nachname_2'):
hat_solidarmitglied = 1
else:
hat_solidarmitglied = 0
strasse = get_value(data, 'strasse')
postfach = check_postfach(data, 'postfach')
if postfach == 1:
strasse = 'Postfach'
else:
if get_value(data, 'postfach_nummer') and not strasse:
strasse = 'Postfach'
postfach = 1
kundentyp = 'Einzelperson'
if get_value(data, 'mitglied_c') == 'GESCH':
kundentyp = 'Unternehmen'
zuzug = get_formatted_datum(get_value(data, 'zuzug'))
if zuzug:
zuzug_von = get_sektion(get_value(data, 'zuzug_sektion'))
else:
zuzug_von = ''
mitgliedschaft.mitglied_nr = str(get_value(data, 'mitglied_nr')).zfill(8)
mitgliedschaft.status_c = get_status_c(get_value(data, 'status_c'))
mitgliedschaft.sektion_id = get_sektion(get_value(data, 'sektion_id'))
mitgliedschaft.mitgliedtyp_c = get_mitgliedtyp_c(get_value(data, 'mitgliedtyp_c'))
mitgliedschaft.mitglied_c = get_mitglied_c(get_value(data, 'mitglied_c'))
#mitgliedschaft.wichtig = get_value(data, 'wichtig')
mitgliedschaft.eintritt = get_formatted_datum(get_value(data, 'eintritt'))
mitgliedschaft.austritt = get_formatted_datum(get_value(data, 'austritt'))
mitgliedschaft.wegzug = get_formatted_datum(get_value(data, 'wegzug'))
mitgliedschaft.zuzug = zuzug
#mitgliedschaft.wegzug_zu = '' --> where does this info come from?
mitgliedschaft.zuzug_von = zuzug_von
mitgliedschaft.kuendigung = get_formatted_datum(get_value(data, 'kuendigung'))
mitgliedschaft.kundentyp = kundentyp
mitgliedschaft.firma = get_value(data, 'firma')
mitgliedschaft.zusatz_firma = get_value(data, 'zusatz_firma')
mitgliedschaft.anrede_c = get_anrede_c(get_value(data, 'anrede_c'))
mitgliedschaft.nachname_1 = get_value(data, 'nachname_1')
mitgliedschaft.vorname_1 = get_value(data, 'vorname_1')
mitgliedschaft.tel_p_1 = str(get_value(data, 'tel_p_1'))
mitgliedschaft.tel_m_1 = str(get_value(data, 'tel_m_1'))
mitgliedschaft.tel_g_1 = str(get_value(data, 'tel_g_1'))
mitgliedschaft.e_mail_1 = get_value(data, 'e_mail_1')
mitgliedschaft.zusatz_adresse = get_value(data, 'zusatz_adresse')
mitgliedschaft.strasse = strasse
mitgliedschaft.nummer = get_value(data, 'nummer')
mitgliedschaft.nummer_zu = get_value(data, 'nummer_zu')
mitgliedschaft.postfach = postfach
mitgliedschaft.postfach_nummer = get_value(data, 'postfach_nummer')
mitgliedschaft.plz = get_value(data, 'plz')
mitgliedschaft.ort = get_value(data, 'ort')
mitgliedschaft.hat_solidarmitglied = hat_solidarmitglied
mitgliedschaft.nachname_2 = get_value(data, 'nachname_2')
mitgliedschaft.vorname_2 = get_value(data, 'vorname_2')
mitgliedschaft.tel_p_2 = str(get_value(data, 'tel_p_2'))
#mitgliedschaft.tel_m_2 = str(get_value(data, 'tel_m_2'))
mitgliedschaft.tel_g_2 = str(get_value(data, 'tel_g_2'))
mitgliedschaft.e_mail_2 = get_value(data, 'e_mail_2')
mitgliedschaft.adress_id_mitglied = get_value(data, 'adress_id')
elif get_value(data, 'adresstyp_c') == 'OBJEKT':
# Object address
mitgliedschaft.objekt_zusatz_adresse = get_value(data, 'zusatz_adresse')
mitgliedschaft.objekt_strasse = get_value(data, 'strasse') or 'Fehlende Angaben!'
mitgliedschaft.objekt_hausnummer = get_value(data, 'nummer')
mitgliedschaft.objekt_nummer_zu = get_value(data, 'nummer_zu')
mitgliedschaft.objekt_plz = get_value(data, 'plz')
mitgliedschaft.objekt_ort = get_value(data, 'ort') or 'Fehlende Angaben!'
mitgliedschaft.adress_id_objekt = get_value(data, 'adress_id')
elif get_value(data, 'adresstyp_c') == 'RECHN':
# Billing address
strasse = get_value(data, 'strasse')
postfach = check_postfach(data, 'postfach')
if postfach == 1:
strasse = 'Postfach'
else:
if get_value(data, 'postfach_nummer') and not strasse:
strasse = 'Postfach'
postfach = 1
mitgliedschaft.abweichende_rechnungsadresse = 1
mitgliedschaft.rg_zusatz_adresse = get_value(data, 'zusatz_adresse')
mitgliedschaft.rg_strasse = strasse
mitgliedschaft.rg_nummer = get_value(data, 'nummer')
mitgliedschaft.rg_nummer_zu = get_value(data, 'nummer_zu')
mitgliedschaft.rg_postfach = postfach
mitgliedschaft.rg_postfach_nummer = get_value(data, 'postfach_nummer')
mitgliedschaft.rg_plz = get_value(data, 'plz')
mitgliedschaft.rg_ort = get_value(data, 'ort')
mitgliedschaft.adress_id_rg = get_value(data, 'adress_id')
# else:
# TBD!
mitgliedschaft.save(ignore_permissions=True)
frappe.db.commit()
return
except Exception as err:
frappe.log_error("{0}\n{1}".format(err, data), 'update_mitgliedschaft')
return
def get_sektion(id):
# List is not exhaustive; check!
if id == 25:
return 'MVD'
elif id == 4:
return 'Bern'
elif id == 8:
return 'Basel Stadt'
elif id == 14:
return 'Luzern'
elif id == 3:
return 'Aargau'
else:
return 'Sektions-ID unbekannt'
def get_status_c(status_c):
# List is probably not exhaustive; check!
if status_c == 'AREG':
return 'Mitglied'
elif status_c == 'MUTATI':
return 'Mutation'
elif status_c == 'AUSSCH':
return 'Ausschluss'
elif status_c == 'GESTOR':
return 'Gestorben'
elif status_c == 'KUNDIG':
return 'Kündigung'
elif status_c == 'WEGZUG':
return 'Wegzug'
elif status_c == 'ZUZUG':
return 'Zuzug'
else:
return 'Mitglied'
def get_mitgliedtyp_c(mitgliedtyp_c):
# TBD!!!!!!!!!!
if mitgliedtyp_c == 'PRIV':
return 'Privat'
else:
return 'Privat'
def get_mitglied_c(mitglied_c):
# TBD!!!!!!!!!!
if mitglied_c == 'MITGL':
return 'Mitglied'
else:
return 'Mitglied'
def get_anrede_c(anrede_c):
anrede_c = int(anrede_c)
if anrede_c == 1:
return 'Herr'
elif anrede_c == 2:
return 'Frau'
elif anrede_c == 3:
return 'Frau und Herr'
elif anrede_c == 4:
return 'Herr und Frau'
elif anrede_c == 5:
return 'Familie'
elif anrede_c == 7:
return 'Herren'
elif anrede_c == 8:
return 'Frauen'
else:
return ''
def get_formatted_datum(datum):
if datum:
datum_raw = datum.split(" ")[0]
if not datum_raw:
return ''
else:
return datum_raw.replace("/", "-")
else:
return ''
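# Illustration: a raw CSV value like '2021/03/31 00:00:00' becomes '2021-03-31';
# empty or missing values fall through to ''.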
def check_postfach(row, value):
value = row[hm[value]]
if not pd.isnull(value):
postfach = int(value)
if postfach < 0:
return 1
else:
return 0
else:
return 0
def get_value(row, value):
value = row[hm[value]]
if not pd.isnull(value):
try:
if isinstance(value, str):
return value.strip()
else:
return value
except:
return value
else:
return ''
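# Illustration: get_value(row, 'eintritt') resolves the CSV column via hm
# ('eintritt' -> 'datum_eintritt'), strips string cells, and returns '' for NaN cells.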
def migliedschaft_existiert(mitglied_id):
anz = frappe.db.sql("""SELECT COUNT(`name`) AS `qty` FROM `tabMitgliedschaft` WHERE `mitglied_id` = '{mitglied_id}'""".format(mitglied_id=mitglied_id), as_dict=True)[0].qty
if anz > 0:
return True
else:
return False
# --------------------------------------------------------------
# Debtor importer
# --------------------------------------------------------------
def import_debitoren(site_name, file_name, limit=False, delete_from=False):
'''
Example:
sudo bench execute mvd.mvd.data_import.importer.import_debitoren --kwargs "{'site_name': 'site1.local', 'file_name': 'offene_rechnungen.csv'}"
'''
if delete_from:
SQL_SAFE_UPDATES_false = frappe.db.sql("""SET SQL_SAFE_UPDATES=0""", as_list=True)
delete_sinvs = frappe.db.sql("""DELETE FROM `tabSales Invoice` WHERE `sektion_id` = '{delete_from}'
AND `docstatus` = 1
AND `status` = 'Overdue'""".format(delete_from=delete_from), as_list=True)
SQL_SAFE_UPDATES_true = frappe.db.sql("""SET SQL_SAFE_UPDATES=1""", as_list=True)
frappe.db.commit()
# display all coloumns for error handling
pd.set_option('display.max_rows', None, 'display.max_columns', None)
# read csv
df = pd.read_csv('/home/frappe/frappe-bench/sites/{site_name}/private/files/{file_name}'.format(site_name=site_name, file_name=file_name))
# loop through rows
count = 1
max_loop = limit
if not limit:
index = df.index
max_loop = len(index)
for index, row in df.iterrows():
if count <= max_loop:
if get_value(row, 'offen') > 0:
if not migliedschaft_existiert(str(get_value(row, 'mitglied_id'))):
frappe.log_error("{0}".format(row), 'Mitglied existiert nicht')
else:
erstelle_rechnung(row)
print("{count} of {max_loop} --> {percent}".format(count=count, max_loop=max_loop, percent=((100 / max_loop) * count)))
count += 1
else:
break
def erstelle_rechnung(row):
try:
file_qrr = int(str(get_value(row, 'ref_nr_five_1')).replace(" ", ""))
qrr = '{num:027d}'.format(num=file_qrr)
existing_sinv_query = ("""SELECT `name` FROM `tabSales Invoice` WHERE REPLACE(`esr_reference`, ' ', '') = '{qrr}'""".format(qrr=qrr))
if len(frappe.db.sql(existing_sinv_query, as_list=True)) > 0:
frappe.log_error("{0}".format(row), 'Rechnung wurde bereits erstellt')
return
else:
existing_sinv_query = ("""SELECT `name` FROM `tabSales Invoice` WHERE `mv_mitgliedschaft` = '{mitglied_id}'""".format(mitglied_id=str(get_value(row, 'mitglied_id'))))
existing_sinv = frappe.db.sql(existing_sinv_query, as_dict=True)
if len(existing_sinv) > 0:
frappe.db.sql("""UPDATE `tabSales Invoice` SET `esr_reference` = '{qrr}' WHERE `name` = '{name}'""".format(qrr=qrr, name=existing_sinv[0].name), as_list=True)
frappe.log_error("{0}".format(row), 'Update QRR')
return
else:
mitgliedschaft = frappe.get_doc("Mitgliedschaft", str(get_value(row, 'mitglied_id')))
posting_date = str(get_value(row, 'datum')).split(" ")[0]
item = frappe.get_value("Sektion", mitgliedschaft.sektion_id, "mitgliedschafts_artikel")
company = frappe.get_value("Sektion", mitgliedschaft.sektion_id, "company")
cost_center = frappe.get_value("Company", company, "cost_center")
sektions_code = str(frappe.get_value("Sektion", mitgliedschaft.sektion_id, "sektion_id"))
sinv = frappe.get_doc({
"doctype": "Sales Invoice",
"company": company,
"customer": mitgliedschaft.rg_kunde or mitgliedschaft.kunde_mitglied,
"set_posting_time": 1,
"posting_date": posting_date,
"posting_time": str(get_value(row, 'datum')).split(" ")[1],
"ist_mitgliedschaftsrechnung": 1,
"mv_mitgliedschaft": mitgliedschaft.name,
"sektion_id": mitgliedschaft.sektion_id,
"sektions_code": sektions_code,
"mitgliedschafts_jahr": str(get_value(row, 'jahr')),
"due_date": add_days(posting_date, 30),
"esr_reference": qrr,
"items": [
{
"item_code": item,
"qty": 1,
"rate": get_value(row, 'offen'),
"cost_center": cost_center
}
]
})
sinv.insert()
sinv.submit()
frappe.db.commit()
return
except Exception as err:
frappe.log_error("{0}\n\n{1}".format(err, row), 'Rechnung konnte nicht erstellt werden')
return
# --------------------------------------------------------------
# Miveba Termin (appointment) importer
# --------------------------------------------------------------
def import_termine(site_name, file_name, limit=False):
'''
Example:
sudo bench execute mvd.mvd.data_import.importer.import_termine --kwargs "{'site_name': 'site1.local', 'file_name': 'termine.csv'}"
'''
    # display all columns for error handling
pd.set_option('display.max_rows', None, 'display.max_columns', None)
# read csv
df = pd.read_csv('/home/frappe/frappe-bench/sites/{site_name}/private/files/{file_name}'.format(site_name=site_name, file_name=file_name))
# loop through rows
count = 1
max_loop = limit
if not limit:
index = df.index
max_loop = len(index)
for index, row in df.iterrows():
if count <= max_loop:
if frappe.db.exists("Mitgliedschaft", str(get_value(row, 'mitglied_id'))):
try:
create_termin(row)
except Exception as err:
frappe.log_error("{0}\n\n{1}".format(err, row), 'Termin konnte nicht erstellt werden')
else:
frappe.log_error("{0}".format(row), 'Mitgliedschaft existiert nicht')
print("{count} of {max_loop} --> {percent}".format(count=count, max_loop=max_loop, percent=((100 / max_loop) * count)))
count += 1
else:
break
def create_termin(row):
try:
kategorie = check_kategorie(row)
kontakt = check_kontakt(row)
termin_status = check_termin_status(row, 'erledigt')
sektion_id = frappe.get_value("Mitgliedschaft", str(get_value(row, 'mitglied_id')), "sektion_id")
new = frappe.get_doc({
"doctype": "Termin",
"kategorie": kategorie,
"kontakt": kontakt,
"sektion_id": sektion_id,
"von": str(get_value(row, 'datum_von')),
"bis": str(get_value(row, 'datum_bis')),
"erinnerung": str(get_value(row, 'datum_erinnerung')),
"notitz": str(get_value(row, 'notiz_termin')),
"status": termin_status,
"mv_mitgliedschaft": str(get_value(row, 'mitglied_id'))
})
new.insert()
frappe.db.commit()
return
except Exception as err:
frappe.log_error("{0}\n\n{1}".format(err, row), 'Termin konnte nicht erstellt werden')
def check_kategorie(row):
kategorie = str(get_value(row, 'tkategorie_d'))
sektion_id = frappe.get_value("Mitgliedschaft", str(get_value(row, 'mitglied_id')), "sektion_id")
query = ("""SELECT `name` FROM `tabTerminkategorie` WHERE `kategorie` = '{kategorie}' AND `sektion_id` = '{sektion_id}'""".format(kategorie=kategorie, sektion_id=sektion_id))
kat = frappe.db.sql(query, as_list=True)
if len(kat) > 0:
return kat[0][0]
else:
new = frappe.get_doc({
"doctype": "Terminkategorie",
"kategorie": kategorie,
"sektion_id": sektion_id
})
new.insert()
frappe.db.commit()
return new.name
def check_kontakt(row):
kontakt = str(get_value(row, 'pers_name'))
if kontakt and kontakt != '':
sektion_id = frappe.get_value("Mitgliedschaft", str(get_value(row, 'mitglied_id')), "sektion_id")
query = ("""SELECT `name` FROM `tabTermin Kontaktperson` WHERE `kontakt` = '{kontakt}' AND `sektion_id` = '{sektion_id}'""".format(kontakt=kontakt, sektion_id=sektion_id))
kat = frappe.db.sql(query, as_list=True)
if len(kat) > 0:
return kat[0][0]
else:
new = frappe.get_doc({
"doctype": "Termin Kontaktperson",
"kontakt": kontakt,
"sektion_id": sektion_id
})
new.insert()
frappe.db.commit()
return new.name
else:
return ''
def check_termin_status(row, value):
value = row[hm[value]]
if not pd.isnull(value):
termin_status = int(value)
if termin_status < 0:
return 'Closed'
else:
return 'Open'
else:
return 'Open'
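# Illustrative reading of check_termin_status (assumes the column map `hm` is defined
# earlier in this module): a negative integer in the mapped 'erledigt' column yields
# 'Closed'; zero, positive or missing (NaN) values yield 'Open'.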
# --------------------------------------------------------------
# Miveba-Notizen Importer
# --------------------------------------------------------------
def import_notizen(site_name, file_name, limit=False):
'''
Example:
sudo bench execute mvd.mvd.data_import.importer.import_notizen --kwargs "{'site_name': 'site1.local', 'file_name': 'notizen.csv'}"
'''
    # display all columns for error handling
pd.set_option('display.max_rows', None, 'display.max_columns', None)
# read csv
df = pd.read_csv('/home/frappe/frappe-bench/sites/{site_name}/private/files/{file_name}'.format(site_name=site_name, file_name=file_name))
# loop through rows
count = 1
max_loop = limit
if not limit:
index = df.index
max_loop = len(index)
for index, row in df.iterrows():
if count <= max_loop:
if frappe.db.exists("Mitgliedschaft", str(get_value(row, 'mitglied_id'))):
try:
create_notiz(row)
except Exception as err:
frappe.log_error("{0}\n\n{1}".format(err, row), 'Notiz konnte nicht erstellt werden')
else:
frappe.log_error("{0}".format(row), 'Mitgliedschaft existiert nicht')
print("{count} of {max_loop} --> {percent}".format(count=count, max_loop=max_loop, percent=((100 / max_loop) * count)))
count += 1
else:
break
def create_notiz(row):
try:
datum_erinnerung = str(get_value(row, 'datum_erinnerung'))
if get_datetime(datum_erinnerung) > now_datetime():
create_todo(row)
else:
create_comment(row)
return
except Exception as err:
frappe.log_error("{0}\n\n{1}".format(err, row), 'Termin konnte nicht erstellt werden')
def create_comment(row):
try:
mitgliedschaft = frappe.get_doc("Mitgliedschaft", str(get_value(row, 'mitglied_id')))
description = str(get_value(row, 'nkategorie_d')) + "<br>"
description += str(get_value(row, 'datum_von')) + "<br>"
description += str(get_value(row, 'notiz')) + "<br>"
description += str(get_value(row, 'benutzer_name')) + "<br>"
mitgliedschaft.add_comment('Comment', text=description)
frappe.db.commit()
except Exception as err:
frappe.log_error("{0}\n\n{1}".format(err, row), 'Kommentar konnte nicht erstellt werden')
def create_todo(row):
try:
description = str(get_value(row, 'nkategorie_d')) + "<br>"
description += str(get_value(row, 'datum_von')) + "<br>"
description += str(get_value(row, 'notiz')) + "<br>"
description += str(get_value(row, 'benutzer_name')) + "<br>"
mitgliedschaft = frappe.get_doc("Mitgliedschaft", str(get_value(row, 'mitglied_id')))
owner = frappe.get_value("Sektion", mitgliedschaft.sektion_id, "virtueller_user")
todo = frappe.get_doc({
"doctype":"ToDo",
"owner": owner,
"reference_type": "Mitgliedschaft",
"reference_name": str(get_value(row, 'mitglied_id')),
"description": description or '',
"priority": "Medium",
"status": "Open",
"date": str(get_value(row, 'datum_erinnerung')),
"assigned_by": owner,
"mv_mitgliedschaft": str(get_value(row, 'mitglied_id'))
}).insert(ignore_permissions=True)
frappe.db.commit()
return
except Exception as err:
frappe.log_error("{0}\n\n{1}".format(err, row), 'ToDo konnte nicht erstellt werden')
# --------------------------------------------------------------
# Weitere Kontaktinfos Importer
# --------------------------------------------------------------
def import_weitere_kontaktinfos(site_name, file_name, limit=False):
'''
Example:
sudo bench execute mvd.mvd.data_import.importer.import_weitere_kontaktinfos --kwargs "{'site_name': 'site1.local', 'file_name': 'weitere_kontaktinfos.csv'}"
'''
    # display all columns for error handling
pd.set_option('display.max_rows', None, 'display.max_columns', None)
# read csv
df = pd.read_csv('/home/frappe/frappe-bench/sites/{site_name}/private/files/{file_name}'.format(site_name=site_name, file_name=file_name))
# loop through rows
count = 1
max_loop = limit
if not limit:
index = df.index
max_loop = len(index)
for index, row in df.iterrows():
if count <= max_loop:
if frappe.db.exists("Mitgliedschaft", str(get_value(row, 'mitglied_id'))):
try:
erstelle_weitere_kontaktinformation(row)
except Exception as err:
frappe.log_error("{0}\n\n{1}".format(err, row), 'Weitere Kontaktinformation konnte nicht erstellt werden')
else:
frappe.log_error("{0}".format(row), 'Mitgliedschaft existiert nicht')
print("{count} of {max_loop} --> {percent}".format(count=count, max_loop=max_loop, percent=((100 / max_loop) * count)))
count += 1
else:
break
def erstelle_weitere_kontaktinformation(row):
try:
mitgliedschaft = frappe.get_doc("Mitgliedschaft", str(get_value(row, 'mitglied_id')))
description = str(get_value(row, 'weitere_kontaktinfos')).replace("\n", "<br>")
mitgliedschaft.add_comment('Comment', text=description)
frappe.db.commit()
except Exception as err:
frappe.log_error("{0}\n\n{1}".format(err, row), 'Kommentar konnte nicht erstellt werden')
# --------------------------------------------------------------
# Miveba Buchungen Importer
# --------------------------------------------------------------
def import_miveba_buchungen(site_name, file_name, limit=False):
'''
Example:
sudo bench execute mvd.mvd.data_import.importer.import_miveba_buchungen --kwargs "{'site_name': 'site1.local', 'file_name': 'miveba_buchungen.csv'}"
'''
    # display all columns for error handling
pd.set_option('display.max_rows', None, 'display.max_columns', None)
# read csv
df = pd.read_csv('/home/frappe/frappe-bench/sites/{site_name}/private/files/{file_name}'.format(site_name=site_name, file_name=file_name))
# loop through rows
count = 1
commit_count = 1
max_loop = limit
if not limit:
index = df.index
max_loop = len(index)
for index, row in df.iterrows():
if count <= max_loop:
if frappe.db.exists("Mitgliedschaft", str(get_value(row, 'mitglied_id'))):
try:
mitglied_id = str(get_value(row, 'mitglied_id'))
miveba_buchungen = str(get_value(row, 'buchungen'))
frappe.db.sql("""UPDATE `tabMitgliedschaft` SET `miveba_buchungen` = '{miveba_buchungen}' WHERE `name` = '{mitglied_id}'""".format(miveba_buchungen=miveba_buchungen, mitglied_id=mitglied_id), as_list=True)
if commit_count == 1000:
frappe.db.commit()
commit_count = 1
else:
commit_count += 1
except Exception as err:
frappe.log_error("{0}\n\n{1}".format(err, row), 'Miveba Buchung konnte nicht erstellt werden')
else:
frappe.log_error("{0}".format(row), 'Mitgliedschaft existiert nicht')
print("{count} of {max_loop} --> {percent}".format(count=count, max_loop=max_loop, percent=((100 / max_loop) * count)))
count += 1
else:
break
# --------------------------------------------------------------
# Tags Importer
# --------------------------------------------------------------
def import_tags(site_name, file_name, limit=False):
'''
Example:
sudo bench execute mvd.mvd.data_import.importer.import_tags --kwargs "{'site_name': 'site1.local', 'file_name': 'kategorien.csv'}"
'''
from frappe.desk.tags import add_tag
    # display all columns for error handling
pd.set_option('display.max_rows', None, 'display.max_columns', None)
# read csv
df = pd.read_csv('/home/frappe/frappe-bench/sites/{site_name}/private/files/{file_name}'.format(site_name=site_name, file_name=file_name))
# loop through rows
count = 1
max_loop = limit
if not limit:
index = df.index
max_loop = len(index)
for index, row in df.iterrows():
if count <= max_loop:
if frappe.db.exists("Mitgliedschaft", str(get_value(row, 'mitglied_id'))):
try:
add_tag(str(get_value(row, 'mkategorie_d')), "Mitgliedschaft", str(get_value(row, 'mitglied_id')))
except Exception as err:
frappe.log_error("{0}\n\n{1}".format(err, row), 'Tag konnte nicht erstellt werden')
else:
frappe.log_error("{0}".format(row), 'Mitgliedschaft existiert nicht')
print("{count} of {max_loop} --> {percent}".format(count=count, max_loop=max_loop, percent=((100 / max_loop) * count)))
count += 1
else:
break
# --------------------------------------------------------------
# Special Importer
# --------------------------------------------------------------
def import_special(site_name, file_name, limit=False):
'''
Example:
sudo bench execute mvd.mvd.data_import.importer.import_special --kwargs "{'site_name': 'site1.local', 'file_name': 'jahr_bez_mitgl-PROD-1.csv'}"
'''
    # display all columns for error handling
pd.set_option('display.max_rows', None, 'display.max_columns', None)
# read csv
df = pd.read_csv('/home/frappe/frappe-bench/sites/{site_name}/private/files/{file_name}'.format(site_name=site_name, file_name=file_name))
# loop through rows
count = 1
commit_count = 1
max_loop = limit
if not limit:
index = df.index
max_loop = len(index)
for index, row in df.iterrows():
if count <= max_loop:
if frappe.db.exists("Mitgliedschaft", str(get_value(row, 'mitglied_id'))):
try:
mitglied_id = str(get_value(row, 'mitglied_id'))
jahr = str(get_value(row, 'jahr_bez_mitgl'))
frappe.db.sql("""UPDATE `tabMitgliedschaft` SET `zahlung_mitgliedschaft` = '{jahr}' WHERE `name` = '{mitglied_id}'""".format(jahr=jahr, mitglied_id=mitglied_id), as_list=True)
frappe.db.commit()
if int(jahr) == 2022:
sinvs = frappe.db.sql("""SELECT `name` FROM `tabSales Invoice` WHERE `mv_mitgliedschaft` = '{mitglied_id}' AND `status` != 'Paid' AND `docstatus` = 1""".format(mitglied_id=mitglied_id), as_dict=True)
for sinv in sinvs:
try:
sinv = frappe.get_doc("Sales Invoice", sinv.name)
sinv.cancel()
sinv.delete()
frappe.db.commit()
except Exception as e:
frappe.log_error("{0}\n\n{1}\n\n{2}".format(e, sinv.name, row), 'RG konnte nicht gelöscht werden')
commit_count += 1
except Exception as err:
frappe.log_error("{0}\n\n{1}".format(err, row), 'Special konnte nicht erstellt werden')
else:
frappe.log_error("{0}".format(row), 'Mitgliedschaft existiert nicht')
print("{count} of {max_loop} --> {percent}".format(count=count, max_loop=max_loop, percent=((100 / max_loop) * count)))
count += 1
else:
break
# --------------------------------------------------------------
# Adressen Update
# --------------------------------------------------------------
def update_adressen(site_name, file_name, limit=False):
'''
Example:
sudo bench execute mvd.mvd.data_import.importer.update_adressen --kwargs "{'site_name': 'site1.local', 'file_name': 'hausnummer_zusatz_gefiltert.csv'}"
'''
from mvd.mvd.doctype.mitgliedschaft.mitgliedschaft import create_sp_queue
    # display all columns for error handling
pd.set_option('display.max_rows', None, 'display.max_columns', None)
# read csv
df = pd.read_csv('/home/frappe/frappe-bench/sites/{site_name}/private/files/{file_name}'.format(site_name=site_name, file_name=file_name))
# loop through rows
count = 1
submit_counter = 1
max_loop = limit
if not limit:
index = df.index
max_loop = len(index)
for index, row in df.iterrows():
if count <= max_loop:
if frappe.db.exists("Mitgliedschaft", str(get_value(row, 'mitglied_id'))):
try:
objekt_hausnummer = str(get_value(row, 'objekt_hausnummer'))
nummer_zu = str(get_value(row, 'nummer_zu'))
objekt_nummer_zu = str(get_value(row, 'objekt_nummer_zu'))
rg_nummer_zu = str(get_value(row, 'rg_nummer_zu'))
mitgliedschaft = frappe.get_doc("Mitgliedschaft", str(get_value(row, 'mitglied_id')))
mitgliedschaft.objekt_hausnummer = objekt_hausnummer
mitgliedschaft.nummer_zu = nummer_zu
mitgliedschaft.objekt_nummer_zu = objekt_nummer_zu
mitgliedschaft.rg_nummer_zu = rg_nummer_zu
mitgliedschaft.letzte_bearbeitung_von = 'SP'
mitgliedschaft.save()
create_sp_queue(mitgliedschaft, True)
if submit_counter == 100:
frappe.db.commit()
submit_counter = 1
except Exception as err:
frappe.log_error("{0}\n\n{1}".format(err, row), 'Adressen Update konnte nicht durchgeführt werden')
else:
frappe.log_error("{0}".format(row), 'Mitgliedschaft existiert nicht')
print("{count} of {max_loop} --> {percent}".format(count=count, max_loop=max_loop, percent=((100 / max_loop) * count)))
count += 1
submit_counter += 1
else:
break
# --------------------------------------------------------------
# Ampel Reset
# --------------------------------------------------------------
def ampel_reset():
'''
Example:
sudo bench --site [site_name] execute mvd.mvd.data_import.importer.ampel_reset
'''
from mvd.mvd.doctype.mitgliedschaft.mitgliedschaft import get_ampelfarbe
    # recalculate the Ampel colour for all memberships currently flagged red
mitgliedschaften = frappe.db.sql("""SELECT `name` FROM `tabMitgliedschaft` WHERE `ampel_farbe` = 'ampelrot'""", as_dict=True)
total = len(mitgliedschaften)
print("Setze/Berechne Ampel bei {0} Mitgliedschaften".format(total))
submit_counter = 1
count = 1
for mitgliedschaft in mitgliedschaften:
m = frappe.get_doc("Mitgliedschaft", mitgliedschaft.name)
neue_farbe = get_ampelfarbe(m)
if neue_farbe != m.ampel_farbe:
set_neue_farbe = frappe.db.sql("""UPDATE `tabMitgliedschaft` SET `ampel_farbe` = '{neue_farbe}' WHERE `name` = '{name}'""".format(neue_farbe=neue_farbe, name=m.name), as_list=True)
submit_counter += 1
if submit_counter == 100:
frappe.db.commit()
submit_counter = 1
print("{0} von {1}".format(count, total))
count += 1
frappe.db.commit()
# --------------------------------------------------------------
# Setze CB "Aktive Mitgliedschaft"
# --------------------------------------------------------------
def aktive_mitgliedschaft():
'''
Example:
sudo bench --site [site_name] execute mvd.mvd.data_import.importer.aktive_mitgliedschaft
'''
print("Aktiviere aktive Mitgliedschaften...")
SQL_SAFE_UPDATES_false = frappe.db.sql("""SET SQL_SAFE_UPDATES=0""", as_list=True)
update_cb = frappe.db.sql("""UPDATE `tabMitgliedschaft` SET `aktive_mitgliedschaft` = 1 WHERE `status_c` NOT IN ('Gestorben', 'Wegzug', 'Ausschluss', 'Inaktiv')""", as_list=True)
SQL_SAFE_UPDATES_true = frappe.db.sql("""SET SQL_SAFE_UPDATES=1""", as_list=True)
frappe.db.commit()
print("Aktive Mitgliedschaften aktiviert")
# --------------------------------------------------------------
# Tausche CB "Geschenkunterlagen an Schenker"
# --------------------------------------------------------------
def change_geschenk_cb():
'''
Example:
sudo bench --site [site_name] execute mvd.mvd.data_import.importer.change_geschenk_cb
'''
mitgliedschaften = frappe.db.sql("""SELECT `name`, `geschenkunterlagen_an_schenker` FROM `tabMitgliedschaft` WHERE `ist_geschenkmitgliedschaft` = 1""", as_dict=True)
print("Change {0} Mitgliedschaften".format(len(mitgliedschaften)))
count = 1
for m in mitgliedschaften:
if int(m.geschenkunterlagen_an_schenker) == 1:
frappe.db.sql("""UPDATE `tabMitgliedschaft` SET `geschenkunterlagen_an_schenker` = 0 WHERE `name` = '{mitgliedschaft}'""".format(mitgliedschaft=m.name), as_list=True)
else:
frappe.db.sql("""UPDATE `tabMitgliedschaft` SET `geschenkunterlagen_an_schenker` = 1 WHERE `name` = '{mitgliedschaft}'""".format(mitgliedschaft=m.name), as_list=True)
print("{0} von {1}".format(count, len(mitgliedschaften)))
count += 1
frappe.db.commit()
# --------------------------------------------------------------
# Beitritt Update
# --------------------------------------------------------------
def update_beitritt(site_name, file_name, limit=False):
'''
Example:
sudo bench execute mvd.mvd.data_import.importer.update_beitritt --kwargs "{'site_name': 'site1.local', 'file_name': 'mitglieder_ids_2022.csv'}"
'''
from mvd.mvd.doctype.mitgliedschaft.mitgliedschaft import create_sp_queue
    # display all columns for error handling
pd.set_option('display.max_rows', None, 'display.max_columns', None)
# read csv
df = pd.read_csv('/home/frappe/frappe-bench/sites/{site_name}/private/files/{file_name}'.format(site_name=site_name, file_name=file_name))
# loop through rows
count = 1
submit_counter = 1
max_loop = limit
if not limit:
index = df.index
max_loop = len(index)
for index, row in df.iterrows():
if count <= max_loop:
if frappe.db.exists("Mitgliedschaft", str(get_value(row, 'mitglied_id'))):
try:
frappe.db.sql("""UPDATE `tabMitgliedschaft` SET `zahlung_mitgliedschaft` = '2022' WHERE `name` = '{mitglied_id}'""".format(mitglied_id=str(get_value(row, 'mitglied_id'))), as_list=True)
if submit_counter == 100:
frappe.db.commit()
submit_counter = 1
else:
submit_counter += 1
except Exception as err:
frappe.log_error("{0}\n\n{1}".format(err, row), 'Beitritt Update konnte nicht durchgeführt werden')
else:
frappe.log_error("{0}".format(row), 'Mitgliedschaft existiert nicht')
print("{count} of {max_loop} --> {percent}".format(count=count, max_loop=max_loop, percent=((100 / max_loop) * count)))
            count += 1
else:
break
# --------------------------------------------------------------
# OnlinePayment Update
# --------------------------------------------------------------
def update_online_payment(site_name, file_name, limit=False):
'''
Example:
sudo bench execute mvd.mvd.data_import.importer.update_online_payment --kwargs "{'site_name': 'site1.local', 'file_name': 'mitglied_nr_paymentId_vor_7_Maerz.csv'}"
'''
from mvd.mvd.doctype.mitgliedschaft.mitgliedschaft import create_sp_queue
    # display all columns for error handling
|
pd.set_option('display.max_rows', None, 'display.max_columns', None)
|
pandas.set_option
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Dict
from pandas import DataFrame, concat
from lib.cast import age_group
from lib.pipeline import DataSource
from lib.utils import table_rename
_SUBREGION1_CODE_MAP = {
"Harju": "37",
"Hiiu": "39",
"Ida-Viru": "45",
"Jõgeva": "50",
"Järva": "52",
"Lääne": "56",
"Lääne-Viru": "60",
"Põlva": "64",
"Pärnu": "68",
"Rapla": "71",
"Saare": "74",
"Tartu": "79",
"Valga": "81",
"Viljandi": "84",
"Võru": "87",
}
def _parse_age_bin(age_bin: str) -> str:
try:
return age_group(int(age_bin.split("-", 1)[0]))
except:
return "age_unknown"
class EstoniaDataSource(DataSource):
def parse_dataframes(
self, dataframes: Dict[str, DataFrame], aux: Dict[str, DataFrame], **parse_opts
) -> DataFrame:
data = table_rename(
dataframes[0],
{
# "id": "",
"Gender": "sex",
"AgeGroup": "age",
# "Country": "country_name",
"County": "subregion1_name",
"ResultValue": "_test_result",
"StatisticsDate": "date",
},
drop=True,
remove_regex=r"[^0-9a-z\s]",
)
data["new_tested"] = 1
data["new_confirmed"] = 0
data.loc[data["_test_result"] == "P", "new_confirmed"] = 1
data.drop(columns=["_test_result"], inplace=True)
# Translate sex labels; only male, female and unknown are given
sex_adapter = lambda x: {"M": "male", "N": "female"}.get(x, "sex_unknown")
data["sex"] = data["sex"].apply(sex_adapter)
# Normalize age group labels
data["age"] = data["age"].apply(_parse_age_bin)
# Use proper ISO codes for the subregion1 level
data["subregion1_name"] = data["subregion1_name"].str.replace(" maakond", "")
data["subregion1_code"] = data["subregion1_name"].apply(_SUBREGION1_CODE_MAP.get)
data.drop(columns=["subregion1_name"], inplace=True)
# Aggregate country-level data by adding all counties
country = (
data.drop(columns=["subregion1_code"])
.groupby(["date", "age", "sex"])
.sum()
.reset_index()
)
country["key"] = "EE"
# We can build the key for the data directly from the subregion codes
data["key"] = "EE_" + data["subregion1_code"]
# Drop bogus records from the data
data.dropna(subset=["subregion1_code"], inplace=True)
return
|
concat([country, data])
|
pandas.concat
|
"""
Predicting Lab Profitability in Washington State
Cannabis Data Science Meetup Group
Copyright (c) 2022 Cannlytics
Authors: <NAME> <<EMAIL>>
Created: 1/10/2022
Updated: 1/12/2022
License: MIT License <https://opensource.org/licenses/MIT>
Description: Using data on analyses performed by labs in Washington State,
this script calculates historic performance of each lab and uses analysis
prices to forecast the profitability of each lab over the next 5 years.
Data Sources:
- WA State Traceability Data January 2018 - November 2021
https://lcb.app.box.com/s/e89t59s0yb558tjoncjsid710oirqbgd?page=1
Resources:
- Pandas time series / date functionality
https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html
"""
# Standard imports.
import gc
import json
import re
import requests
# External imports.
import matplotlib.pyplot as plt
from matplotlib.ticker import FuncFormatter
import pandas as pd
from pandas.tseries.offsets import MonthEnd
import pmdarima as pm
import seaborn as sns
import statsmodels.api as sm
# Internal imports.
from utils import (
forecast_arima,
format_millions,
format_thousands,
)
#------------------------------------------------------------------------------
# Perform housekeeping and define useful functions.
#------------------------------------------------------------------------------
# Define format for all plots.
plt.style.use('fivethirtyeight')
plt.rcParams.update({
'font.family': 'Times New Roman',
'font.size': 20,
})
# Print floats with no decimal places.
pd.options.display.float_format = "{:.0f}".format
def sorted_nicely(unsorted_list):
"""Sort the given iterable in the way that humans expect.
Credit: <NAME> <https://stackoverflow.com/a/2669120/5021266>
License: CC BY-SA 2.5 <https://creativecommons.org/licenses/by-sa/2.5/>
"""
convert = lambda text: int(text) if text.isdigit() else text
alphanum_key = lambda key: [convert(c) for c in re.split('([0-9]+)', key)]
return sorted(unsorted_list, key = alphanum_key)
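# Illustrative example of the natural sort above (hypothetical lab IDs):
#   sorted_nicely(['WAL10', 'WAL2', 'WAL1'])  ->  ['WAL1', 'WAL2', 'WAL10']
# whereas plain sorted() would put 'WAL10' before 'WAL2'.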
#------------------------------------------------------------------------------
# Read in and clean the laboratory data.
#------------------------------------------------------------------------------
# Define lab datasets.
lab_datasets = ['LabResults_0', 'LabResults_1', 'LabResults_2']
# Specify the column types to read.
column_types = {
'global_id' : 'string',
# 'mme_id' : 'category',
# 'type' : 'category',
# 'intermediate_type' : 'category',
# 'status' : 'category',
#'user_id' : 'string',
#'external_id' : 'string',
#'inventory_id' : 'string',
#'testing_status' : 'category',
#'batch_id' : 'string',
#'parent_lab_result_id' : 'string',
#'og_parent_lab_result_id' : 'string',
#'copied_from_lab_id' : 'string',
#'lab_user_id' : 'string',
#'foreign_matter' : 'bool',
#'moisture_content_percent' : 'float16',
#'growth_regulators_ppm' : 'float16',
#'cannabinoid_status' : 'category',
#'cannabinoid_editor' : 'float32',
#'cannabinoid_d9_thca_percent': 'float16',
#'cannabinoid_d9_thca_mg_g' : 'float16',
#'cannabinoid_d9_thc_percent' : 'float16',
#'cannabinoid_d9_thc_mg_g' : 'float16',
#'cannabinoid_d8_thc_percent' : 'float16',
#'cannabinoid_d8_thc_mg_g' : 'float16',
#'cannabinoid_cbd_percent' : 'float16',
#'cannabinoid_cbd_mg_g' : 'float16',
#'cannabinoid_cbda_percent' : 'float16',
#'cannabinoid_cbda_mg_g' : 'float16',
#'cannabinoid_cbdv_percent' : 'float16',
#'cannabinoid_cbg_percent' : 'float16',
#'cannabinoid_cbg_mg_g' : 'float16',
#'terpenoid_pinene_percent' : 'float16',
#'terpenoid_pinene_mg_g' : 'float16',
#'microbial_status' : 'category',
#'microbial_editor' : 'string',
#'microbial_bile_tolerant_cfu_g' : 'float16',
#'microbial_pathogenic_e_coli_cfu_g' : 'float16',
#'microbial_salmonella_cfu_g' : 'float16',
#'mycotoxin_status' : 'category',
#'mycotoxin_editor' : 'string',
#'mycotoxin_aflatoxins_ppb' : 'float16',
#'mycotoxin_ochratoxin_ppb' : 'float16',
#'metal_status' : 'category',
#'metal_editor': 'string',
#'metal_arsenic_ppm' : 'float16',
#'metal_cadmium_ppm' : 'float16',
#'metal_lead_ppm' : 'float16',
#'metal_mercury_ppm' : 'float16',
#'pesticide_status' : 'category',
#'pesticide_editor' : 'string',
#'pesticide_abamectin_ppm' : 'float16',
#'pesticide_acequinocyl_ppm' : 'float16',
#'pesticide_bifenazate_ppm' : 'float16',
#'pesticide_cyfluthrin_ppm' : 'float16',
#'pesticide_cypermethrin_ppm' : 'float16',
#'pesticide_etoxazole_ppm' : 'float16',
#'pesticide_flonicamid_ppm' : 'float',
#'pesticide_fludioxonil_ppm' : 'float16',
#'pesticide_imidacloprid_ppm' : 'float16',
#'pesticide_myclobutanil_ppm' : 'float16',
#'pesticide_spinosad_ppm' : 'float16',
#'pesticide_spirotetramet_ppm' : 'float16',
#'pesticide_thiamethoxam_ppm' : 'float16',
#'pesticide_trifloxystrobin_ppm' : 'float16',
#'solvent_status' : 'category',
#'solvent_editor' : 'string',
#'solvent_butanes_ppm' : 'float16',
#'solvent_heptane_ppm' : 'float16',
#'solvent_propane_ppm' : 'float16',
#'notes' : 'float32',
#'thc_percent' : 'float16',
#'moisture_content_water_activity_rate' : 'float16',
#'solvent_acetone_ppm' : 'float16',
#'solvent_benzene_ppm' : 'float16',
#'solvent_cyclohexane_ppm' : 'float16',
#'solvent_chloroform_ppm' : 'float16',
#'solvent_dichloromethane_ppm' : 'float16',
#'solvent_ethyl_acetate_ppm' : 'float16',
#'solvent_hexanes_ppm' : 'float16',
#'solvent_isopropanol_ppm' : 'float16',
#'solvent_methanol_ppm' : 'float16',
#'solvent_pentanes_ppm' : 'float16',
#'solvent_toluene_ppm' : 'float16',
#'solvent_xylene_ppm' : 'float16',
#'pesticide_acephate_ppm' : 'float16',
#'pesticide_acetamiprid_ppm' : 'float16',
#'pesticide_aldicarb_ppm' : 'float16',
#'pesticide_azoxystrobin_ppm' : 'float16',
#'pesticide_bifenthrin_ppm' : 'float16',
#'pesticide_boscalid_ppm' : 'float16',
#'pesticide_carbaryl_ppm' : 'float16',
#'pesticide_carbofuran_ppm' : 'float16',
#'pesticide_chlorantraniliprole_ppm' : 'float16'
}
# Specify the date columns.
date_columns = ['created_at']
# Specify all of the columns.
columns = list(column_types.keys()) + date_columns
# Read in the lab result data.
shards = []
for dataset in lab_datasets:
lab_data = pd.read_csv(
f'../.datasets/{dataset}.csv',
sep='\t',
encoding='utf-16',
usecols=columns,
dtype=column_types,
parse_dates=date_columns,
# nrows=10000,
# skipinitialspace=True,
)
shards.append(lab_data)
# Aggregate lab data, remove shards to free up memory.
data = pd.concat(shards)
del shards
del lab_data
gc.collect()
# Begin cleaning the lab data.
data.dropna(subset=['global_id'], inplace=True)
data.index = data['global_id']
data = data.sort_index()
# Define lab ID for each observation.
data['lab_id'] = data['global_id'].map(lambda x: x[x.find('WAL'):x.find('.')])
# Remove attested lab results.
data = data.loc[data.lab_id != '']
# Identify all of the labs.
lab_ids = list(data['lab_id'].unique())
# Sort the alphanumeric lab IDs.
lab_ids = sorted_nicely(lab_ids)
#------------------------------------------------------------------------------
# Read in and clean the licensee data.
#------------------------------------------------------------------------------
# Specify the licensee fields
licensee_column_types = {
'global_id' : 'string',
'name': 'string',
'city': 'string',
'type': 'string',
'code': 'string',
}
# Read in the licensee data.
file_name = '../.datasets/Licensees_0.csv'
licensees = pd.read_csv(
file_name,
sep='\t',
encoding='utf-16',
usecols=list(licensee_column_types.keys()),
dtype=licensee_column_types,
)
#------------------------------------------------------------------------------
# Create day, month, year variables.
#------------------------------------------------------------------------------
def format_end_of_month(row):
"""Format a row with a 'date' column as an ISO formatted month."""
month = row['date'].month
if month < 10:
month = f'0{month}'
year = row['date'].year
day = row['date'] + MonthEnd(0)
return f'{year}-{month}-{day.day}'
def format_end_of_year(row):
"""Format a row with a 'date' column as an ISO formatted year."""
year = row['date'].year
return f'{year}-12-31'
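# Illustrative examples of the helpers above: a row whose 'date' is 2021-02-15 yields
# '2021-02-28' from format_end_of_month (MonthEnd(0) rolls forward to the last day of
# that month) and '2021-12-31' from format_end_of_year.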
# Add a time column.
data['date'] = pd.to_datetime(data['created_at'])
# Assign day, month, year variables.
data = data.assign(
day=data['date'].dt.date,
month=data.apply(lambda row: format_end_of_month(row), axis=1),
year=data.apply(lambda row: format_end_of_year(row), axis=1),
)
#------------------------------------------------------------------------------
# Calculate interesting lab summary statistics.
#------------------------------------------------------------------------------
# Identify the number of samples tested by each lab.
stats = {}
total_tests = 0
for lab_id in lab_ids:
lab_samples = data.loc[data['lab_id'] == lab_id]
tested_samples = len(lab_samples)
if tested_samples > 0:
code = lab_id.replace('WA', '')
lab_data = licensees.loc[licensees['code'] == code].iloc[0]
stats[lab_id] = {
'name': lab_data['name'],
'city': lab_data['city'],
'total_samples': tested_samples,
}
total_tests += tested_samples
# Calculate the market share for each lab.
lab_stats = pd.DataFrame.from_dict(stats, orient='index')
lab_stats['market_share'] = lab_stats['total_samples'] / total_tests * 100
# Print lab statistics.
statistics = ['name', 'total_samples', 'market_share', 'city']
print(lab_stats[statistics])
# Print by market share.
print(lab_stats.sort_values(by='market_share', ascending=False)[statistics])
#------------------------------------------------------------------------------
# How many analyses are being conducted by each lab on a day-to-day,
# month-to-month, and year-to-year basis?
#------------------------------------------------------------------------------
def plot_samples_by_period(data, column, thousands=False):
"""Plot samples for each lab by a given period."""
lab_ids = sorted_nicely(list(data['lab_id'].unique()))
colors = sns.color_palette('tab20', n_colors=len(lab_ids))
fig, ax = plt.subplots(figsize=(14, 6))
for count, lab_id in enumerate(lab_ids):
lab_samples = data.loc[data['lab_id'] == lab_id]
timeseries = lab_samples.groupby(
column,
as_index=False
).size()
timeseries['date'] = pd.to_datetime(timeseries[column])
timeseries.set_index('date', inplace=True)
plt.plot(
timeseries.index,
timeseries['size'],
label=lab_id,
color=colors[count],
alpha=0.6,
)
plt.ylim(0)
plt.setp(ax.get_yticklabels()[0], visible=False)
if thousands:
ax.yaxis.set_major_formatter(FuncFormatter(format_thousands))
plt.title(f'Samples Tested per {column} by Labs in Washington'.title())
plt.legend(
ncol=5,
loc='upper center',
bbox_to_anchor=(0.5, -0.05),
)
plt.savefig(f'figures/samples_tested_per_{column}_wa.png', dpi=300,
bbox_inches='tight', pad_inches=0.75, transparent=False)
plt.show()
# Plot daily samples tested by each lab.
plot_samples_by_period(data, 'day')
# Plot monthly samples tested by each lab.
plot_samples_by_period(data, 'month')
# Plot yearly samples tested by each lab.
plot_samples_by_period(data, 'year', thousands=True)
#------------------------------------------------------------------------------
# Bonus: Calculate even more lab statistics.
#------------------------------------------------------------------------------
# What is the break down of analyses by sample type? By lab?
# What is the overall failure rate? By lab?
# What is the failure rate by analysis? By lab?
# What is the failure rate day-to-day, month-to-month, and year-to-year? By lab?
#------------------------------------------------------------------------------
# Forecast samples tested by lab.
# How many samples will each lab test in 2022-2026?
#------------------------------------------------------------------------------
# Define the forecast horizon and forecast fixed effects.
forecast_horizon = pd.date_range(
start=pd.to_datetime('2021-11-01'),
end=pd.to_datetime('2027-01-01'),
freq='M',
)
forecast_month_effects = pd.get_dummies(forecast_horizon.month)
# Create a forecast of samples tested by lab.
forecasts = {}
for lab_id in lab_ids:
# Define the training data.
training_data = data.loc[
(data['lab_id'] == lab_id) &
(data['date'] >= pd.to_datetime('2020-05-31')) &
(data['date'] <=
|
pd.to_datetime('2021-10-31')
|
pandas.to_datetime
|
import argparse
import os
from pathlib import Path
import mmcv
from mmcv import Config
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np
from scipy import stats, integrate
import pandas as pd
from mmdet.datasets.builder import build_dataset
def parse_args():
parser = argparse.ArgumentParser(description='Browse a dataset')
parser.add_argument('config', help='train config file path')
parser.add_argument(
'--skip-type',
type=str,
nargs='+',
default=['DefaultFormatBundle', 'Normalize', 'Collect'],
help='skip some useless pipeline')
parser.add_argument(
'--output-dir',
default=None,
type=str,
help='If there is no display interface, you can save it')
parser.add_argument('--not-show', default=False, action='store_true')
parser.add_argument(
'--show-interval',
type=int,
default=999,
help='the interval of show (ms)')
args = parser.parse_args()
return args
def retrieve_data_cfg(config_path, skip_type):
cfg = Config.fromfile(config_path)
train_data_cfg = cfg.data.train
train_data_cfg['pipeline'] = [
x for x in train_data_cfg.pipeline if x['type'] not in skip_type
]
return cfg
def parse_gt_bboxes(bboxes):
box_ratios = []
box_areas = []
for bbox in bboxes:
w, h = bbox[2] - bbox[0], bbox[3] - bbox[1]
box_ratios.append(round(w / h, 2))
box_areas.append(int(w * h))
return box_ratios, box_areas
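# Illustrative example of parse_gt_bboxes: a single box [0, 0, 20, 10] in (x1, y1, x2, y2)
# format gives w=20, h=10, i.e. a ratio of 2.0 and an area of 200.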
def main():
args = parse_args()
cfg = retrieve_data_cfg(args.config, args.skip_type)
dataset = build_dataset(cfg.data.train)
progress_bar = mmcv.ProgressBar(len(dataset))
img_ratios = []
box_ratios = []
box_areas = []
box_nums = []
labels = []
for item in dataset:
filename = os.path.join(args.output_dir,
Path(item['filename']).name
) if args.output_dir is not None else None
# mmcv.imshow_det_bboxes(
# item['img'],
# item['gt_bboxes'],
# item['gt_labels'],
# class_names=dataset.CLASSES,
# show=not args.not_show,
# out_file=filename,
# wait_time=args.show_interval)
img = item['img']
height, width = img.shape[:2]
gt_bboxes = item['gt_bboxes']
gt_labels = item['gt_labels']
# image ratio
img_ratios.append(width / height)
# boxes info
ratios, areas = parse_gt_bboxes(gt_bboxes)
box_ratios.extend(ratios)
box_areas.extend(areas)
box_nums.append(len(areas))
# labels
labels.append(gt_labels)
progress_bar.update()
img_ratios = np.array(img_ratios)
box_ratios = np.array(box_ratios).clip(max=30)
box_areas = np.array(box_areas).clip(max=3e4)
box_nums = np.array(box_nums)
labels = np.concatenate(labels)
print(img_ratios)
print(box_ratios)
print(box_areas)
print(box_nums)
print(labels)
img_ratios = pd.Series(img_ratios, name="img_ratios")
box_ratios = pd.Series(box_ratios, name="box_ratios")
box_areas =
|
pd.Series(box_areas, name="box_areas")
|
pandas.Series
|
"""
Readers for galaxy catalogs to match to lens systems
"""
import numpy as np
import pandas as pd
import GCRCatalogs
import sqlite3
__all__ = ['DC2Reader']
class DC2Reader():
"""
Reader for cosmoDC2 galaxies supplemented with AGN
and SED information.
"""
def __init__(self, catalog_version):
self.catalog_version = catalog_version
# The columns we need to query
self.quantity_list = [
'galaxy_id',
'ra',
'dec',
'redshift_true',
'shear_1',
'shear_2_phosim',
'convergence',
'position_angle_true',
'size_true',
'size_minor_true',
'size_disk_true',
'size_minor_disk_true',
'size_bulge_true',
'size_minor_bulge_true',
'ellipticity_true',
'sersic_disk',
'sersic_bulge',
'stellar_mass_bulge',
'stellar_mass',
'totalStarFormationRate',
'morphology/spheroidHalfLightRadius',
'morphology/spheroidHalfLightRadiusArcsec',
'mag_true_r_lsst',
'mag_true_i_lsst'
]
def load_galaxy_catalog(self, catalog_filters):
catalog = GCRCatalogs.load_catalog(self.catalog_version)
dc2_galaxies = catalog.get_quantities(self.quantity_list,
catalog_filters)
dc2_galaxies_df = pd.DataFrame(dc2_galaxies)
return dc2_galaxies_df
def trim_catalog(self, full_lens_df):
# Keep only "elliptical" galaxies. Use bulge/total mass ratio as proxy.
trim_idx_ellip = np.where(full_lens_df['stellar_mass_bulge']/
full_lens_df['stellar_mass'] > 0.99)[0]
trim_lens_catalog = full_lens_df.iloc[trim_idx_ellip].reset_index(drop=True)
return trim_lens_catalog
def load_agn_catalog(self, agn_db_file, agn_trim_query):
conn = sqlite3.connect(agn_db_file)
cur = conn.cursor()
cur.execute("SELECT name FROM sqlite_master WHERE type='table';")
agn_df =
|
pd.read_sql_query("SELECT * FROM agn_params", conn)
|
pandas.read_sql_query
|
"""Utility functions for data.
(0) Define root and data directory
(1) concate_xs: Concatenate temporal and static features
(2) concate_xt: Concatenate temporal and time features
(3) list_diff: compute the difference between two lists in order
(4) padding: pad the sequences with -1 values outside of the time range
(5) index_reset: return the pandas dataset with reset index
(6) pd_list_to_np_array: convert list of pandas to 3d array
(7) normalization: MinMax Normalizer
(8) renormalization: MinMax renormalizer
"""
import os
import warnings
# Necessary packages
import numpy as np
import pandas as pd
warnings.filterwarnings("ignore")
# Define root and data directory
ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
DATA_DIR = os.path.join(ROOT_DIR, "../datasets/data/")
def concate_xs(x, s):
"""Concatenate static features to temporal feature for every time point.
Args:
x: temporal features
s: static features
Returns:
concate_x: concatenate temporal and static features
"""
concate_x = list()
for i in range(len(s[:, 0])):
temp_x = x[i]
temp_s = np.repeat(np.reshape(s[i, :], [1, -1]), len(temp_x[:, 0]), axis=0)
# -1 padding
pad_idx = sum(temp_x[:, 0] == -1)
if pad_idx > 0:
temp_s[-pad_idx:, :] = -1
# Concatenate
temp_xs = np.concatenate((temp_x, temp_s), axis=1)
concate_x = concate_x + [temp_xs]
concate_x = np.asarray(concate_x)
return concate_x
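# Shape sketch for concate_xs: with n samples of temporal features of shape
# (max_seq_len, d_t) and static features s of shape (n, d_s), the result is an array of
# shape (n, max_seq_len, d_t + d_s); rows already padded with -1 in x also get -1 in the
# replicated static block.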
def concate_xt(x, t):
"""Concatenate time feature to temporal feature for every time point.
Args:
x: temporal features
t: time feature
Returns:
concate_x: concatenate temporal and time features
"""
concate_x = list()
for i in range(len(t[:, 0, 0])):
temp_x = x[i]
temp_t = t[i]
temp_xt = np.concatenate((temp_x, temp_t), axis=1)
concate_x = concate_x + [temp_xt]
concate_x = np.asarray(concate_x)
return concate_x
def list_diff(list1, list2):
"""Compute list differences in order.
Args:
- list1: first list
- list2: second list
Returns:
- out: list difference
"""
out = []
for ele in list1:
if not ele in list2: # noqa: E713
out.append(ele)
return out
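# Illustrative example of list_diff: list_diff([1, 2, 3, 2], [2]) -> [1, 3];
# the order of the first list is preserved and every occurrence of a shared element is dropped.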
def padding(x, max_seq_len):
"""Sequence data padding.
Args:
- x: temporal features
- max_seq_len: maximum sequence_length
Returns:
- x_hat: padded temporal features
"""
# Shape of the temporal features
seq_len, dim = x.shape
col_name = x.columns.values
# Padding (-1)
x_pad_hat = -np.ones([max_seq_len - seq_len, dim])
x_pad_hat = pd.DataFrame(x_pad_hat, columns=col_name)
x_hat =
|
pd.concat((x, x_pad_hat), axis=0)
|
pandas.concat
|
"""Tests for dynamic validator."""
from datetime import date, datetime
import numpy as np
import pandas as pd
from delphi_validator.report import ValidationReport
from delphi_validator.dynamic import DynamicValidator
class TestCheckRapidChange:
params = {"data_source": "", "span_length": 1,
"end_date": "2020-09-02", "expected_lag": {}}
def test_same_df(self):
validator = DynamicValidator(self.params)
report = ValidationReport([])
test_df = pd.DataFrame([date.today()] * 5, columns=["time_value"])
ref_df = pd.DataFrame([date.today()] * 5, columns=["time_value"])
validator.check_rapid_change_num_rows(
test_df, ref_df, date.today(), "geo", "signal", report)
assert len(report.raised_errors) == 0
def test_0_vs_many(self):
validator = DynamicValidator(self.params)
report = ValidationReport([])
time_value = datetime.combine(date.today(), datetime.min.time())
test_df = pd.DataFrame([time_value] * 5, columns=["time_value"])
ref_df = pd.DataFrame([time_value] * 1, columns=["time_value"])
validator.check_rapid_change_num_rows(
test_df, ref_df, time_value, "geo", "signal", report)
assert len(report.raised_errors) == 1
assert "check_rapid_change_num_rows" in [
err.check_data_id[0] for err in report.raised_errors]
class TestCheckAvgValDiffs:
params = {"data_source": "", "span_length": 1,
"end_date": "2020-09-02", "expected_lag": {}}
def test_same_val(self):
validator = DynamicValidator(self.params)
report = ValidationReport([])
data = {"val": [1, 1, 1, 2, 0, 1], "se": [np.nan] * 6,
"sample_size": [np.nan] * 6, "geo_id": ["1"] * 6}
test_df = pd.DataFrame(data)
ref_df = pd.DataFrame(data)
validator.check_avg_val_vs_reference(
test_df, ref_df, date.today(), "geo", "signal", report)
assert len(report.raised_errors) == 0
def test_same_se(self):
validator = DynamicValidator(self.params)
report = ValidationReport([])
data = {"val": [np.nan] * 6, "se": [1, 1, 1, 2, 0, 1],
"sample_size": [np.nan] * 6, "geo_id": ["1"] * 6}
test_df = pd.DataFrame(data)
ref_df = pd.DataFrame(data)
validator.check_avg_val_vs_reference(
test_df, ref_df, date.today(), "geo", "signal", report)
assert len(report.raised_errors) == 0
def test_same_n(self):
validator = DynamicValidator(self.params)
report = ValidationReport([])
data = {"val": [np.nan] * 6, "se": [np.nan] * 6,
"sample_size": [1, 1, 1, 2, 0, 1], "geo_id": ["1"] * 6}
test_df = pd.DataFrame(data)
ref_df = pd.DataFrame(data)
validator.check_avg_val_vs_reference(
test_df, ref_df, date.today(), "geo", "signal", report)
assert len(report.raised_errors) == 0
def test_same_val_se_n(self):
validator = DynamicValidator(self.params)
report = ValidationReport([])
data = {"val": [1, 1, 1, 2, 0, 1], "se": [1, 1, 1, 2, 0, 1],
"sample_size": [1, 1, 1, 2, 0, 1], "geo_id": ["1"] * 6}
test_df = pd.DataFrame(data)
ref_df =
|
pd.DataFrame(data)
|
pandas.DataFrame
|
import random
import numpy as np
import pandas as pd
def create_final_df():
match_df, team_df = get_pro_match_df()
player_stats_df = get_prepared_player_stats_df()
# radiant_df, dire_df = get_radiant_dire_df_with_account_columns(match_df, team_df)
match_df = prepare_match_df(match_df)
match_df.drop(columns=["Radiant_Team_ID", "Dire_Team_ID"], inplace=True)
match_df = get_match_df_with_team_accounts(match_df)
# join player stats and accounts in matches together
match_df = get_merges_matches_player_stats_df(match_df, "Radiant", player_stats_df)
match_df = get_merges_matches_player_stats_df(match_df, "Dire", player_stats_df)
# drop last unnecessary column and round values
match_df.drop(columns=["Start_Time"], inplace=True)
match_df = match_df.round(2)
match_df.to_csv("./Data/completeMatches.csv", index=False)
def get_merges_matches_player_stats_df(matches_df, team_cat, player_stats_df):
match_df = matches_df.copy()
for i in range(5):
radiant_player_stats = player_stats_df.copy()
for col in radiant_player_stats.columns:
radiant_player_stats.rename(columns={col: f"{team_cat}_{i}_{col}"}, inplace=True)
match_df = match_df.merge(radiant_player_stats, left_on=f"{team_cat}_Account_ID_{i}",
right_on=f"{team_cat}_{i}_Account_ID", how="inner")
match_df = match_df[(match_df[f"{team_cat}_{i}_Start_Date"] < match_df["Start_Time"]) & (
match_df["Start_Time"] <= match_df[f"{team_cat}_{i}_End_Date"])]
match_df.drop(columns=[f"{team_cat}_{i}_Start_Date", f"{team_cat}_{i}_End_Date", f"{team_cat}_Account_ID_{i}",
f"{team_cat}_{i}_Account_ID"], inplace=True)
return match_df
def get_prepared_player_stats_df():
player_stats_df =
|
pd.read_csv("../DotaDataGathering/Data/DotaPlayerStats.csv", index_col=False, header=0)
|
pandas.read_csv
|
import numpy as np
import matplotlib.pyplot as plt
import lightkurve as lk
from scipy import interpolate
from astropy.io import fits
from astropy.wcs import WCS
from astropy.coordinates import SkyCoord, Angle
from astropy import units as u
from copy import deepcopy
import pandas as pd
from .R_load import R_val
def Get_Catalogue(tpf, Catalog = 'gaia'):
"""
Get the coordinates and mag of all sources in the field of view from a specified catalogue.
I/347/gaia2dis Distances to 1.33 billion stars in Gaia DR2 (Bailer-Jones+, 2018)
-------
Inputs-
-------
tpf class target pixel file lightkurve class
    Catalog str Permitted options: 'gaia', 'dist', 'ps1', 'skymapper'
    --------
    Outputs-
    --------
    result DataFrame catalogue entries for all sources in the field of view
"""
c1 = SkyCoord(tpf.ra, tpf.dec, frame='icrs', unit='deg')
# Use pixel scale for query size
pix_scale = 4.0 # arcseconds / pixel for Kepler, default
if tpf.mission == 'TESS':
pix_scale = 21.0
# We are querying with a diameter as the radius, overfilling by 2x.
from astroquery.vizier import Vizier
Vizier.ROW_LIMIT = -1
if Catalog == 'gaia':
catalog = "I/345/gaia2"
elif Catalog == 'dist':
catalog = "I/347/gaia2dis"
elif Catalog == 'ps1':
catalog = "II/349/ps1"
elif Catalog == 'skymapper':
catalog = 'II/358/smss'
else:
        raise ValueError("{} not recognised as a catalog. Available options: 'gaia', 'dist', 'ps1', 'skymapper'".format(Catalog))
result = Vizier.query_region(c1, catalog=[catalog],
radius=Angle(np.max(tpf.shape[1:]) * pix_scale, "arcsec"))
no_targets_found_message = ValueError('Either no sources were found in the query region '
'or Vizier is unavailable')
#too_few_found_message = ValueError('No sources found brighter than {:0.1f}'.format(magnitude_limit))
if result is None:
raise no_targets_found_message
elif len(result) == 0:
raise no_targets_found_message
result = result[catalog].to_pandas()
return result
def Get_Gaia(tpf, magnitude_limit = 18, Offset = 10):
"""
Get the coordinates and mag of all gaia sources in the field of view.
-------
Inputs-
-------
tpf class target pixel file lightkurve class
magnitude_limit float cutoff for Gaia sources
Offset int offset for the boundary
--------
Outputs-
--------
coords array coordinates of sources
Gmag array Gmags of sources
"""
keys = ['objID','RAJ2000','DEJ2000','e_RAJ2000','e_DEJ2000','gmag','e_gmag','gKmag','e_gKmag','rmag',
'e_rmag','rKmag','e_rKmag','imag','e_imag','iKmag','e_iKmag','zmag','e_zmag','zKmag','e_zKmag',
'ymag','e_ymag','yKmag','e_yKmag','tmag','gaiaid','gaiamag','gaiadist','gaiadist_u','gaiadist_l',
'row','col']
result = Get_Catalogue(tpf, Catalog = 'gaia')
result = result[result.Gmag < magnitude_limit]
if len(result) == 0:
        raise ValueError('No sources found brighter than {:0.1f}'.format(magnitude_limit))
radecs = np.vstack([result['RA_ICRS'], result['DE_ICRS']]).T
coords = tpf.wcs.all_world2pix(radecs, 0) ## TODO, is origin supposed to be zero or one?
Gmag = result['Gmag'].values
#Jmag = result['Jmag']
ind = (((coords[:,0] >= -10) & (coords[:,1] >= -10)) &
((coords[:,0] < (tpf.shape[1] + 10)) & (coords[:,1] < (tpf.shape[2] + 10))))
coords = coords[ind]
Gmag = Gmag[ind]
Tmag = Gmag - 0.5
#Jmag = Jmag[ind]
return coords, Tmag
def mag2flux(mag,zp):
f = 10**(2/5*(zp-mag))
return f
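# Worked example for mag2flux: with zero point zp = 25, a 20th-magnitude source maps to
# a flux of 10**(2/5 * (25 - 20)) = 10**2 = 100.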
def PS1_to_TESS_mag(PS1,ebv = 0):
zp = 25
gr = (PS1.gmag - PS1.rmag).values
eg, e = R_val('g',gr=gr,ext=ebv); er, e = R_val('r',gr=gr,ext=ebv)
ei, e = R_val('i',gr=gr,ext=ebv); ez, e = R_val('z',gr=gr,ext=ebv)
ey, e = R_val('y',gr=gr,ext=ebv); et, e = R_val('tess',gr=gr,ext=ebv)
eg = eg * ebv; er = er * ebv; ei = ei * ebv; ez = ez * ebv
ey = ey * ebv; et = et * ebv
g = mag2flux(PS1.gmag.values - eg,zp)
r = mag2flux(PS1.rmag.values - er,zp)
i = mag2flux(PS1.imag.values - ei,zp)
z = mag2flux(PS1.zmag.values - ez,zp)
y = mag2flux(PS1.ymag.values - ey,zp)
cr = 0.25582823; ci = 0.27609407; cz = 0.35809516
cy = 0.11244277; cp = 0.00049096
t = (cr*r + ci*i + cz*z + cy*y)*(g/i)**cp
t = -2.5*np.log10(t) + zp + et
PS1['tmag'] = t
return PS1
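# Note on the conversion above: PS1 g/r/i/z/y magnitudes are de-reddened with the R_val
# coefficients, converted to fluxes at a common zero point, combined with the fixed
# weights cr, ci, cz, cy and the colour term (g/i)**cp, and the summed flux is converted
# back to a TESS magnitude with the TESS-band extinction term added.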
def SM_to_TESS_mag(SM,ebv = 0):
zp = 25
gr = (SM.gmag - SM.rmag).values
eg, e = R_val('g',gr=gr,ext=ebv,system='skymapper')
er, e = R_val('r',gr=gr,ext=ebv,system='skymapper')
ei, e = R_val('i',gr=gr,ext=ebv,system='skymapper')
ez, e = R_val('z',gr=gr,ext=ebv,system='skymapper')
et, e = R_val('tess',gr=gr,ext=ebv)
eg = eg * ebv; er = er * ebv; ei = ei * ebv
ez = ez * ebv; et = et * ebv
g = mag2flux(SM.gmag.values - eg,zp)
r = mag2flux(SM.rmag.values - er,zp)
i = mag2flux(SM.imag.values - ei,zp)
z = mag2flux(SM.zmag.values - ez,zp)
cr = 0.25825435; ci = 0.35298213
cz = 0.39388206; cp = -0.00170817
t = (cr*r + ci*i + cz*z)*(g/i)**cp
t = -2.5*np.log10(t) + zp + et
SM['tmag'] = t
return SM
def Get_PS1(tpf, magnitude_limit = 18, Offset = 10):
"""
Get the coordinates and mag of all PS1 sources in the field of view.
-------
Inputs-
-------
tpf class target pixel file lightkurve class
magnitude_limit float cutoff for Gaia sources
Offset int offset for the boundary
--------
Outputs-
--------
coords array coordinates of sources
Gmag array Gmags of sources
"""
result = Get_Catalogue(tpf, Catalog = 'ps1')
result = result[np.isfinite(result.rmag) & np.isfinite(result.imag)]# & np.isfinite(result.zmag)& np.isfinite(result.ymag)]
result = PS1_to_TESS_mag(result)
result = result[result.tmag < magnitude_limit]
if len(result) == 0:
        raise ValueError('No sources found brighter than {:0.1f}'.format(magnitude_limit))
radecs = np.vstack([result['RAJ2000'], result['DEJ2000']]).T
coords = tpf.wcs.all_world2pix(radecs, 0) ## TODO, is origin supposed to be zero or one?
Tessmag = result['tmag'].values
#Jmag = result['Jmag']
ind = (((coords[:,0] >= -10) & (coords[:,1] >= -10)) &
((coords[:,0] < (tpf.shape[1] + 10)) & (coords[:,1] < (tpf.shape[2] + 10))))
coords = coords[ind]
Tessmag = Tessmag[ind]
#Jmag = Jmag[ind]
return coords, Tessmag
def Skymapper_df(sm):
    a = np.zeros(len(sm['ObjectId']), dtype=object)
    a[:] = 's'
    b = sm['ObjectId'].values.astype(str).astype(object)
obj = a+b
keep = ['objID','RAJ2000', 'DEJ2000','e_RAJ2000','e_DEJ2000','gmag', 'e_gmag', 'gKmag',
'e_gKmag', 'rmag', 'e_rmag', 'rKmag', 'e_rKmag',
'imag', 'e_imag', 'iKmag', 'e_iKmag', 'zmag', 'e_zmag',
'zKmag', 'e_zKmag', 'ymag', 'e_ymag', 'yKmag', 'e_yKmag',
'tmag']
df =
|
pd.DataFrame(columns=keep)
|
pandas.DataFrame
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright 2019 by <NAME> (www.robo.guru)
# All rights reserved.
# This file is part of Agenoria and is released under the MIT License.
# Please see the LICENSE file that should have been included as part of
# this package.
import datetime as dt
from dateutil.relativedelta import relativedelta
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
from pandas.plotting import register_matplotlib_converters
from .parse_config import parse_json_config
from .plot_settings import format_monthly_plot, export_figure
# Debug option
DEBUG = False
DEBUG_START_DATE = dt.datetime(2019, 8, 17, 0, 0, 0)
DEBUG_END_DATE = dt.datetime(2019, 9, 27, 0, 0, 0)
# Parameters from JSON
config = []
def count_pee_poop(row):
# Return variables
pee = 0
poop = 0
# Parse
key = row['In the diaper']
if (key == 'pee'): # Pee only
pee += 1
elif (key == 'poo'):
poop += 1
elif (key == 'pee and poo'):
pee += 1
poop += 1
return pee, poop
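# Illustrative examples of count_pee_poop on the 'In the diaper' field:
# 'pee' -> (1, 0), 'poo' -> (0, 1), 'pee and poo' -> (1, 1); any other value -> (0, 0).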
def parse_glow_diaper_data(file_name):
# Import file
data_diaper = pd.read_csv(file_name, parse_dates=['Diaper time'])
# Sort by date and time
data_diaper = data_diaper.sort_values(by=['Diaper time'], ascending=False)
# Make a new column with date component only
data_diaper['Date'] = data_diaper['Diaper time'].dt.normalize()
# Find first and last entry in column
start_date = data_diaper['Date'].iloc[-1]
end_date = data_diaper['Date'].iloc[0]
if (DEBUG):
start_date = DEBUG_START_DATE
end_date = DEBUG_END_DATE
# Final data
diaper_data_list = []
cumulative_diaper_count = 0
# Diaper
for current_date in pd.date_range(start_date, end_date):
# Get all entires on this date
rows_on_date = data_diaper[data_diaper['Date'].isin([current_date])]
# Compute total diaper count
daily_total_diaper_count = rows_on_date['In the diaper'].count()
cumulative_diaper_count += rows_on_date['In the diaper'].count()
# Separate pees and poops
total_pee_count = 0
total_poop_count = 0
for index, diaper_event in rows_on_date.iterrows():
pee, poop = count_pee_poop(diaper_event)
total_pee_count += pee
total_poop_count += poop
# Compute poop to total diaper change ratio
poop_ratio = (total_poop_count / daily_total_diaper_count) * 100
# Compute diaper day duration
diaper_final = rows_on_date['Diaper time'].iloc[0]
diaper_first = rows_on_date['Diaper time'].iloc[-1]
diaper_day_duration = (
diaper_final - diaper_first).total_seconds() / 3600
# Compute average time between diaper changes
diaper_change_time_avg = diaper_day_duration / daily_total_diaper_count
# Put stats in a list
diaper_data_list.append(
[current_date, daily_total_diaper_count, cumulative_diaper_count,
total_pee_count, total_poop_count, poop_ratio,
diaper_change_time_avg])
# Convert list to dataframe
daily_diaper_data = pd.DataFrame(
diaper_data_list, columns=['date', 'daily_total_diaper_count',
'cumulative_diaper_count', 'pee_count',
'poop_count', 'poop_ratio',
'diaper_change_time_avg'])
return daily_diaper_data
def get_abnormal_days(diaper_data):
# Constipation monthly - days with zero poop
constipation_days = diaper_data.loc[diaper_data['poop_count'] == 0]
constipation_days = constipation_days.set_index(constipation_days['date'])
constipation_monthly = constipation_days['daily_total_diaper_count'].resample(
'BMS').count()
# Diarrhea monthly - days with high percentage of poops
CUTOFF = 65
diarrhea_days = diaper_data.loc[diaper_data['poop_ratio'] >= CUTOFF]
diarrhea_days = diarrhea_days.set_index(diarrhea_days['date'])
diarrhea_monthly = diarrhea_days['daily_total_diaper_count'].resample(
'BMS').count()
return constipation_monthly, diarrhea_monthly
def get_diaper_monthly_data(diaper_data):
# Reindex
monthly_data = diaper_data.set_index(diaper_data['date'])
# Compute monthly total
diaper_monthly_data = monthly_data['daily_total_diaper_count'].resample(
'BMS').sum()
return diaper_monthly_data
def plot_diaper_charts(config_file):
# Matplotlib converters
|
register_matplotlib_converters()
|
pandas.plotting.register_matplotlib_converters
|
import datetime
import pandas as pd
_MONTHS = [
"January",
"February",
"March",
"April",
"May",
"June",
"July",
"August",
"September",
"October",
"November",
"December",
]
_ORGS = {
1: "HealthMap",
2: "Tephinet",
3: "Ending Pandemics",
4: "ProMed",
5: "EpiCore",
6: "MSF - Spain",
7: "GeoSentinel",
}
def clean_pair(pair):
if len(pair) > 2:
pair = [pair[0], "=".join(pair[1:])]
return pair
elif len(pair) <= 1:
raise TypeError(
"Pair must be an iterable of length 2. Is there a blank row in your .env file?")
return pair
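# Illustrative example of clean_pair: a line such as 'KEY=abc=def' splits into
# ['KEY', 'abc', 'def'] and is repaired to ['KEY', 'abc=def'], so values that themselves
# contain '=' survive the split.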
def load_env(path):
"""Load environment variables from .env."""
with open(path) as f:
lines = f.readlines()
pairs = [x.split("=") for x in lines]
pairs = [clean_pair(p) for p in pairs]
env = {k: v.replace("\n", "") for k, v in pairs}
try:
return env['DATA_DIR'], env['IMAGE_DIR'], env['SAVE_DATA_DIR']
    except KeyError as e:
        print("Environment variable missing:")
        print(e)
        raise
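# A small self-check sketch (added illustration, not in the original script): values that
# themselves contain '=' are re-joined by clean_pair, so a .env line such as
# "PASSWORD=a=b" still parses into a single key/value pair.
if __name__ == "__main__":
    assert clean_pair("PASSWORD=a=b".split("=")) == ["PASSWORD", "a=b"]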
_, _, _SAVE_DATA_DIR = load_env("../.env")
def clean_dates(df):
"""Convert dates to datetime and add reaction time."""
date_cols = [x for x in df.columns if "_date" in x]
for col in date_cols:
df[col] = pd.to_datetime(df[col], errors="coerce")
df["reaction_time"] = df["first_response_date"] - df["iso_create_date"]
return df
def clean_countries(df):
"""Clean names and fix repeats."""
# strip leading and trail spaces in country name
df["country"] = df["country"].str.strip()
# fix repeat countries
df = df.replace({"United States": "USA"})
return df
def get_last_month_year():
"""Return last month and associated year as ints."""
today = datetime.date.today()
first_day_of_month = today.replace(day=1)
last_day_last_month = first_day_of_month - datetime.timedelta(days=1)
last_month = last_day_last_month.month
year = last_day_last_month.year
return last_month, year
def get_masks(df, dt_col):
"""Get boolean masks for filtering by timeframe."""
last_month, year = get_last_month_year()
last_month = (df[dt_col].dt.month == last_month) & (
df[dt_col].dt.year == year)
ytd = df[dt_col].dt.year == year
last_year = df[dt_col].dt.year == (year - 1)
return last_month, ytd, last_year
def get_response_str(td):
"""Convert timedelta to days/hours/minutes string."""
if not isinstance(td, datetime.timedelta):
return "N/A"
days, hours, minutes = td.days, td.seconds // 3600, (td.seconds // 60) % 60
rt_list = []
if days != 0:
rt_list.append("{} days".format(days))
rt_list.append("{}h".format(hours))
rt_list.append("{}min".format(minutes))
return " ".join(rt_list)
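# Worked example (added sketch, not in the original script): one day, three hours and
# twenty minutes renders as "1 days 3h 20min"; the day part is dropped when it is zero.
if __name__ == "__main__":
    assert get_response_str(datetime.timedelta(days=1, hours=3, minutes=20)) == "1 days 3h 20min"
    assert get_response_str(datetime.timedelta(minutes=5)) == "0h 5min"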
def create_closed_rfis(df):
"""Create and save closed RFIs report."""
last_month, year = get_last_month_year()
mask = (df.action_date.dt.month == last_month) & (
df.action_date.dt.year == year)
actions_last_month = df.loc[mask, [
"action_date", "status", "outcome"]].copy()
closed_rfis_df = (
actions_last_month
.loc[actions_last_month.status == "C"]
.copy()
)
total_closed = closed_rfis_df.shape[0]
verified = closed_rfis_df.outcome.apply(
lambda x: 1 if "Verified" in x else 0).sum()
updated = closed_rfis_df.outcome.apply(
lambda x: 1 if "Updated" in x else 0).sum()
unverified = closed_rfis_df.outcome.apply(
lambda x: 1 if "Unverified" in x else 0
).sum()
categories = ["Verified (+/-)", "Updated (+/-)",
"Verified+Updated", "Unverified"]
counts = [verified, updated, verified + updated, unverified]
percents = [round((float(x) / float(total_closed)) * 100, 1)
if total_closed != 0 else 0.00 for x in counts]
report = pd.DataFrame(
{"Outcome": categories, "Closed ({})".format(
total_closed): counts, "Percent": percents}
)
report.to_html(_SAVE_DATA_DIR + "closed_rfis.html", index=False)
return report
def create_opened_rfis(df):
"""Create and save opened RFIs report."""
last_month, _, _ = get_masks(df, "create_date")
opened_rfis_df = df.loc[last_month].copy()
total_opened_rfis = opened_rfis_df.shape[0]
report = opened_rfis_df["organization_id"].value_counts().reset_index()
missing_index = [x for x in [5, 7, 1, 6, 4] if x not in list(report['index'])]
missing_oid = [0 for x in missing_index]
placeholders = pd.DataFrame({
"index": missing_index,
"organization_id": missing_oid
})
report = pd.concat([report, placeholders], axis=0).sort_values("index")
opened_count_col = "Opened ({})".format(total_opened_rfis)
report.columns = ["Organization", opened_count_col]
report["Organization"] = report["Organization"].map(_ORGS)
report["Percent"] = report[opened_count_col].apply(
lambda x: round((float(x) / float(total_opened_rfis)) * 100, 1)
)
report.to_html(_SAVE_DATA_DIR + "opened_rfis.html", index=False)
return report
def get_stats_from_mask(df, mask, time_frame):
"""Get dictionary of response stats for time frame."""
mask_closed = df.status == "C"
mask_responded = ~df["first_response_date"].isna()
less_than_24 = float((
df.loc[mask, "reaction_time"] < datetime.timedelta(hours=24))
.sum()
)
closed = float(df.loc[mask_closed & mask].shape[0])
responded = float(df.loc[mask_responded & mask].shape[0])
if closed == 0:
response_rate = "N/A"
else:
response_rate = "{:.1%}".format(responded / closed)
# Exclude minimum and maximum values from avg response time calc
rt_df = df.loc[mask].copy()
min_rt = rt_df.reaction_time.min()
max_rt = rt_df.reaction_time.max()
mask_trunc = (rt_df.reaction_time > min_rt) & (rt_df.reaction_time < max_rt)
response_time = get_response_str(rt_df.loc[mask_trunc, "reaction_time"].mean())
if responded == 0:
responded_in_24 = "N/A"
else:
responded_in_24 = "{:.1%}".format(less_than_24 / responded)
return {
"Time Frame": time_frame,
"Closed": int(closed),
"Responded": int(responded),
"Response Rate": response_rate,
"Response Time": response_time,
"Responded in 24hrs": responded_in_24,
}
def create_rfi_response_metrics(df):
"""Create and save RFI response report."""
last_month, year = get_last_month_year()
month_str = _MONTHS[last_month - 1]
mask_last_month, mask_ytd, mask_last_year = get_masks(df, "action_date")
report = pd.DataFrame(
[
get_stats_from_mask(df, mask_last_month,
"{} {}".format(month_str, year)),
get_stats_from_mask(df, mask_ytd, str(year)),
get_stats_from_mask(df, mask_last_year, str(year - 1)),
]
)
report.to_html(_SAVE_DATA_DIR + "rfi_response_metrics.html", index=False)
return report
def get_verification_from_mask(df, mask):
"""Create verified/unverified aggs for time frame."""
is_verified = ["Verified (+)", "Verified (-)",
"Updated (+)", "Updated (-)"]
df = df[mask].copy()
if(df.shape[0] == 0):
unverified_report =
|
pd.DataFrame(columns=["Country", "Unverified"])
|
pandas.DataFrame
|
# -*- coding: utf-8 -*-
"""
Functions for importing mssql data.
"""
import pandas as pd
import numpy as np
from datetime import datetime
from pdsql.util import create_engine, get_pk_stmt, compare_dfs
try:
from geopandas import GeoDataFrame
from shapely.wkb import loads
from pycrs import parse
except ImportError:
    print('Install geopandas for reading geometry columns')
def rd_sql(server, database, table=None, col_names=None, where_in=None, where_op='AND', geo_col=False, from_date=None, to_date=None, date_col=None, rename_cols=None, stmt=None, con=None):
"""
Function to import data from an MSSQL database.
Parameters
----------
server : str
The server name. e.g.: 'SQL2012PROD03'
database : str
The specific database within the server. e.g.: 'LowFlows'
table : str
The specific table within the database. e.g.: 'LowFlowSiteRestrictionDaily'
col_names : list of str
The column names that should be retrieved. e.g.: ['SiteID', 'BandNo', 'RecordNo']
where_in : dict
        A dictionary of strings to lists of strings, e.g.: {'SnapshotType': ['value1', 'value2']}
    where_op : str
        If where_in is a dictionary with more than one key, the operator that connects the where statements must be either 'AND' or 'OR'.
    geo_col : bool
        Is there a geometry column in the table?
from_date : str
The start date in the form '2010-01-01'.
to_date : str
The end date in the form '2010-01-01'.
date_col : str
The SQL table column that contains the dates.
rename_cols: list of str
List of strings to rename the resulting DataFrame column names.
stmt : str
Custom SQL statement to be directly passed to the database. This will ignore all prior arguments except server and database.
con : SQLAlchemy connectable (engine/connection) or database string URI
The sqlalchemy connection to be passed to pandas.read_sql
Returns
-------
DataFrame
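    Examples
    --------
    An illustrative call only (it reuses the placeholder names from the parameter
    descriptions above; substitute your own server, database and table):
    >>> df = rd_sql('SQL2012PROD03', 'LowFlows', 'LowFlowSiteRestrictionDaily',
    ...             col_names=['SiteID', 'BandNo', 'RecordNo'],
    ...             where_in={'SnapshotType': ['value1', 'value2']})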
"""
## Create where statements
if stmt is None:
if table is None:
raise ValueError('Must at least provide input for server, database, and table.')
if col_names is not None:
if isinstance(col_names, str):
col_names = [col_names]
col_names1 = ['[' + i.encode('ascii', 'ignore').decode() + ']' for i in col_names]
col_stmt = ', '.join(col_names1)
else:
col_stmt = '*'
where_lst, where_temp = sql_where_stmts(where_in=where_in, where_op=where_op, from_date=from_date, to_date=to_date, date_col=date_col)
if isinstance(where_lst, list):
stmt1 = "SELECT " + col_stmt + " FROM " + table + " where " + " and ".join(where_lst)
else:
stmt1 = "SELECT " + col_stmt + " FROM " + table
elif isinstance(stmt, str):
where_temp = {}
stmt1 = stmt
else:
raise ValueError('stmt must either be an SQL string or None.')
## Create connection to database and execute sql statement
if geo_col & (stmt is None):
df = rd_sql_geo(server=server, database=database, table=table, col_stmt=col_stmt, where_lst=where_lst)
if rename_cols is not None:
rename_cols1 = rename_cols.copy()
rename_cols1.extend(['geometry'])
df.columns = rename_cols1
else:
if con is None:
engine = create_engine('mssql', server, database)
with engine.begin() as conn:
if where_temp:
for key, value in where_temp.items():
df = pd.DataFrame(data=value, columns=[key.lower()])
temp_tab = '#temp_'+key.lower()
df.to_sql(temp_tab, con=conn, if_exists='replace', index=False, chunksize=1000)
df = pd.read_sql(stmt1, con=conn)
else:
if where_temp:
for key, value in where_temp.items():
df =
|
pd.DataFrame(data=value, columns=[key])
|
pandas.DataFrame
|
"""
This script analyzes Python call expressions.
It accepts
* path to csv file with FQ names.
* path to the folder where to save the stats.
* path to the csv file with labeled projects by python version.
For each unique call expression, the number of projects in which it occurs is counted, keeping their category.
It is also possible to group statistics by language version of Python.
"""
import argparse
import logging
from pathlib import Path
from typing import Optional
from call_expressions_column import CallExpressionsColumn
import pandas as pd
logging.basicConfig(level=logging.INFO)
def configure_parser(parser: argparse.ArgumentParser):
parser.add_argument(
'--input',
type=lambda value: Path(value).absolute(),
help='path to csv file with FQ names',
required=True,
)
parser.add_argument(
'--output',
type=lambda value: Path(value).absolute(),
help='path to the folder where to save the stats',
required=True,
)
parser.add_argument(
'--python-versions',
type=lambda value: Path(value).absolute(),
help='path to the csv file with labeled projects by python version',
)
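# Example invocation (illustrative only; the script and file names below are
# placeholders, not paths from this project):
#   python <this_script>.py --input fq_names.csv --output stats/ \
#       --python-versions python_versions.csv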
def collect_stats(fq_names: pd.DataFrame) -> pd.DataFrame:
total_stats = fq_names.drop(CallExpressionsColumn.CATEGORY.value, axis=1).drop_duplicates()
total_stats = total_stats.value_counts([CallExpressionsColumn.FQ_NAME.value])
total_stats = total_stats.reset_index(name=CallExpressionsColumn.TOTAL.value)
grouped_stats = fq_names.groupby([CallExpressionsColumn.FQ_NAME.value, CallExpressionsColumn.CATEGORY.value])
stats = grouped_stats[CallExpressionsColumn.PROJECT_NAME.value].count()
stats = stats.reset_index(name=CallExpressionsColumn.COUNT.value)
stats = stats.pivot(
index=CallExpressionsColumn.FQ_NAME.value,
columns=CallExpressionsColumn.CATEGORY.value,
values=CallExpressionsColumn.COUNT.value,
)
stats.fillna(0, inplace=True)
stats = stats.astype(int)
stats.reset_index(inplace=True)
stats = stats.merge(total_stats, on=CallExpressionsColumn.FQ_NAME.value)
logging.info(f'Processed {len(stats)} unique FQ names.')
return stats
def main():
parser = argparse.ArgumentParser()
configure_parser(parser)
args = parser.parse_args()
input_path: Path = args.input
output_path: Path = args.output
python_versions_path: Optional[Path] = args.python_versions
output_path.mkdir(parents=True, exist_ok=True)
fq_names = pd.read_csv(input_path, keep_default_na=False)
logging.info(f'Received {len(fq_names)} FQ names.')
if python_versions_path is None:
stats = collect_stats(fq_names)
stats.to_csv(output_path / 'call_expressions_stats.csv', index=False)
logging.info('Saving call expressions stats.')
else:
python_versions =
|
pd.read_csv(python_versions_path, keep_default_na=False, na_values='')
|
pandas.read_csv
|
from copy import deepcopy
from typing import List
from typing import Tuple
import numpy as np
import pandas as pd
import pytest
from pandas.testing import assert_frame_equal
from etna.datasets import generate_ar_df
from etna.datasets.tsdataset import TSDataset
from etna.transforms import AddConstTransform
from etna.transforms import FilterFeaturesTransform
from etna.transforms import LagTransform
from etna.transforms import MaxAbsScalerTransform
from etna.transforms import OneHotEncoderTransform
from etna.transforms import SegmentEncoderTransform
from etna.transforms import TimeSeriesImputerTransform
@pytest.fixture()
def tsdf_with_exog(random_seed) -> TSDataset:
df_1 = pd.DataFrame.from_dict({"timestamp": pd.date_range("2021-02-01", "2021-07-01", freq="1d")})
df_2 = pd.DataFrame.from_dict({"timestamp": pd.date_range("2021-02-01", "2021-07-01", freq="1d")})
df_1["segment"] = "Moscow"
df_1["target"] = [x ** 2 + np.random.uniform(-2, 2) for x in list(range(len(df_1)))]
df_2["segment"] = "Omsk"
df_2["target"] = [x ** 0.5 + np.random.uniform(-2, 2) for x in list(range(len(df_2)))]
classic_df = pd.concat([df_1, df_2], ignore_index=True)
df = TSDataset.to_dataset(classic_df)
classic_df_exog = generate_ar_df(start_time="2021-01-01", periods=600, n_segments=2)
classic_df_exog.rename(columns={"target": "exog"}, inplace=True)
df_exog = TSDataset.to_dataset(classic_df_exog)
ts = TSDataset(df=df, df_exog=df_exog, freq="1D")
return ts
@pytest.fixture()
def df_and_regressors() -> Tuple[pd.DataFrame, pd.DataFrame, List[str]]:
timestamp = pd.date_range("2021-01-01", "2021-02-01")
df_1 = pd.DataFrame({"timestamp": timestamp, "target": 11, "segment": "1"})
df_2 = pd.DataFrame({"timestamp": timestamp[5:], "target": 12, "segment": "2"})
df = pd.concat([df_1, df_2], ignore_index=True)
df = TSDataset.to_dataset(df)
timestamp = pd.date_range("2020-12-01", "2021-02-11")
df_1 = pd.DataFrame({"timestamp": timestamp, "regressor_1": 1, "regressor_2": 2, "segment": "1"})
df_2 = pd.DataFrame({"timestamp": timestamp[5:], "regressor_1": 3, "regressor_2": 4, "segment": "2"})
df_exog = pd.concat([df_1, df_2], ignore_index=True)
df_exog = TSDataset.to_dataset(df_exog)
return df, df_exog, ["regressor_1", "regressor_2"]
@pytest.fixture()
def df_and_regressors_flat() -> Tuple[pd.DataFrame, pd.DataFrame]:
"""Return flat versions of df and df_exog."""
timestamp = pd.date_range("2021-01-01", "2021-02-01")
df_1 = pd.DataFrame({"timestamp": timestamp, "target": 11, "segment": "1"})
df_2 = pd.DataFrame({"timestamp": timestamp[5:], "target": 12, "segment": "2"})
df = pd.concat([df_1, df_2], ignore_index=True)
timestamp = pd.date_range("2020-12-01", "2021-02-11")
df_1 = pd.DataFrame(
{"timestamp": timestamp, "regressor_1": 1, "regressor_2": "3", "regressor_3": 5, "segment": "1"}
)
df_2 = pd.DataFrame(
{"timestamp": timestamp[5:], "regressor_1": 2, "regressor_2": "4", "regressor_3": 6, "segment": "2"}
)
df_exog = pd.concat([df_1, df_2], ignore_index=True)
df_exog["regressor_2"] = df_exog["regressor_2"].astype("category")
df_exog["regressor_3"] = df_exog["regressor_3"].astype("category")
return df, df_exog
@pytest.fixture
def ts_with_categoricals():
timestamp = pd.date_range("2021-01-01", "2021-01-05")
df_1 = pd.DataFrame({"timestamp": timestamp, "target": 11, "segment": "1"})
df_2 = pd.DataFrame({"timestamp": timestamp, "target": 12, "segment": "2"})
df = pd.concat([df_1, df_2], ignore_index=True)
df = TSDataset.to_dataset(df)
timestamp = pd.date_range("2021-01-01", "2021-01-06")
categorical_values = ["1", "2", "1", "2", "1", "2"]
df_1 = pd.DataFrame(
{"timestamp": timestamp, "regressor": categorical_values, "not_regressor": categorical_values, "segment": "1"}
)
df_2 = pd.DataFrame(
{"timestamp": timestamp, "regressor": categorical_values, "not_regressor": categorical_values, "segment": "2"}
)
df_exog = pd.concat([df_1, df_2], ignore_index=True)
df_exog = TSDataset.to_dataset(df_exog)
ts = TSDataset(df=df, freq="D", df_exog=df_exog, known_future=["regressor"])
return ts
@pytest.fixture()
def ts_future(example_reg_tsds):
future = example_reg_tsds.make_future(10)
return future
@pytest.fixture
def df_segments_int():
"""DataFrame with integer segments."""
timestamp = pd.date_range("2021-01-01", "2021-02-01")
df1 = pd.DataFrame({"timestamp": timestamp, "target": 3, "segment": 1})
df2 = pd.DataFrame({"timestamp": timestamp, "target": 4, "segment": 2})
df = pd.concat([df1, df2], ignore_index=True)
return df
def test_check_endings_error():
"""Check that _check_endings method raises exception if some segments end with nan."""
timestamp = pd.date_range("2021-01-01", "2021-02-01")
df1 = pd.DataFrame({"timestamp": timestamp, "target": 11, "segment": "1"})
df2 = pd.DataFrame({"timestamp": timestamp[:-5], "target": 12, "segment": "2"})
df =
|
pd.concat([df1, df2], ignore_index=True)
|
pandas.concat
|
import os, functools
import pandas as pd
import numpy as np
import seaborn as sns
from scipy.stats import pearsonr, spearmanr
import statsmodels.api as sm
from statsmodels.stats.outliers_influence import summary_table
import matplotlib.pyplot as plt
import matplotlib
from matplotlib.colors import ListedColormap
from sklearn.preprocessing import minmax_scale
from matplotlib import colors
from utilities.statistical_tests import stability_summary_calculator, correlation_df
from loguru import logger
from GEN_Utils import FileHandling
logger.info('Import OK')
published_stabilities = 'results/lysate_denaturation/published_datasets/dataset_details.xlsx'
measured_stabilities = 'results/lysate_denaturation/cM_correlation/measured_summary.xlsx'
correlations = 'results/lysate_denaturation/cM_correlation/correlations.xlsx'
output_folder = 'results/lysate_denaturation/plot_correlation/'
cluster_colors = {4: 'royalblue', 2: 'firebrick', 3: 'rebeccapurple', 1: 'darkorange'}
font = {'family' : 'arial',
'weight' : 'normal',
'size' : 14 }
matplotlib.rc('font', **font)
plt.rcParams['svg.fonttype'] = 'none'
if not os.path.exists(output_folder):
os.makedirs(output_folder)
def remove_labels(ax, i, j, xlabel, ylabel):
keep_x, keep_y = (None, None)
if j == 0:
keep_y=True
if i == 4:
keep_x=True
if keep_y is None:
keep_y=False
if keep_x is None:
keep_x=False
if keep_x == False:
ax.set_xlabel('')
else:
ax.set_xlabel(xlabel)
if keep_y == False:
ax.set_ylabel('')
else:
ax.set_ylabel(ylabel)
ax.set_xticklabels([])
ax.set_xticks([])
ax.set_yticklabels([])
ax.set_yticks([])
def linear_regression(x, y, data, title=None):
"""
Plots scatter plot with linear regression and 95% ci's
Returns prediction dataframe
Inputs:
x, y: str column names within dataframe
data: dataframe
title (optional): str
"""
y_data = data[y]
x_data = data[x]
X = sm.add_constant(x_data)
res = sm.OLS(y_data, X).fit()
st, data, ss2 = summary_table(res, alpha=0.05)
preds = pd.DataFrame.from_records(data, columns=[s.replace('\n', ' ') for s in ss2])
preds['x_data'] = list(x_data)
preds['y_data'] = list(y_data)
preds = preds.sort_values(by='x_data')
fig, ax = plt.subplots()
plt.scatter(x_data, y_data)
plt.plot(preds['x_data'], preds['Predicted Value'], c='red', alpha=0.5, linestyle='--')
ax.fill_between(preds['x_data'], preds['Mean ci 95% low'], preds['Mean ci 95% upp'], color='red', alpha=0.2)
plt.ylabel(y)
plt.xlabel(x)
if title:
plt.title(title)
return preds
# define plotting function
def hexbin(x, y, **kwargs):
plt.hexbin(x, y, gridsize=20, linewidths=0, **kwargs)
def corrfunc(x, y, r_xy=(.1, .9), p_xy=(.5, .9), **kws):
data = pd.DataFrame()
data['y'] = y
data['x'] = x
data.dropna(inplace=True)
(r, p) = spearmanr(data['x'], data['y'])
ax = plt.gca()
ax.annotate(f'r = {r:.2f}',
xy=r_xy, xycoords=ax.transAxes)
ax.annotate(f'p = {p:.3f}',
xy=p_xy, xycoords=ax.transAxes)
def visualise_correlations(comparison, correlations, filter_type, output_folder):
if not os.path.exists(f'{output_folder}{filter_type}/'):
os.makedirs(f'{output_folder}{filter_type}/')
# 2. Compare dataset against all published
comparison['color'] = comparison['cluster'].map(cluster_colors)
for resource in resources:
fig, ax = plt.subplots()
sns.scatterplot(x='peptide_stability', y=f'{resource}', data=comparison, hue='cluster', palette=cluster_colors, s=100)
sns.regplot(comparison['peptide_stability'], comparison[f'{resource}'], scatter_kws={'s': 0}, color='grey', robust=True)
plt.legend(bbox_to_anchor=(1.0, 1.0))
plt.ylabel('Normalised stability')
plt.xlabel('Measured Cm (M)')
plt.title(resource)
plt.savefig(f'{output_folder}{filter_type}/{resource}.png')
plt.show()
# 3. Compare each cluster against published datasets
for resource in resources:
for cluster, df in comparison[['peptide_stability', f'{resource}', 'cluster']].groupby('cluster'):
if len(df) > 5: # prevent plotting clusters with only 2 or 3 values that look like false correlations
fig, ax = plt.subplots()
sns.scatterplot(x='peptide_stability', y=f'{resource}', data=df, color=cluster_colors[cluster], s=100, label=cluster)
## add reg lines for individual cluster_colors
sns.regplot(df['peptide_stability'], df[f'{resource}'], scatter_kws={'s': 0}, line_kws={'linestyle': '--'},color=cluster_colors[cluster], truncate=False)
sns.regplot(comparison['peptide_stability'], comparison[f'{resource}'], scatter_kws={'s': 0}, color='grey', truncate=False, robust=True)
plt.legend(bbox_to_anchor=(1.0, 1.0))
plt.ylabel('Normalised stability')
plt.xlabel('Measured Cm (M)')
plt.title(resource)
plt.savefig(f'{output_folder}{filter_type}/clustered_{resource}.png')
plt.savefig(f'{output_folder}{filter_type}/clustered_{resource}.svg')
plt.show()
# 4. Plot individual panels for each cluster against each resource
for resource in resources:
for cluster, df in comparison[['peptide_stability', f'{resource}', 'cluster']].groupby('cluster'):
fig, ax = plt.subplots()
sns.scatterplot(x='peptide_stability', y=f'{resource}', data=df, color=cluster_colors[cluster], s=100)
# sns.regplot(comparison['peptide_stability'], comparison[f'{resource}'], scatter_kws={'s': 0}, color='grey', truncate=False, robust=True, label='All data')
if len(df) > 5: # prevent plotting clusters with only 2 or 3 values that look like false correlations
## add reg lines for individual cluster_colors
sns.regplot(df['peptide_stability'], df[f'{resource}'], scatter_kws={'s': 0}, line_kws={'linestyle': '--'},color=cluster_colors[cluster], truncate=False, label=f'Cluster {cluster}')
corrfunc(df['peptide_stability'], df[f'{resource}'], r_xy=(0.82, 0.92), p_xy=(0.82, 0.85))
plt.title(f'Cluster {cluster}')
plt.ylim(-0.05, 1.05)
# plt.xlim(-0.05, 6.05)
plt.ylabel('Normalised stability')
plt.xlabel('Measured Cm (M)')
plt.title(f'{resource}')
plt.savefig(f'{output_folder}{filter_type}/{cluster}_{resource}.png')
plt.show()
def visualise_summary(comparison, correlations, filter_type, output_folder, labels='text'):
cmap = sns.diverging_palette(200, 340, s=100, l=30, n=9, center="light", as_cmap=True)
# 5. Generate summary plot of published correlation (including R and significance)
# generate scatterplot
fig, ax = plt.subplots(figsize=(20, 5))
sns.scatterplot(x='x_pos', y='y_pos', data=correlations, size='size', hue='spearmans_r', palette=cmap, sizes=(100, 1000), hue_norm=(-1, 1))
# h, l = ax.get_legend_handles_labels()
plt.legend(bbox_to_anchor=(1.0, 1.0), )
if labels == 'text':
plt.yticks(ticks=list(np.arange(0, len(correlations['y_pos'].unique()))), labels=reversed(['All', 'Cluster 1', 'Cluster 2', 'Cluster 3', 'Cluster 4']))
plt.xticks(ticks=list(positions.values()), labels=(list(positions.keys())), rotation=90)
elif labels == 'numeric':
plt.yticks(ticks=list(np.arange(0, len(correlations['y_pos'].unique()))), labels=reversed(['All', 'Cluster 1', 'Cluster 2', 'Cluster 3', 'Cluster 4']))
plt.xticks(ticks=np.arange(0, len(positions.values())), labels=(np.arange(1, len(positions.values())+1))) # to cope with reversed positions above
plt.ylim(-0.5, 4.5)
plt.xlabel(None)
plt.ylabel(None)
plt.savefig(f'{output_folder}{filter_type}correlation_summary.png')
plt.savefig(f'{output_folder}{filter_type}correlation_summary.svg')
# create custom colorbar
fig, ax = plt.subplots(figsize=(6, 1))
fig.subplots_adjust(bottom=0.5)
norm = matplotlib.colors.Normalize(vmin=-1, vmax=1)
cb1 = matplotlib.colorbar.ColorbarBase(ax, cmap=cmap, norm=norm, orientation='horizontal')
cb1.set_label("Spearman's R")
plt.savefig(f'{output_folder}custom_colourbar.svg')
def visualise_heatmap(comparison, correlations, filter_type, output_folder, labels='text'):
cmap = sns.diverging_palette(200, 340, s=100, l=30, n=9, center="light", as_cmap=True)
inverse_positions = {value: key for key, value in positions.items()}
comparison_positions = {'4': 'Cluster 4', '3': 'Cluster 3', '2': 'Cluster 2', '1': 'Cluster 1', 'all': 'All', }
fig, axes = plt.subplots(len(correlations['dataset_2'].unique()), len(correlations['dataset_1'].unique()), figsize=(20, 5))#, sharex=True, sharey=True)
for j, dataset_1 in enumerate(correlations['dataset_1'].unique()):
for i, dataset_2 in enumerate(correlations['dataset_2'].unique()):
# logger.info(f'{i}: {dataset_1}, {j}: {dataset_2}')
j = positions[dataset_1]
ax = axes[i][j]
corr_color = correlations[(correlations['dataset_1'] == dataset_1) & (correlations['dataset_2'] == dataset_2)]['corr_color'].tolist()[0]
pval = correlations[(correlations['dataset_1'] == dataset_1) & (correlations['dataset_2'] == dataset_2)]['spearmans_pval'].tolist()[0]
ax.set_facecolor(corr_color)
            if pval < 0.01:
                ax.annotate('**', (0.3, 0.3), fontsize=28)
            elif pval < 0.05:
                ax.annotate('*', (0.3, 0.3), fontsize=28)
            # fix axis labels
remove_labels(ax, i, j, f'{positions[dataset_1]}', f'{comparison_positions[dataset_2]}')
plt.tight_layout(pad=-0.5)
plt.savefig(f'{output_folder}{filter_type}correlation_heatmap.png')
plt.savefig(f'{output_folder}{filter_type}correlation_heatmap.svg')
# ------------------------------------------------Read in standard components------------------------------------------------
resource_details = pd.read_excel(f'{published_stabilities}')
resource_details.drop([col for col in resource_details.columns.tolist() if 'Unnamed: ' in col], axis=1, inplace=True)
resources = resource_details['dataset_id'].tolist()
stability_summary = pd.read_excel(f'{correlations}', sheet_name=None)
stability_summary.update({key: value.drop([col for col in value.columns.tolist() if 'Unnamed: ' in str(col)], axis=1) for key, value in stability_summary.items()})
datasets = ['Leuenberger_2017', 'Ogburn_2017A', 'Ogburn_2017B', 'Walker_2019', 'Roberts_2016A', 'Roberts_2016B', 'Jarzab_2020F', 'Jarzab_2020G', 'Jarzab_2020H', 'Becher_2016', 'Franken_2015', 'Miettinen_2018', 'Savitski_2018A', 'Savitski_2018B', 'Ball_2020', 'Jarzab_2020N', 'Jarzab_2020O', 'Savitski_2014A', 'Sridharan_2019', 'Jarzab_2020M']
cmap = sns.diverging_palette(200, 340, s=100, l=30, n=9, center="light", as_cmap=True)
def color_picker(val, val_min=-1, val_max=1):
norm = colors.Normalize(vmin=val_min, vmax=val_max)
rgb = cmap(norm(val))[:3] # will return rgba, we take only first 3 so we get rgb
return colors.rgb2hex(rgb)
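# Tiny demonstration (added sketch, not part of the original analysis): map the
# extremes and midpoint of the Spearman's R range onto the palette defined above.
if __name__ == '__main__':
    print([color_picker(v) for v in (-1, 0, 1)])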
#---------------------- Prepare measured correlation datasets----------------------
comparison = stability_summary['measured_comparison'].copy().set_index('KO')[datasets].copy()
correlations = stability_summary['measured_cluster_correlation'].copy().rename(columns={'index': 'datasets'})
correlations[['dataset_1', 'dataset_2']] = correlations['datasets'].str.split('-', expand=True)
# generate positions and size calculations for plotting
correlations['size'] = pd.cut(correlations['spearmans_pval'], bins=[0.0, 0.01, 0.05, 1.0], labels=[0.01, 0.05, 1], include_lowest=True)
# correlations.dropna(subset=['size'], inplace=True) # remove 1:1 comparisons
# positions = resource_details.copy().sort_values(['technique', 'sample_species', 'sample_type'])
positions = dict(zip(datasets, np.arange(0, len(datasets))))
correlations['x_pos'] = correlations['dataset_1'].map(positions) # to mimic order of the correlation facet plot
correlations['y_pos'] = [0 if cluster == 'all' else int(cluster) for cluster in correlations['dataset_2']]
# visualise_correlations(comparison, correlations, filter_type='measured', output_folder=output_folder)
visualise_summary(comparison, correlations, filter_type='measured', output_folder=output_folder)
#---------------------- Prepare filtered correlation datasets----------------------
comparison = stability_summary['filtered_comparison'].set_index('KO')[datasets].copy()
correlations = stability_summary['filtered_cluster_correlation'].copy().rename(columns={'index': 'datasets'})
correlations[['dataset_1', 'dataset_2']] = correlations['datasets'].str.split('-', expand=True)
correlations = correlations[(correlations['dataset_1'].isin(datasets))]
# generate positions and size calculations for plotting
# correlations['size'] = - np.log10(correlations['pearsons_pval']).replace([np.inf, -np.inf], np.nan)
correlations['size'] =
|
pd.cut(correlations['spearmans_pval'], bins=[0.0, 0.01, 0.05, 1.0], labels=[0.01, 0.05, 1], include_lowest=True)
|
pandas.cut
|
# -*- coding: utf-8 -*-
"""
Created on Sun Sep 1 18:10:18 2019
@author: <NAME>
Code will plot the keypoint coordinates vs time in order to assign the maximum
value from this plot to the real-world distance measurement. This will be
the label.
Meeting:
"""
import pandas as pd
import matplotlib.pyplot as plt
#Edit data within file.
#Open file and set to a certain variable
df =
|
pd.read_csv('thesavedones.csv', header=None)
|
pandas.read_csv
|
#!/usr/bin/env python
import pandas as pd
import numpy as np
import scipy, sklearn, os, sys, string, fileinput, glob, re, math, itertools, functools
import copy, multiprocessing, traceback, logging, pickle, traceback
import scipy.stats, sklearn.decomposition, sklearn.preprocessing, sklearn.covariance
from scipy.stats import describe
from scipy import sparse
import os.path
import scipy.sparse
from scipy.sparse import csr_matrix, csc_matrix
from sklearn.preprocessing import normalize
from collections import defaultdict
from tqdm import tqdm
def fast_csv_read(filename, *args, **kwargs):
small_chunk = pd.read_csv(filename, nrows=50)
if small_chunk.index[0] == 0:
coltypes = dict(enumerate([a.name for a in small_chunk.dtypes.values]))
return pd.read_csv(filename, dtype=coltypes, *args, **kwargs)
else:
coltypes = dict((i+1,k) for i,k in enumerate([a.name for a in small_chunk.dtypes.values]))
coltypes[0] = str
return pd.read_csv(filename, index_col=0, dtype=coltypes, *args, **kwargs)
def processRawData(rawdir, h5file):
M = pd.concat([fast_csv_read(f) for f in glob.glob("{0}/vdj_v1_hs_aggregated_donor?_binarized_matrix.csv".format(rawdir))])
truthval_cols = [c for c in M.columns if 'binder' in c]
surface_marker_cols = "CD3,CD19,CD45RA,CD4,CD8a,CD14,CD45RO,CD279_PD-1,IgG1,IgG2a,IgG2b,CD127,CD197_CCR7,HLA-DR".split(",")
dx = copy.deepcopy(M.loc[:, surface_marker_cols])
M.loc[:,surface_marker_cols] = (np.log2(1 + 1e6*dx.divide(dx.sum(axis=1), axis="index"))).values
# get rid of B cells etc.
f_trim = lambda v: v < v.quantile(0.975)
ok_Tcells = f_trim(M["CD19"]) & f_trim(M["CD4"]) & f_trim(M["CD14"])
M = M.loc[ ok_Tcells, :]
a = M.loc[:,truthval_cols]
a2= M['cell_clono_cdr3_aa'].apply(lambda v: 'TRA:' in v and 'TRB:' in v)
bc_idx = (a2 & a.any(axis=1))
M = M[bc_idx].reset_index(drop=True)
mcols = ["donor","cell_clono_cdr3_aa"] + truthval_cols + surface_marker_cols
print("Flag 67.10 ", h5file)
M.loc[:,mcols].to_hdf(h5file, "df", mode="w")
def chunkifyCDRseqs(M, f_tra_filter, f_trb_filter, tra_start=0, trb_start=0, tra_end=100, trb_end=100):
truthval_cols = [c for c in M.columns if 'binder' in c]
surface_marker_cols = "CD3,CD19,CD45RA,CD4,CD8a,CD14,CD45RO,CD279_PD-1,IgG1,IgG2a,IgG2b,CD127,CD197_CCR7,HLA-DR".split(",")
tra_L = []; trb_L = []; binds_L = []; idxL = []
for i in tqdm(range(M.shape[0])): #range(M.shape[0]):
sl = M.at[i,"cell_clono_cdr3_aa"].split(";")
a_l = [x[4:][tra_start:tra_end] for x in sl if x[:4]=="TRA:" and f_tra_filter(x[4:])]
b_l = [x[4:][trb_start:trb_end] for x in sl if x[:4]=="TRB:" and f_trb_filter(x[4:])]
c_np = M.loc[i,truthval_cols].astype(int).values
A0 = ord('A')
for a in a_l:
a_np = np.zeros(26)
for letter in a:
a_np[ord(letter)-A0] += 1
for b in b_l:
b_np = np.zeros(26)
for letter in b:
b_np[ord(letter)-A0] += 1
tra_L.append(a_np)
trb_L.append(b_np)
binds_L.append(c_np)
idxL.append(i)
tra = np.array(tra_L)
trb = np.array(trb_L)
binds = np.array(binds_L)
return tra, trb, binds, M.loc[:, surface_marker_cols].iloc[idxL,:], M.iloc[idxL,:]["donor"]
def f_dataset_helper(w_vdj, x, md, mw, w_surface_markers):
trax, trbx, bindsx, d_surface_markers, _ = w_vdj
try:
return run_dataset_schema(trax, trbx, bindsx, 0.01, max_w = mw, mode=md, d_surface_markers = d_surface_markers, w_surface_markers=w_surface_markers)
except:
print ("Flag 67567.10 Saw exception in f_dataset_helper")
return (None, None)
def run_dataset_schema(tra, trb, binds, min_corr, max_w=1000, mode="both", d_surface_markers=None, w_surface_markers=0):
alphabet = [chr(ord('A')+i) for i in range(26)]
    non_aa = np.array([ord(c)-ord('A') for c in "BJOUXZ"])  # BJOUXZ are not standard amino-acid letters
if "both" in mode:
D = np.hstack([tra,trb])
letters = np.array(['a'+c for c in alphabet] + ['b'+c for c in alphabet])
to_delete = list(non_aa)+list(non_aa+26)
elif "tra" in mode:
D = tra
letters = ['{0}'.format(chr(ord('A')+i)) for i in range(26)]
to_delete = list(non_aa)
elif "trb" in mode:
D = trb
letters = ['{0}'.format(chr(ord('A')+i)) for i in range(26)]
to_delete = list(non_aa)
D = np.delete(D, to_delete, axis=1)
letters = np.delete(letters, to_delete)
if "bin:" in mode:
D = 1*(D>0)
if "std:" in mode:
D = D / (1e-12 + D.std(axis=0))
g = [binds]; wg = [1]; tg=["feature_vector"]
if w_surface_markers != 0:
g.append(d_surface_markers.values)
wg.append(w_surface_markers)
tg.append("feature_vector")
try:
sys.path.append(os.path.join(sys.path[0],'../../schema'))
import schema_qp
except:
from schema import schema_qp
afx = schema_qp.SchemaQP(min_corr, max_w, params = {"require_nonzero_lambda":1}, mode="scale")
afx.fit(D,g,tg,wg)
return (pd.Series(np.sqrt(afx._wts), index=letters), afx._soln_info)
def f_tra_filter(v):
return v[:2]=="CA" and len(v)>=13
def f_trb_filter(v):
return v[:4]=="CASS" and len(v)>=13
def do_dataset_process(M, l, n_jobs, intermediate_file=None, include_full_seq=True, w_surface_markers=0, kmer_type=""):
try:
if include_full_seq:
l.append((2,4,7,11))
l1 = [(M, f_tra_filter, f_trb_filter, *v) for v in l]
print ("Flag 681.10 ", len(l1), M.shape, l, n_jobs)
if intermediate_file is not None and os.path.exists(intermediate_file):
vdj_data = pickle.load(open(intermediate_file,'rb'))
else:
pool = multiprocessing.Pool(processes = n_jobs)
try:
vdj_data = pool.starmap(chunkifyCDRseqs, l1)
finally:
pool.close()
pool.join()
if intermediate_file is not None:
pickle.dump(vdj_data, open(intermediate_file,'wb'))
print ("Flag 681.50 ", len(vdj_data))
lx = []
for md in ["tra","trb"]:
for mw in [1.5, 1.75, 2, 2.25, 3, 5]:
lx.extend([(w, l[i], kmer_type+":"+md, mw, w_surface_markers) for i,w in enumerate(vdj_data)])
print ("Flag 681.70 ", len(lx))
pool2 = multiprocessing.Pool(processes = n_jobs)
try:
lx2 = pool2.starmap(f_dataset_helper, lx)
finally:
pool2.close()
pool2.join()
print ("Flag 681.80 ", len(lx2))
f_0_1_scaler = lambda v: (v-np.min(v))/(np.max(v)-np.min(v))
ly = []
rd = {}
for i, v in enumerate(lx):
_, k, md, mw, w_surface = v
rd[(k, md, mw, w_surface)] = lx2[i][0]
ly.append(f_0_1_scaler(lx2[i][0]))
print ("Flag 681.85 ", len(ly), len(rd))
v_rd = pd.DataFrame(ly).median(axis=0).sort_values()
return v_rd, rd
except:
return (None, None)
def f_colprocess_helper(trx, binds, surface_markers, mw, w_surface_markers):
try:
g = [binds]; wg = [1]; tg=["feature_vector"]
if w_surface_markers != 0:
            g.append(surface_markers)
wg.append(w_surface_markers)
tg.append("feature_vector")
try:
sys.path.append(os.path.join(sys.path[0],'../../schema'))
import schema_qp
except:
from schema import schema_qp
afx = schema_qp.SchemaQP(0.01, mw, params = {"require_nonzero_lambda":1,
"scale_mode_uses_standard_scaler":1,
"d0_type_is_feature_vector_categorical":1,},
mode="scale")
afx.fit(trx,g,tg,wg)
return (pd.Series(np.sqrt(afx._wts)), afx._soln_info)
except:
print ("Flag 67568.10 Saw exception in f_colprocess_helper")
return (None, None)
def do_columnwise_process(M, chain, n_jobs, intermediate_file=None, w_surface_markers=0):
assert chain in ["tra","trb"]
try:
truthval_cols = [c for c in M.columns if 'binder' in c]
surface_marker_cols = "CD3,CD19,CD45RA,CD4,CD8a,CD14,CD45RO,CD279_PD-1,IgG1,IgG2a,IgG2b,CD127,CD197_CCR7,HLA-DR".split(",")
def f_pad20(s):
return [ord(c)-ord('A')+1 for c in s[:20]] + [0]*max(0,20-len(s))
trx_L = []; binds_L = []; markers_L = []
for i in tqdm(range(M.shape[0])): #range(M.shape[0]):
sl = M.at[i,"cell_clono_cdr3_aa"].split(";")
for x in sl:
if x[:4].lower() != (chain+":"): continue
trx_L.append(f_pad20(x[4:]))
binds_L.append(M.loc[i,truthval_cols].astype(int).values)
markers_L.append(M.loc[i,surface_marker_cols].astype(int).values)
trx = np.array(trx_L)
binds = np.array(binds_L)
surface_markers = np.array(markers_L)
vdj_data = (trx, binds, surface_markers)
print ("Flag 682.10 ", M.shape, chain, n_jobs)
if intermediate_file is not None and os.path.exists(intermediate_file):
vdj_data = pickle.load(open(intermediate_file,'rb'))
elif intermediate_file is not None:
pickle.dump(vdj_data, open(intermediate_file,'wb'))
trx, binds, surface_markers = vdj_data
print ("Flag 681.50 ", trx.shape)
lx = []
for mw in [1.6, 1.8, 2, 2.2]:
lx.extend([(trx, binds, surface_markers, mw, w_surface_markers)])
print ("Flag 681.70 ", len(lx))
pool2 = multiprocessing.Pool(processes = n_jobs)
try:
lx2 = pool2.starmap(f_colprocess_helper, lx)
finally:
pool2.close()
pool2.join()
print ("Flag 681.80 ", len(lx2))
f_0_1_scaler = lambda v: (v-np.min(v))/(np.max(v)-np.min(v))
ly = []
rd = {}
for i, v in enumerate(lx):
_, _, _, mw, w_surface = v
rd[(chain, mw, w_surface)] = lx2[i][0]
ly.append(f_0_1_scaler(lx2[i][0]))
print ("Flag 681.85 ", len(ly), len(rd))
v_rd = pd.DataFrame(ly).median(axis=0).sort_values()
v_rd2 =
|
pd.DataFrame(ly)
|
pandas.DataFrame
|
# coding: utf-8
"""tools for analyzing VPs in an individual precipitation event"""
from collections import OrderedDict
from os import path
from datetime import timedelta
import numpy as np
import pandas as pd
import xarray as xr
import matplotlib as mpl
import matplotlib.pyplot as plt
from scipy.io import loadmat
from radcomp.vertical import (filtering, classification, plotting, insitu, ml,
deriv, NAN_REPLACEMENT)
from radcomp import arm, azs
from radcomp.tools import strftime_date_range, cloudnet
from j24 import home, daterange2str
USE_LEGACY_DATA = False
if USE_LEGACY_DATA:
DATA_DIR = path.join(home(), 'DATA', 'vprhi')
DATA_FILE_FMT = '%Y%m%d_IKA_VP_from_RHI.mat'
else:
DATA_DIR = path.join(home(), 'DATA', 'vprhi2')
DATA_FILE_FMT = '%Y%m%d_IKA_vprhi.mat'
DEFAULT_PARAMS = ['zh', 'zdr', 'kdp']
def case_id_fmt(t_start, t_end=None, dtformat='{year}{month}{day}{hour}',
day_fmt='%d', month_fmt='%m', year_fmt='%y', hour_fmt='T%H'):
"""daterange2str wrapper for date range based IDs"""
return daterange2str(t_start, t_end, dtformat=dtformat, hour_fmt=hour_fmt,
day_fmt=day_fmt, month_fmt=month_fmt,
year_fmt=year_fmt)
def date_us_fmt(t_start, t_end, dtformat='{day} {month} {year}', day_fmt='%d',
month_fmt='%b', year_fmt='%Y'):
"""daterange2str wrapper for US human readable date range format"""
return daterange2str(t_start, t_end, dtformat=dtformat, day_fmt=day_fmt,
month_fmt=month_fmt, year_fmt=year_fmt)
def vprhimat2pn(datapath):
"""Read vertical profile mat files to Panel."""
# TODO: Panel
try:
data = loadmat(datapath)['VP_RHI']
except FileNotFoundError as e:
print('{}. Skipping.'.format(e))
return pd.Panel()
fields = list(data.dtype.fields)
fields.remove('ObsTime')
fields.remove('height')
str2dt = lambda tstr: pd.datetime.strptime(tstr, '%Y-%m-%dT%H:%M:%S')
t = list(map(str2dt, data['ObsTime'][0][0]))
h = data['height'][0][0][0]
data_dict = {}
for field in fields:
data_dict[field] = data[field][0][0].T
try:
return pd.Panel(data_dict, major_axis=h, minor_axis=t)
# sometimes t does not have all values
except ValueError as e:
if data_dict['ZH'].shape[1] == 96:
            # workaround to set correct timestamps when data is missing
t1 = t[0] + timedelta(hours=23, minutes=45)
midnight = t1.replace(hour=0, minute=0)
if midnight <= t[0]:
midnight += timedelta(hours=24)
dt = t1-midnight
dt_extra = timedelta(minutes=15-(dt.total_seconds()/60)%15)
dt = dt + dt_extra
t = pd.date_range(t[0]-dt, t1-dt, freq='15min')
print('ObsTime missing values! Replacing with generated timestamps.')
return pd.Panel(data_dict, major_axis=h, minor_axis=t)
else:
raise e
def downward_gradient(df):
"""smooth downwards gradient"""
df_smooth = df.fillna(0).apply(filtering.savgol_series, args=(19, 2))
dfg = df_smooth.diff()
dfg = dfg.rolling(5, center=True).mean() # smooth gradient
dfg[df.isnull()] = np.nan
return -dfg
def proc_indicator(pn, var='zdrg', tlims=(-20, -10)):
"""gradient to process indicator"""
return pn[var][(pn.T < tlims[1]) & (pn.T > tlims[0])].sum()
def kdp2phidp(kdp, dr_km):
"""Retrieve phidp from kdp."""
kdp_filled = kdp.fillna(0)
return 2*kdp_filled.cumsum().multiply(dr_km, axis=0)
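# Minimal worked sketch (illustrative values, not real radar data): a constant KDP of
# 1 deg/km over three 100 m gates makes PhiDP grow by 2*KDP*dr = 0.2 deg per gate.
if __name__ == '__main__':
    _kdp = pd.DataFrame({'t0': [1.0, 1.0, 1.0]}, index=[100.0, 200.0, 300.0])
    _dr_km = pd.Series(0.1, index=_kdp.index)
    assert np.allclose(kdp2phidp(_kdp, _dr_km)['t0'].values, [0.2, 0.4, 0.6])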
def data_range(dt_start, dt_end):
"""read raw VP data between datetimes"""
filepath_fmt = path.join(DATA_DIR, DATA_FILE_FMT)
fnames = strftime_date_range(dt_start, dt_end, filepath_fmt)
pns = map(vprhimat2pn, fnames)
pns_out = []
for pn in pns:
if not pn.empty:
pns_out.append(pn)
return pd.concat(pns_out, axis=2, sort=True).loc[:, :, dt_start:dt_end]
def prepare_pn(pn, kdpmax=np.nan):
"""Filter data and calculate extra parameters."""
dr = pd.Series(pn.major_axis.values, index=pn.major_axis).diff().bfill()
dr_km = dr/1000
pn_new = pn.copy()
pn_new['KDP_orig'] = pn_new['KDP'].copy()
#pn_new['KDP'][pn_new['KDP'] < 0] = np.nan
pn_new['phidp'] = kdp2phidp(pn_new['KDP'], dr_km)
kdp = pn_new['KDP'] # a view
# remove extreme KDP values in the panel using a view
if USE_LEGACY_DATA:
kdp[kdp > kdpmax] = 0
#kdp[kdp < 0] = 0
pn_new = filtering.fltr_median(pn_new)
pn_new = filtering.fltr_nonmet(pn_new)
# ensure all small case keys are in place
pn_new = filtering.create_filtered_fields_if_missing(pn_new, DEFAULT_PARAMS)
#pn_new = filtering.fltr_ground_clutter_median(pn_new)
pn_new['kdpg'] = 1000*downward_gradient(pn_new['kdp'])
pn_new['zdrg'] = downward_gradient(pn_new['zdr'])
return pn_new
def dt2pn(dt0, dt1, **kws):
"""Read and preprocess VP data between datetimes."""
pn_raw = data_range(dt0, dt1)
return prepare_pn(pn_raw, **kws)
def fillna(dat, field=''):
"""Fill nan values with values representing zero scatterers."""
data = dat.copy()
if isinstance(data, pd.Panel):
for field in list(data.items):
data[field].fillna(NAN_REPLACEMENT[field.upper()], inplace=True)
elif isinstance(data, pd.DataFrame):
data.fillna(NAN_REPLACEMENT[field.upper()], inplace=True)
return data
def prepare_data(pn, fields=DEFAULT_PARAMS, hlimits=(190, 10e3), kdpmax=None):
    """Prepare data for classification. Scaling has to be done separately."""
try:
data = pn[fields, hlimits[0]:hlimits[1], :].transpose(0, 2, 1)
except TypeError: # assume xarray dataset
pn = pn.to_dataframe().to_panel() # TODO: Panel
data = pn[fields, hlimits[0]:hlimits[1], :].transpose(0, 2, 1)
if kdpmax is not None:
data['KDP'][data['KDP'] > kdpmax] = np.nan
return fillna(data)
def prep_data(pn, vpc):
"""prepare_data wrapper"""
return prepare_data(pn, fields=vpc.params, hlimits=vpc.hlimits, kdpmax=vpc.kdpmax)
def round_time_index(data, resolution='1min'):
"""round datetime index to a given resolution"""
dat = data.copy()
ind = data.index.round(resolution)
dat.index = ind
return dat
def inversion_score(c):
"""Score indicating inversion"""
tdiff = c.data['T'].diff()
return tdiff[tdiff>0].sum().median()
def plot_case(c, params=None, interactive=True, raw=True, n_extra_ax=0,
t_contour_ax_ind=False, above_ml_only=False, t_levels=[0],
inverse_transformed=False, plot_extras=['ts', 'silh', 'cl', 'lwe'],
**kws):
"""Visualize a Case object."""
try:
c.load_model_temperature()
except (ValueError, FileNotFoundError):
pass
if not c.has_ml:
above_ml_only = False
if raw:
data = c.data
else:
data = c.cl_data.transpose(0, 2, 1)
if above_ml_only:
data = c.data_above_ml if raw else c.only_data_above_ml(data)
elif inverse_transformed:
if c.has_ml:
above_ml_only = True
data = c.inverse_transform()
if params is None:
if c.vpc is not None:
params = c.vpc.params
else:
params = DEFAULT_PARAMS
plot_classes = ('cl' in plot_extras) and (c.vpc is not None)
plot_silh = ('silh' in plot_extras) and (c.vpc is not None)
plot_lwe = ('lwe' in plot_extras) and (c.pluvio is not None)
if plot_lwe:
plot_lwe = not c.pluvio.data.empty
plot_azs = ('azs' in plot_extras) and (c.azs().size > 0)
plot_fr = ('fr' in plot_extras) and (c.fr().size > 0)
plot_t = ('ts' in plot_extras) and (c.t_surface().size > 0)
plot_lr = ('lr' in plot_extras)
n_extra_ax += plot_t + plot_lwe + plot_fr + plot_azs + plot_silh
next_free_ax = -n_extra_ax
cmap_override = {'LR': 'seismic', 'kdpg': 'bwr', 'zdrg': 'bwr',
'omega': 'seismic_r'}
if plot_lr:
data['LR'] = data['T'].diff()
params = np.append(params, 'LR')
hlims = (0, 11.5e3) if (c.has_ml and not above_ml_only) else (0, 10e3)
fig, axarr = plotting.plotpn(data, fields=params,
n_extra_ax=n_extra_ax, has_ml=c.has_ml,
cmap_override=cmap_override,
hlims=hlims, **kws)
plotfuns = OrderedDict()
plotfuns[c.plot_t] = plot_t
plotfuns[c.plot_silh] = plot_silh
plotfuns[c.plot_lwe] = plot_lwe
plotfuns[c.plot_azs] = plot_azs
plotfuns[c.plot_fr] = plot_fr
for plotfun, flag in plotfuns.items():
if flag:
plotfun(ax=axarr[next_free_ax])
next_free_ax += 1
# plot temperature contours
if t_contour_ax_ind:
if t_contour_ax_ind == 'all':
t_contour_ax_ind = range(len(params))
try:
for i in t_contour_ax_ind:
c.plot_growth_zones(ax=axarr[i], levels=t_levels)
except TypeError:
warnfmt = '{}: Could not plot temperature contours.'
print(warnfmt.format(c.name()))
if plot_classes:
for iax in range(len(axarr)-1):
c.vpc.class_colors(classes=c.classes(), ax=axarr[iax])
has_vpc = (c.vpc is not None)
if c.has_ml and has_vpc and not above_ml_only:
for i in range(len(params)):
c.plot_ml(ax=axarr[i])
c.cursor = mpl.widgets.MultiCursor(fig.canvas, axarr, color='black',
horizOn=True, vertOn=True, lw=0.5)
if interactive:
on_click_fun = lambda event: c._on_click_plot_dt_cs(event, params=params,
inverse_transformed=inverse_transformed,
above_ml_only=above_ml_only)
fig.canvas.mpl_connect('button_press_event', on_click_fun)
for ax in axarr:
ax.xaxis.grid(True)
ax.yaxis.grid(True)
c.set_xlim(ax)
axarr[0].set_title(date_us_fmt(c.t_start(), c.t_end()))
return fig, axarr
class Case:
"""
Precipitation event class for VP studies.
Attributes:
data (Panel)
cl_data (Panel): non-scaled classifiable data
cl_data_scaled (Panel): scaled classifiable data
vpc (radcomp.vertical.VPC): classification scheme
temperature (Series): stored temperature
pluvio (baecc.instruments.Pluvio)
"""
def __init__(self, data=None, cl_data=None, cl_data_scaled=None,
vpc=None, has_ml=False, timedelta=None,
is_convective=None):
self.data = data
self.cl_data = cl_data
self.cl_data_scaled = cl_data_scaled
self.silh_score = None
self.vpc = vpc
self.pluvio = None
self.has_ml = has_ml
self.is_convective = is_convective
self._timedelta = timedelta
self._data_above_ml = None
self._dt_ax = None
self.cursor = None
self._classes = None
@classmethod
def from_dtrange(cls, t0, t1, **kws):
"""Create a case from data between a time range."""
kdpmax = 0.5
if 'has_ml' in kws:
if kws['has_ml']:
kdpmax = 1.3
pn = dt2pn(t0, t1, kdpmax=kdpmax)
return cls(data=pn, **kws)
@classmethod
def from_mat(cls, matfile, **kws):
"""Case object from a single mat file"""
pn = vprhimat2pn(matfile)
data = prepare_pn(pn)
return cls(data=data, **kws)
@classmethod
def from_xarray(cls, ds, **kws):
"""Case from xarray dataset"""
pn = ds.to_dataframe().to_panel()
#data = filtering.create_filtered_fields_if_missing(pn, DEFAULT_PARAMS)
data = prepare_pn(pn)
return cls(data=data, **kws)
@classmethod
def from_nc(cls, ncfile, **kws):
        """Case from a netCDF file"""
        ds = xr.open_dataset(ncfile)
        return cls.from_xarray(ds, **kws)
@property
def data_above_ml(self):
"""lazy loading data above ml"""
if not self.has_ml:
return self.data
if self._data_above_ml is None:
self._data_above_ml = self.only_data_above_ml()
return self._data_above_ml
@property
def timedelta(self):
"""time resolution"""
if self._timedelta is None:
dt = self.timestamps().diff().min()
notefmt = 'Case timedelta was not set. Setting to detected value of {}'
print(notefmt.format(dt))
self._timedelta = self.timestamps().diff().min()
return self._timedelta
@timedelta.setter
def timedelta(self, timedelta):
self._timedelta = timedelta
def dataset(self):
"""data Panel as xarray DataSet"""
mapping = dict(major_axis='height', minor_axis='time')
return self.data.to_xarray().to_dataset(dim='items').rename(mapping)
def only_data_above_ml(self, data=None):
"""Data above ml"""
if data is None:
data = self.data
data = fillna(data)
top = self.ml_limits()[1]
return data.apply(ml.collapse2top, axis=(2, 1), top=top)
def name(self, **kws):
"""date range based id"""
return case_id_fmt(self.t_start(), self.t_end(), **kws)
def t_start(self):
"""data start time"""
return self.data.minor_axis[0]
def t_end(self):
"""data end time"""
return self.data.minor_axis[-1]
def timestamps(self, fill_value=None, round_index=False):
"""Data timestamps as Series. Optionally filled with fill_value."""
t = self.data.minor_axis
data = t if fill_value is None else np.full(t.size, fill_value)
ts = pd.Series(index=t, data=data)
if round_index:
return round_time_index(ts)
return ts
def mask(self, raw=False):
"""common data mask"""
if raw:
return self.data['ZH'].isnull()
return self.data['zh'].isnull()
def load_classification(self, name=None, **kws):
"""Load a classification scheme based on its id, and classify."""
if name is None:
name = self.vpc.name()
self.vpc = classification.VPC.load(name)
self.classify(**kws)
def prepare_cl_data(self, save=True, force_no_crop=False):
"""Prepare unscaled classification data."""
if self.data is None:
return None
cl_data = prep_data(self.data, self.vpc)
if self.has_ml and not force_no_crop:
top = self.ml_limits()[1]
collapsefun = lambda df: ml.collapse2top(df.T, top=top).T
cl_data = cl_data.apply(collapsefun, axis=(1, 2))
cl_data = fillna(cl_data)
if cl_data.size == 0:
return None
if save and not force_no_crop:
self.cl_data = cl_data
return cl_data
def scale_cl_data(self, save=True, force_no_crop=False):
"""scaled version of classification data
time rounded to the nearest minute
"""
cl_data = self.prepare_cl_data(save=save, force_no_crop=force_no_crop)
if cl_data is None:
return None
#scaled = scale_data(cl_data).fillna(0)
scaled = self.vpc.feature_scaling(cl_data).fillna(0)
if save and not force_no_crop:
self.cl_data_scaled = scaled
return scaled
def ml_limits(self, interpolate=True):
"""ML top using peak detection"""
if self.vpc is None:
nans = self.timestamps(fill_value=np.nan)
return nans.copy(), nans.copy()
if 'MLI' not in self.data:
self.prepare_mli(save=True)
bot, top = ml.ml_limits(self.data['MLI'], self.data['RHO'])
if not interpolate:
return bot, top
return tuple(lim.interpolate().bfill().ffill() for lim in (bot, top))
def prepare_mli(self, save=True):
"""Prepare melting layer indicator."""
cl_data_scaled = self.scale_cl_data(force_no_crop=True)
zdr = cl_data_scaled['zdr'].T
try:
z = cl_data_scaled['zh'].T
except KeyError:
z = cl_data_scaled['ZH'].T
rho = self.data['RHO'].loc[z.index]
mli = ml.indicator(zdr, z, rho)
if save:
self.data['MLI'] = mli
return mli
def classify(self, vpc=None, save=True):
"""classify based on class_scheme"""
if vpc is not None:
self.vpc = vpc
if self.cl_data_scaled is None:
self.scale_cl_data()
classify_kws = {}
if 'temp_mean' in self.vpc.params_extra:
classify_kws['extra_df'] = self.t_surface()
if self.cl_data_scaled is not None and self.vpc is not None:
classes, silh = self.vpc.classify(self.cl_data_scaled, **classify_kws)
classes.name = 'class'
if save:
self.silh_score = silh.reindex(self.data.minor_axis)
self._classes = classes
return classes, silh
return None, None
def inverse_transform(self):
"""inverse transformed classification data"""
pn = self.vpc.inverse_data
pn.major_axis = self.cl_data_scaled.minor_axis
pn.minor_axis = self.data.minor_axis
return self.vpc.feature_scaling(pn, inverse=True)
def plot_classes(self):
"""plot_classes wrapper"""
return plotting.plot_classes(self.cl_data_scaled, self.classes())
def plot(self, **kws):
"""Visualize the case."""
return plot_case(self, **kws)
def plot_growth_zones(self, **kws):
"""plotting.plot_growth_zones wrapper"""
self.load_model_temperature()
plotting.plot_growth_zones(self.data['T'], **kws)
def plot_ml(self, linestyle='', marker='_', ax=None):
"""Plot melting layer highlighting interpolated parts."""
ax = ax or plt.gca()
common_kws = dict(linestyle=linestyle, marker=marker)
_, topi = self.ml_limits(interpolate=True)
_, top = self.ml_limits(interpolate=False)
ax.plot(topi.index, topi.values, color='gray', zorder=5, **common_kws)
ax.plot(top.index, top.values, color='black', zorder=6, **common_kws)
return ax
def shift(self, data):
"""shift data for plotting"""
half_dt = self.timedelta/2
return data.shift(freq=half_dt)
def plot_series(self, data, ax=None, **kws):
"""Plot time series correctly shifted."""
ax = ax or plt.gca()
dat = self.shift(data)
plotting.plot_data(dat, ax=ax, **kws)
self.set_xlim(ax)
return ax
def plot_t(self, ax, tmin=-25, tmax=10):
"""Plot surface temperature."""
self.plot_series(self.t_surface(), ax=ax)
ax.set_ylabel(plotting.LABELS['temp_mean'])
ax.set_ylim([tmin, tmax])
return ax
def plot_lwe(self, ax, rmax=4):
"""plot LWE"""
self.plot_series(self.lwe(), ax=ax, label=self.pluvio.name)
ax.set_ylim(bottom=0, top=rmax)
ax.set_ylabel(plotting.LABELS['intensity'])
return ax
def plot_fr(self, ax, frmin=-0.1, frmax=1):
"""Plot riming fraction."""
self.plot_series(self.fr(), ax=ax, label='FR')
ax.set_ylim(bottom=frmin, top=frmax)
ax.set_ylabel(plotting.LABELS[self.fr().name])
return ax
def plot_azs(self, ax, amin=10, amax=4000):
"""Plot prefactor of Z-S relation"""
a_zs = self.azs()
label = plotting.LABELS[a_zs.name]
self.plot_series(a_zs, ax=ax, label=label)
ax.set_ylabel(plotting.LABELS[a_zs.name])
ax.set_yscale('log')
ax.set_ylim(bottom=amin, top=amax)
ax.set_yticks([10, 100, 1000])
return ax
def plot_silh(self, ax=None):
"""Plot silhouette coefficient"""
self.plot_series(self.silh_score, ax=ax)
ax.set_ylabel('silhouette\ncoefficient')
ax.set_ylim(bottom=-1, top=1)
ax.set_yticks([-1, 0, 1])
return ax
def train(self, **kws):
"""Train a classification scheme with scaled classification data."""
if self.vpc.extra_weight:
extra_df = self.t_surface()
else:
extra_df = None
if self.cl_data_scaled is None:
self.scale_cl_data()
return self.vpc.train(data=self.cl_data_scaled,
extra_df=extra_df, **kws)
def _on_click_plot_dt_cs(self, event, params=None, **kws):
"""on click plot profiles at a timestamp"""
try:
dt = plotting.num2date(event.xdata)
except TypeError: # clicked outside axes
return
ax, update, axkws = plotting.handle_ax(self._dt_ax)
axkws.update(kws)
self._dt_ax = self.plot_data_at(dt, params=params, **axkws)
if update:
ax.get_figure().canvas.draw()
def nearest_datetime(self, dt, method='nearest', **kws):
"""Round datetime to nearest data timestamp."""
i = self.data.minor_axis.get_loc(dt, method=method, **kws)
return self.data.minor_axis[i]
def plot_data_at(self, dt, params=None, inverse_transformed=False,
above_ml_only=False, **kws):
"""Plot profiles at given timestamp."""
data_orig = self.data_above_ml if above_ml_only else self.data
# integer location
i = data_orig.minor_axis.get_loc(dt, method='nearest')
dti = data_orig.minor_axis[i]
data = data_orig.iloc[:, :, i]
if params is not None:
data = data[params]
axarr = plotting.plot_vps(data, has_ml=self.has_ml, **kws)
if inverse_transformed:
plotting.plot_vps(self.inverse_transform().iloc[:, :, i],
has_ml=self.has_ml, axarr=axarr)
if not above_ml_only and self.has_ml:
_, ml_top = self.ml_limits(interpolate=False)
_, ml_top_i = self.ml_limits(interpolate=True)
for ax in axarr:
ax.axhline(ml_top_i.loc[dti], color='gray')
ax.axhline(ml_top.loc[dti], color='black')
t = data_orig.minor_axis[i]
axarr[1].set_title(str(t))
return axarr
def set_xlim(self, ax):
start = self.t_start()-self.timedelta/2
end = self.t_end()+self.timedelta/2
ax.set_xlim(left=start, right=end)
return ax
def base_minute(self):
"""positive offset in minutes for profile measurements after each hour
"""
return self.data.minor_axis[0].round('1min').minute%15
def base_middle(self):
dt_minutes = round(self.timedelta.total_seconds()/60)
return self.base_minute()-dt_minutes/2
def time_weighted_mean(self, data, offset_half_delta=True):
dt = self.timedelta
if offset_half_delta:
base = self.base_middle()
offset = dt/2
else:
base = self.base_minute()
offset = 0
return insitu.time_weighted_mean(data, rule=dt, base=base, offset=offset)
def t_surface(self, use_arm=False, interp_gaps=True):
"""resampled ground temperature
Returns:
Series: resampled temperature
"""
t_end = self.t_end()+pd.Timedelta(minutes=15)
if use_arm:
t = arm.var_in_timerange(self.t_start(), t_end, var='temp_mean')
else:
hdfpath = path.join(home(), 'DATA', 't_fmi_14-17.h5')
if not path.exists(hdfpath):
return pd.Series()
t = pd.read_hdf(hdfpath, 'data')['TC'][self.t_start():t_end]
t.name = 'temp_mean'
tre = t.resample('15min', base=self.base_minute()).mean()
if interp_gaps:
tre = tre.interpolate()
return tre
def azs(self, **kws):
t_end = self.t_end()+pd.Timedelta(minutes=15)
data = azs.load_series()[self.t_start(): t_end]
if data.empty:
return pd.Series()
return data.resample('15min', base=self.base_minute()).mean()
def load_pluvio(self, **kws):
"""load_pluvio wrapper"""
self.pluvio = insitu.load_pluvio(start=self.t_start(),
end=self.t_end(), **kws)
def load_model_data(self, variable='temperature'):
"""Load interpolated model data."""
self.data[variable] = cloudnet.load_as_df(self.data.major_axis,
self.data.minor_axis,
variable=variable)
def load_model_temperature(self, overwrite=False):
"""Load interpolated model temperature if not already loaded."""
if 'T' in self.data and not overwrite:
return
t = cloudnet.load_as_df(self.data.major_axis, self.data.minor_axis,
variable='temperature') - 273.15
self.data['T'] = t
def lwe(self):
"""liquid water equivalent precipitation rate"""
if self.pluvio is None:
self.load_pluvio()
i = self.pluvio.intensity()
return self.time_weighted_mean(i, offset_half_delta=False)
def fr(self):
"""rime mass fraction"""
t_end = self.t_end()+pd.Timedelta(minutes=15)
hdfpath = path.join(home(), 'DATA', 'FR_haoran.h5')
if not path.exists(hdfpath):
return
|
pd.Series()
|
pandas.Series
|
import numpy as np
import pandas as pd
def is_contained(a, b):
""" Check if segment b is fully contained within segment a """
return b[0] >= a[0] and b[1] <= a[1]
def contained_counts_unopt(X, Y):
""" Find counts of overlapping segments fully contained in non-overlapping segments (unopt)
Args:
X (numpy.array): start and end of non-overlapping segments with shape (N,2) for N segments
Y (numpy.array): start and end of overlapping segments with shape (M,2) for M segments
Returns:
numpy.array: N length array of counts of Y contained in X
Both X and Y are assumed to be ordered by start position.
"""
C = np.zeros(X.shape[0])
y_idx = 0
for x_idx, x in enumerate(X):
while y_idx < Y.shape[0] and Y[y_idx][0] < x[0]:
y_idx += 1
while y_idx < Y.shape[0] and Y[y_idx][0] <= x[1]:
if is_contained(x, Y[y_idx]):
C[x_idx] += 1
y_idx += 1
return C
def contained_counts(X, Y):
""" Find counts of overlapping segments fully contained in non-overlapping segments
Args:
X (numpy.array): start and end of non-overlapping segments with shape (N,2) for N segments
Y (numpy.array): start and end of overlapping segments with shape (M,2) for M segments
Returns:
numpy.array: N length array of counts of Y contained in X
X is assumed to be ordered by start position.
"""
idx = np.searchsorted(X[:,1], Y[:,0])
end_idx = np.searchsorted(X[:,1], Y[:,1])
# Mask Y segments outside last X segment
outside = end_idx >= X.shape[0]
idx[outside] = 0
# Filter for containment, same X segment, not outside
idx = idx[
(Y[:,0] >= X[idx,0]) &
(Y[:,1] <= X[idx,1]) &
(idx == end_idx) &
(~outside)
]
# Count number of Y in each X
count = np.bincount(idx, minlength=X.shape[0])
return count
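# Quick usage sketch for contained_counts (illustrative toy segments, not from any real data):
# X holds non-overlapping segments and Y possibly overlapping segments, both sorted by start.
_X_demo = np.array([[0, 10], [20, 30]])
_Y_demo = np.array([[1, 5], [8, 12], [21, 29], [25, 40]])
# Only [1, 5] lies fully inside [0, 10] and only [21, 29] inside [20, 30].
_counts = contained_counts(_X_demo, _Y_demo)            # array([1, 1])
_counts_ref = contained_counts_unopt(_X_demo, _Y_demo)  # array([1., 1.]) - same counts, float dtype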
def overlapping_counts(X, Y):
""" Find counts of segments in Y overlapping positions in X
X and Y are assumed to be sorted: Y by start position, X by position
"""
C = np.zeros(X.shape[0])
x_idx = 0
for y in Y:
while x_idx < X.shape[0] and X[x_idx] <= y[0]:
x_idx += 1
x_idx_1 = x_idx
while x_idx_1 < X.shape[0] and X[x_idx_1] < y[1]:
C[x_idx_1] += 1
x_idx_1 += 1
return C
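# Quick usage sketch for overlapping_counts (illustrative toy data): X is a sorted array of
# positions, Y sorted segments; each count is the number of Y segments covering that position,
# with both segment start and end treated as exclusive.
_pos_demo = np.array([1, 5, 9, 15])
_segs_demo = np.array([[0, 6], [4, 10]])
_cover = overlapping_counts(_pos_demo, _segs_demo)  # array([1., 2., 1., 0.])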
def find_contained_positions_unopt(X, Y):
""" Find mapping of positions contained within non-overlapping segments (unopt)
Args:
X (numpy.array): start and end of non-overlapping segments with shape (N,2) for N segments
Y (numpy.array): positions with shape (M,) for M positions
Returns:
numpy.array: M length array of indices into X containing elements in Y
X is assumed to be ordered by start position.
For positions not contained within any segment, value in returned array will be -1
"""
M = [-1]*Y.shape[0]
for x_idx, x in enumerate(X):
for y_idx, y in enumerate(Y):
if Y[y_idx] >= x[0] and Y[y_idx] < x[1]:
assert M[y_idx] == -1
M[y_idx] = x_idx
return M
def find_contained_positions(X, Y):
""" Find mapping of positions contained within non-overlapping segments
Args:
X (numpy.array): start and end of non-overlapping segments with shape (N,2) for N segments
Y (numpy.array): positions with shape (M,) for M positions
Returns:
numpy.array: M length array of indices into X containing elements in Y
X is assumed to be ordered by start position.
For positions not contained within any segment, value in returned array will be -1
"""
# Positions less than segment end point
idx = np.searchsorted(X[:, 1], Y, side='right')
# Mask positions outside greatest endpoint
mask = idx < X.shape[0]
idx[~mask] = -1
# Mask positions that are not fully contained within a segment
mask = mask & (Y >= X[idx, 0]) & (Y < X[idx, 1])
idx[~mask] = -1
return idx
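# Quick usage sketch for find_contained_positions (illustrative toy data): segment ends are
# exclusive, so position 10 does not map to [0, 10], and positions outside all segments map to -1.
_Xp_demo = np.array([[0, 10], [20, 30]])
_points_demo = np.array([5, 10, 25, 40])
_pos_map = find_contained_positions(_Xp_demo, _points_demo)  # array([ 0, -1,  1, -1])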
def find_contained_segments_unopt(X, Y):
""" Find mapping of segments contained within non-overlapping segments (unopt)
Args:
X (numpy.array): start and end of non-overlapping segments with shape (N,2) for N segments
Y (numpy.array): start and end of overlapping segments with shape (M,2) for M segments
Returns:
numpy.array: M length array of indices into X containing elements in Y
X is assumed to be ordered by start position.
For positions not contained within any segment, value in returned array will be -1
"""
M = [-1]*Y.shape[0]
for x_idx, x in enumerate(X):
for y_idx, y in enumerate(Y):
if Y[y_idx, 0] >= x[0] and Y[y_idx, 1] <= x[1]:
assert M[y_idx] == -1
M[y_idx] = x_idx
return M
def find_contained_segments(X, Y):
""" Find mapping of segments contained within non-overlapping segments
Args:
X (numpy.array): start and end of non-overlapping segments with shape (N,2) for N segments
Y (numpy.array): start and end of overlapping segments with shape (M,2) for M segments
Returns:
numpy.array: M length array of indices into X containing elements in Y
X is assumed to be ordered by start position.
For positions not contained within any segment, value in returned array will be -1
"""
# Y segment start greater than or equal to X segment start
idx = np.searchsorted(X[:, 0], Y[:, 0], side='right') - 1
# Y segment end less than or equal to X segment end
idx_end = np.searchsorted(X[:, 1], Y[:, 1], side='left')
# A Y segment is contained only if both searches point at the same X segment
mask = idx == idx_end
idx[~mask] = -1
return idx
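# Quick usage sketch for find_contained_segments (illustrative toy data): containment here is
# inclusive of segment endpoints, and a Y segment spanning a boundary maps to -1.
_Xs_demo = np.array([[0, 10], [20, 30]])
_Ys_demo = np.array([[1, 5], [8, 12], [20, 30]])
_seg_map = find_contained_segments(_Xs_demo, _Ys_demo)  # array([ 0, -1,  1])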
def vrange(starts, lengths):
""" Create concatenated ranges of integers for multiple start/length
Args:
starts (numpy.array): starts for each range
lengths (numpy.array): lengths for each range (same length as starts)
Returns:
numpy.array: concatenated ranges
See the following illustrative example:
>>> starts = np.array([1, 3, 4, 6])
>>> lengths = np.array([0, 2, 3, 0])
>>> print(vrange(starts, lengths))
[3 4 4 5 6]
"""
# Repeat start position index length times and concatenate
cat_start = np.repeat(starts, lengths)
# Create group counter that resets for each start/length
cat_counter = np.arange(lengths.sum()) - np.repeat(lengths.cumsum() - lengths, lengths)
# Add group counter to group specific starts
cat_range = cat_start + cat_counter
return cat_range
def interval_position_overlap(intervals, positions):
""" Map intervals to contained positions
Args:
intervals (numpy.array): start and end of intervals with shape (N,2) for N intervals
positions (numpy.array): positions, length M, must be sorted
Returns:
numpy.array: interval index, length L (arbitrary)
numpy.array: position index, length L (same as interval index)
Given a set of possibly overlapping intervals, create a mapping of positions that are contained
within those intervals.
"""
# Search for start and end of each interval in list of positions
start_pos_idx = np.searchsorted(positions, intervals[:,0])
end_pos_idx = np.searchsorted(positions, intervals[:,1])
# Calculate number of positions for each segment
lengths = end_pos_idx - start_pos_idx
# Interval index for mapping
interval_idx = np.repeat(np.arange(len(lengths)), lengths)
# Position index for mapping
position_idx = vrange(start_pos_idx, lengths)
return interval_idx, position_idx
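# Quick usage sketch for interval_position_overlap (illustrative toy data): intervals may overlap,
# so a position appears once per containing interval in the returned mapping.
_ivals_demo = np.array([[0, 10], [5, 15]])
_pts_demo = np.array([1, 6, 12, 20])
_ival_idx, _pt_idx = interval_position_overlap(_ivals_demo, _pts_demo)
# _ival_idx -> array([0, 0, 1, 1]); _pt_idx -> array([0, 1, 1, 2]):
# interval 0 contains positions 1 and 6, interval 1 contains positions 6 and 12.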
def reindex_segments(cn_1, cn_2):
""" Reindex segment data to a common set of intervals
Args:
cn_1 (pandas.DataFrame): table of copy number
cn_2 (pandas.DataFrame): another table of copy number
Returns:
pandas.DataFrame: reindex table
Expected columns of input dataframe: 'chromosome', 'start', 'end'
Output dataframe has columns 'chromosome', 'start', 'end', 'idx_1', 'idx_2'
where 'idx_1', and 'idx_2' are the indexes into cn_1 and cn_2 of sub segments
with the given chromosome, start, and end.
"""
if len(cn_1.index) == 0 or len(cn_2.index) == 0:
empty = pd.DataFrame(columns=['chromosome', 'start', 'end', 'idx_1', 'idx_2'], dtype=int)
empty['chromosome'] = empty['chromosome'].astype(str)
return empty
reseg = list()
for chromosome, chrom_cn_1 in cn_1.groupby('chromosome'):
chrom_cn_2 = cn_2[cn_2['chromosome'] == chromosome]
if len(chrom_cn_2.index) == 0:
continue
segment_boundaries = np.concatenate([
chrom_cn_1['start'].values,
chrom_cn_1['end'].values,
chrom_cn_2['start'].values,
chrom_cn_2['end'].values,
])
segment_boundaries = np.sort(np.unique(segment_boundaries))
chrom_reseg = pd.DataFrame({
'start':segment_boundaries[:-1],
'end':segment_boundaries[1:],
})
for suffix, chrom_cn in zip(('_1', '_2'), (chrom_cn_1, chrom_cn_2)):
chrom_reseg['start_idx'+suffix] = np.searchsorted(
chrom_cn['start'].values,
chrom_reseg['start'].values,
side='right',
) - 1
chrom_reseg['end_idx'+suffix] = np.searchsorted(
chrom_cn['end'].values,
chrom_reseg['end'].values,
side='left',
)
chrom_reseg['filter'+suffix] = (
(chrom_reseg['start_idx'+suffix] != chrom_reseg['end_idx'+suffix]) |
(chrom_reseg['start_idx'+suffix] < 0) |
(chrom_reseg['start_idx'+suffix] >= len(chrom_reseg['end'].values))
)
chrom_reseg = chrom_reseg[~chrom_reseg['filter_1'] & ~chrom_reseg['filter_2']]
for suffix, chrom_cn in zip(('_1', '_2'), (chrom_cn_1, chrom_cn_2)):
chrom_reseg['idx'+suffix] = chrom_cn.index.values[chrom_reseg['start_idx'+suffix].values]
chrom_reseg.drop(['start_idx'+suffix, 'end_idx'+suffix, 'filter'+suffix], axis=1, inplace=True)
chrom_reseg['chromosome'] = chromosome
reseg.append(chrom_reseg)
return
|
pd.concat(reseg, ignore_index=True)
|
pandas.concat
|
import pandas as pd
def venue_popularity(data):
venues_popularity_dict = {}
venue_reformatted =
|
pd.Series()
|
pandas.Series
|
from collections import defaultdict
from ..apps.clashfilter import df_ideal_ala, rel_coords_dict, Clash, ClashVDM, make_pose_df, \
backbone_str, Contact, make_df_corr, VdmReps
import pickle
import numpy as np
import pandas as pd
from ..apps.transformation import get_rot_trans
from prody import calcPhi, calcPsi, writePDB, AtomGroup
from sklearn.neighbors import NearestNeighbors
from ..apps.convex_hull import AlphaHull
from numba import jit
import time
import os
import copy
import random
import itertools
from scipy.spatial.distance import cdist
coords = ['c_x', 'c_y', 'c_z', 'c_D_x', 'c_D_y',
'c_D_z', 'c_H1_x', 'c_H1_y', 'c_H1_z',
'c_H2_x', 'c_H2_y', 'c_H2_z',
'c_H3_x', 'c_H3_y', 'c_H3_z',
'c_H4_x', 'c_H4_y', 'c_H4_z',
'c_A1_x', 'c_A1_y', 'c_A1_z',
'c_A2_x', 'c_A2_y', 'c_A2_z']
class Template:
def __init__(self, pdb):
self.pdb = pdb # pdb should be prody object poly-gly with CA hydrogens for design.
self.dataframe = make_pose_df(self.pdb)
self.alpha_hull = None
@staticmethod
def get_bb_sel(pdb):
return pdb.select(backbone_str).copy()
def get_phi_psi(self, seg, chain, resnum):
res = self.pdb[seg, chain, resnum]
try:
phi = calcPhi(res)
except ValueError:
phi = None
try:
psi = calcPsi(res)
except ValueError:
psi = None
return phi, psi
def set_alpha_hull(self, pdb_w_CB, alpha=9):
self.pdb_w_CB = pdb_w_CB
self.alpha_hull = AlphaHull(alpha)
self.alpha_hull.set_coords(pdb_w_CB)
self.alpha_hull.calc_hull()
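# Usage sketch (hypothetical file names; Template expects a prody object of a poly-Gly backbone
# with CA hydrogens, as noted in __init__, plus a CB-containing copy for the alpha hull):
# from prody import parsePDB
# template = Template(parsePDB('polygly_backbone.pdb'))
# template.set_alpha_hull(parsePDB('polygly_backbone_with_CB.pdb'), alpha=9)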
class Load:
"""Doesn't yet deal with terminal residues (although phi/psi does)"""
def __init__(self, **kwargs):
self.name = kwargs.get('name')
self.path = kwargs.get('path', './') # path to sig reps
self.sequence_csts = kwargs.get('sequence_csts') # keys1 are tuples (seq, ch, #), keys2 are label,
# vals are allowed residue names (three letter code).
self.dataframe = pd.DataFrame()
self.dataframe_grouped = None
self._rot = defaultdict(dict)
self._mobile_com = defaultdict(dict)
self._target_com = defaultdict(dict)
self._sig_reps = defaultdict(dict)
self._ideal_ala_df = defaultdict(dict)
self._nonclashing = list()
self.remove_from_df = kwargs.get('remove_from_df') # e.g. {1: {'chain': 'Y', 'name': 'CB', 'resname': 'ASN'},
# 2: {'chain': 'Y', 'name': 'CG', 'resname': 'GLN'}}
@staticmethod
def _get_targ_coords(template, label, seg, chain, resnum):
sel_str = 'segment ' + seg + ' chain ' + chain + ' resnum ' + str(resnum) + ' name '
cs = []
for n in rel_coords_dict[label]:
try:
cs.append(template.pdb.select(sel_str + n).getCoords()[0])
except AttributeError:
try:
cs = []
for n in ['N', '1H', 'CA']:
cs.append(template.pdb.select(sel_str + n).getCoords()[0])
return np.stack(cs)
except AttributeError:
try:
cs = []
for n in ['N', 'H1', 'CA']:
cs.append(template.pdb.select(sel_str + n).getCoords()[0])
return np.stack(cs)
except AttributeError:
sel_str = 'chain ' + chain + ' resnum ' + str(resnum) + ' name '
cs = []
for n in rel_coords_dict[label]:
try:
cs.append(template.pdb.select(sel_str + n).getCoords()[0])
except AttributeError:
cs = []
for n in ['N', '1H', 'CA']:
cs.append(template.pdb.select(sel_str + n).getCoords()[0])
return np.stack(cs)
return np.stack(cs)
return np.stack(cs)
@staticmethod
def _get_mob_coords(df, label):
return np.stack([df[df['name'] == n][['c_x', 'c_y', 'c_z']].values.flatten()
for n in rel_coords_dict[label]])
def set_rot_trans(self, template):
for seg, chain, resnum in self.sequence_csts.keys():
for label, df in df_ideal_ala.items():
mob_coords = self._get_mob_coords(df, label)
targ_coords = self._get_targ_coords(template, label, seg, chain, resnum)
R, m_com, t_com = get_rot_trans(mob_coords, targ_coords)
self._rot[label][(seg, chain, resnum)] = R
self._mobile_com[label][(seg, chain, resnum)] = m_com
self._target_com[label][(seg, chain, resnum)] = t_com
df_ = df.copy()
df_[['c_x', 'c_y', 'c_z']] = np.dot(df_[['c_x', 'c_y', 'c_z']] - m_com, R) + t_com
self._ideal_ala_df[label][(seg, chain, resnum)] = df_
def _import_sig_reps(self):
labels_resns = defaultdict(set)
for tup in self.sequence_csts.keys():
for label in self.sequence_csts[tup].keys():
labels_resns[label] |= set(self.sequence_csts[tup][label])
for label in labels_resns.keys():
for resn in labels_resns[label]:
try:
with open(self.path + label + '/' + resn + '.pkl', 'rb') as infile:
self._sig_reps[label][resn] = pickle.load(infile)
except FileNotFoundError:
pass
@staticmethod
def _get_phi_psi_df(df, phi, psi, phipsi_width=60):
if phi is not None:
phi_high = df['phi'] < (phi + (phipsi_width / 2))
phi_low = df['phi'] > (phi - (phipsi_width / 2))
else:
phi_high = np.array([True] * len(df))
phi_low = phi_high
if psi is not None:
psi_high = df['psi'] < (psi + (phipsi_width / 2))
psi_low = df['psi'] > (psi - (phipsi_width / 2))
else:
psi_high = np.array([True] * len(df))
psi_low = psi_high
return df[phi_high & phi_low & psi_high & psi_low]
@staticmethod
def chunk_df(df_gr, gr_chunk_size=100):
grs = list()
for i, (n, gr) in enumerate(df_gr):
grs.append(gr)
if (i + 1) % gr_chunk_size == 0:
yield pd.concat(grs)
grs = list()
def _load(self, template, seg, chain, resnum, **kwargs):
phipsi_width = kwargs.get('phipsi_width', 60)
dfs = list()
for label in self.sequence_csts[(seg, chain, resnum)].keys():
print('loading ' + str((seg, chain, resnum)) + ' , ' + label)
if label == 'PHI_PSI':
df_list = list()
phi, psi = template.get_phi_psi(seg, chain, resnum)
for resn in self.sequence_csts[(seg, chain, resnum)][label]:
df_phipsi = self._get_phi_psi_df(self._sig_reps[label][resn],
phi, psi, phipsi_width)
df_list.append(df_phipsi)
df = pd.concat(df_list)
else:
df = pd.concat([self._sig_reps[label][resn]
for resn in self.sequence_csts[(seg, chain, resnum)][label]])
if self.remove_from_df is not None:
for d in self.remove_from_df.values():
tests = []
for col, val in d.items():
tests.append(df[col] == val)
tests = np.array(tests).T
tests = tests.all(axis=1)
df = df.loc[~tests]
m_com = self._mobile_com[label][(seg, chain, resnum)]
t_com = self._target_com[label][(seg, chain, resnum)]
R = self._rot[label][(seg, chain, resnum)]
print('transforming coordinates...')
df[coords[:3]] = np.dot(df[coords[:3]] - m_com, R) + t_com
df[coords[3:6]] = np.dot(df[coords[3:6]] - m_com, R) + t_com
df[coords[6:9]] = np.dot(df[coords[6:9]] - m_com, R) + t_com
df[coords[9:12]] = np.dot(df[coords[9:12]] - m_com, R) + t_com
df[coords[12:15]] = np.dot(df[coords[12:15]] - m_com, R) + t_com
df[coords[15:18]] = np.dot(df[coords[15:18]] - m_com, R) + t_com
df[coords[18:21]] = np.dot(df[coords[18:21]] - m_com, R) + t_com
df[coords[21:]] = np.dot(df[coords[21:]] - m_com, R) + t_com
df['seg_chain_resnum'] = [(seg, chain, resnum)] * len(df)
df['seg_chain_resnum_'] = [seg + '_' + chain + '_' + str(resnum)] * len(df)
# New: string index used to group vdMs in the clash filter
df['str_index'] = df['iFG_count'] + '_' + df['vdM_count'] + '_' + df['query_name'] + '_' + df['seg_chain_resnum_']
print('making transformed dataframe...')
dfs.append(df)
dataframe = pd.concat(dfs, sort=False, ignore_index=True)
print('removing clashes...')
df_nonclash = self._remove(dataframe, template, seg, chain, resnum, **kwargs)
self._nonclashing.append(df_nonclash)
@staticmethod
def _remove(dataframe, template, seg, chain, resnum, **kwargs):
t0 = time.time()
cla = ClashVDM(dfq=dataframe, dft=template.dataframe)
cla.set_grouping('str_index')
cla.set_exclude((resnum, chain, seg))
cla.setup()
cla.find(**kwargs)
tf = time.time()
print('time:', tf-t0)
return cla.dfq_clash_free
def load(self, template, **kwargs):
if not self._sig_reps:
self._import_sig_reps()
if not self._rot:
self.set_rot_trans(template)
for seg, chain, resnum in self.sequence_csts.keys():
self._load(template, seg, chain, resnum, **kwargs)
print('concatenating non-clashing to dataframe')
t0 = time.time()
self.dataframe = pd.concat(self._nonclashing, sort=False, ignore_index=True)
self._nonclashing = list()
self._sig_reps = defaultdict(dict)
tf = time.time() - t0
print('concatenated in ' + str(tf) + ' seconds.')
self._set_grouped_dataframe()
def load_additional(self, template, sequence_csts, **kwargs):
seq_csts = defaultdict(dict)
seq_csts_copy = copy.deepcopy(self.sequence_csts)
for seg_ch_rn in sequence_csts.keys():
if seg_ch_rn not in self.sequence_csts.keys():
seq_csts[seg_ch_rn] = sequence_csts[seg_ch_rn]
seq_csts_copy[seg_ch_rn] = sequence_csts[seg_ch_rn]
if len(seq_csts.keys()) > 0:
self.path = kwargs.get('path', self.path)
self.sequence_csts = seq_csts
self._import_sig_reps()
self.set_rot_trans(template)
self._nonclashing = list()
for seg, chain, resnum in self.sequence_csts.keys():
self._load(template, seg, chain, resnum, **kwargs)
print('concatenating non-clashing to dataframe')
t0 = time.time()
_dataframe =
|
pd.concat(self._nonclashing, sort=False, ignore_index=True)
|
pandas.concat
|
from PhiRelevance.PhiUtils1 import phiControl,phi
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import math
from random import seed
from random import randint
from random import random
class SmoteRRegression:
"""
Class SmoteRRegression takes arguments as follows:
data - Pandas data frame with target value as last column, rest columns should be features/attributes
method - "auto"(default, also called "extremes"),"range"
extrType - "high", "both"(default), "low"
thr_rel - user defined relevance threshold between 0 and 1; all the target values with relevance above
the threshold are candidates to be oversampled
controlPts - list of control points formatted as [y1, phi(y1), phi'(y1), y2, phi(y2), phi'(y2)], where
y1: target value; phi(y1): relevance value of y1; phi'(y1): derivative of phi(y1), etc.
c_perc - under- and over-sampling strategy; in this implementation SmoteR interpolation is applied in each bump of the oversampling (interesting) sets.
Possible values are defined below,
"balance" - will try to distribute the examples evenly across the existing bumps
"extreme" - invert existing frequency of interesting/uninteresting set
<percentage> - a list of percentage values with the following format,
for any percentage value < 1, supply either a single value that applies to all bumps of the undersampling set,
or one value per bump of the undersampling set;
for any percentage value > 1, supply either a single value that applies to all bumps of the oversampling set,
or one value per bump of the oversampling set
k - The number of nearest neighbors, default value is 5
"""
def __init__(self, data, method='auto', extrType='both', thr_rel=1.0, controlPts=[], c_perc="balance", k=5):
seed(1)
self.data = data;
self.method = 'extremes' if method in ['extremes', 'auto'] else 'range'
if self.method == 'extremes':
if extrType in ['high','low','both']:
self.extrType = extrType
else:
self.extrType = 'both'
else:
self.extrType =''
self.thr_rel = thr_rel
if method == 'extremes':
self.controlPts = []
else:
self.controlPts = controlPts
self.c_perc_undersampling = []
self.c_perc_oversampling = []
if str == type(c_perc):
self.c_perc = c_perc  # expected to be 'balance' or 'extreme'
elif list == type(c_perc):
self.c_perc = 'percentage list'
self.processCPerc(c_perc)
self.k = k
self.coef = 1.5
def processCPerc(self, c_perc):
for x in c_perc:
if x < 1.0:
self.c_perc_undersampling.append(float(x))
elif x > 1.0:
self.c_perc_oversampling.append(float(x))
else:
print('c_perc value in list should not be 1!')
print(f'c_perc_undersampling: {self.c_perc_undersampling}')
print(f'c_perc_oversampling: {self.c_perc_oversampling}')
def getMethod(self):
return self.method
def getData(self):
return self.data
def getExtrType(self):
return self.extrType
def getThrRel(self):
return self.thr_rel
def getControlPtr(self):
return self.controlPts
def getCPerc(self):
if self.c_perc in ['balance', 'extreme']:
return self.c_perc
else:
return self.c_perc_undersampling, self.c_perc_oversampling
def getK(self):
return self.k
def set_obj_interesting_set(self, data):
self.interesting_set = self.get_interesting_set(data)
def get_obj_interesting_set(self):
return self.interesting_set
def set_obj_uninteresting_set(self, data):
self.uninteresting_set = self.get_uninteresting_set(data)
def get_obj_uninteresting_set(self):
return self.uninteresting_set
def set_obj_bumps(self, data):
self.bumps_undersampling, self.bumps_oversampling = self.calc_bumps(data)
def get_obj_bumps(self):
return self.bumps_undersampling, self.bumps_oversampling
def resample(self):
yPhi, ydPhi, yddPhi = self.calc_rel_values()
data1 = self.preprocess_data(yPhi)
#interesting set
self.set_obj_interesting_set(data1)
#uninteresting set
self.set_obj_uninteresting_set(data1)
#calculate bumps
self.set_obj_bumps(data1)
if self.c_perc == 'percentage list':
resampled = self.process_percentage()
elif self.c_perc == 'balance':
resampled = self.process_balance()
elif self.c_perc == 'extreme':
resampled = self.process_extreme()
return resampled
def preprocess_data(self, yPhi):
#append column 'yPhi'
data1 = self.data
data1['yPhi'] = yPhi
data1 = data1.sort_values(by=[data1.columns[-2]])  # sort by the target column (last column before yPhi)
return data1
def get_uninteresting_set(self, data):
uninteresting_set = data[data.yPhi < self.thr_rel]
return uninteresting_set
def get_interesting_set(self, data):
interesting_set = data[data.yPhi >= self.thr_rel]
return interesting_set
def calc_rel_values(self):
#retrieve target(last column) from DataFrame
y = self.data.iloc[:,-1]
#generate control ptrs
if self.method == 'extremes':
controlPts, npts = phiControl(y, extrType=self.extrType)
else:
controlPts, npts = phiControl(y, 'range', extrType="", controlPts=self.controlPts)
#calculate relevance value
yPhi, ydPhi, yddPhi = phi(y, controlPts, npts, self.method)
return yPhi, ydPhi, yddPhi
def calc_bumps(self, df):
thr_rel = self.thr_rel
less_than_thr_rel = df['yPhi'].iloc[0] < thr_rel
bumps_oversampling = []
bumps_undersampling = []
bumps_oversampling_df = pd.DataFrame(columns = df.columns)
bumps_undersampling_df = pd.DataFrame(columns = df.columns)
for idx, row in df.iterrows():
if less_than_thr_rel and (row['yPhi'] < thr_rel):
bumps_undersampling_df = bumps_undersampling_df.append(row)
elif less_than_thr_rel and row['yPhi'] >= thr_rel:
bumps_undersampling.append(bumps_undersampling_df)
bumps_undersampling_df = pd.DataFrame(columns = df.columns)
bumps_oversampling_df = bumps_oversampling_df.append(row)
less_than_thr_rel = False
elif (not less_than_thr_rel) and (row['yPhi'] >= thr_rel):
bumps_oversampling_df = bumps_oversampling_df.append(row)
elif (not less_than_thr_rel) and (row['yPhi'] < thr_rel):
bumps_oversampling.append(bumps_oversampling_df)
bumps_oversampling_df = pd.DataFrame(columns = df.columns)
bumps_undersampling_df = bumps_undersampling_df.append(row)
less_than_thr_rel = True
if less_than_thr_rel and (df.iloc[-1,:]['yPhi'] < thr_rel):
bumps_undersampling.append(bumps_undersampling_df)
elif not less_than_thr_rel and (df.iloc[-1,:]['yPhi'] >= thr_rel):
bumps_oversampling.append(bumps_oversampling_df)
return bumps_undersampling, bumps_oversampling
def process_percentage(self):
undersampling_and_interesting, new_samples_set = self.preprocess_percentage()
reduced_cols = new_samples_set.columns.values.tolist()[:-1]
dups_sample_counts = new_samples_set.pivot_table(index=reduced_cols, aggfunc='size')
interesting_set_list = self.interesting_set.iloc[:,:-1].values.tolist()
#new samples from smote
new_samples_smote = []
for index, value in dups_sample_counts.items():
base_sample = list(index)
#print(f'base_sample={base_sample}')
kNN_result = self.kNN_calc(self.k, base_sample, interesting_set_list)
#Generating new samples
for x in range(value):
idx = randint(0, len(kNN_result) - 1)  # pick a random neighbor among the k nearest
#print(f'x={x},idx={idx}')
nb = kNN_result[idx]
#Generate attribute values
new_sample = []
for y in range(len(base_sample)-1):
diff = abs(base_sample[y]-nb[y])
new_sample.append(base_sample[y]+random()*diff)
#Calc target value
a = np.array(new_sample)
b = np.array(base_sample[:-1])
d1 = np.linalg.norm(a-b)
c = np.array(nb[:-1])
d2 = np.linalg.norm(a-c)
new_target = (d2*base_sample[-1]+d1*nb[-1])/(d1+d2)
new_sample.append(new_target)
#print(f'new_sample={new_sample}')
new_samples_smote.append(new_sample)
print(f'len={len(new_samples_smote)}')
#print(f'{new_samples_smote}')
#Generate final result
undersampling_and_interesting.drop('yPhi',axis=1,inplace=True )
df_new_samples_smote = pd.DataFrame(new_samples_smote)
df_new_samples_smote.columns = reduced_cols
frames = [undersampling_and_interesting, df_new_samples_smote]
result = pd.concat(frames)
return result
def preprocess_percentage(self):
#process undersampling
len_c_perc_undersampling = len(self.c_perc_undersampling)
print(f'len_c_perc_undersampling={len_c_perc_undersampling}')
len_bumps_undersampling = len(self.bumps_undersampling)
print(f'len_bumps_undersampling={len_bumps_undersampling}')
resampled_sets = []
if len_c_perc_undersampling == 0:
print('no undersampling, append uninteresting set directly')
resampled_sets.append(self.uninteresting_set)
elif len_c_perc_undersampling == 1:
undersample_perc = self.c_perc_undersampling[0]
print('len(self.c_perc) == 1')
print(f'process_percentage(): undersample_perc={undersample_perc}')
#iterate undersampling bumps to apply undersampling percentage
for s in self.bumps_undersampling:
print(f'process_percentage(): bump size={len(s)}')
resample_size = round(len(s)*undersample_perc)
print(f'process_percentage(): resample_size={resample_size}')
resampled_sets.append(s.sample(n = resample_size))
elif len_c_perc_undersampling == len_bumps_undersampling:
for i in range(len(self.bumps_undersampling)):
print(f'len(self.c_perc) > 1 loop i={i}')
undersample_perc = self.c_perc_undersampling[i]
print(f'process_percentage(): undersample_perc={undersample_perc}')
resample_size = round(len(self.bumps_undersampling[i])*undersample_perc)
print(f'process_percentage(): resample_size={resample_size}')
resampled_sets.append(self.bumps_undersampling[i].sample(n = resample_size))
else:
print(f'length of c_perc for undersampling {len_c_perc_undersampling} != length of bumps undersampling {len_bumps_undersampling}')
#uninteresting bumps are now stored in list resampled_sets
#also adding original interesting set
resampled_sets.append(self.interesting_set)
#Oversampling with SmoteR
len_c_perc_oversampling = len(self.c_perc_oversampling)
print(f'len_c_perc_oversampling={len_c_perc_oversampling}')
len_bumps_oversampling = len(self.bumps_oversampling)
print(f'len_bumps_oversampling={len_bumps_oversampling}')
resampled_oversampling_set = []
if len(self.c_perc_oversampling) == 1:
#oversampling - new samples set
c_perc_frac, c_perc_int = 0.0, 0.0
for s in self.bumps_oversampling:
# size of the new samples
print(f'c_perc_oversampling[0]={self.c_perc_oversampling[0]}')
if self.c_perc_oversampling[0]>1.0 and self.c_perc_oversampling[0]<2.0:
size_new_samples_set = round(len(s)*(self.c_perc_oversampling[0]-1))
print(f'size_new_samples_set={size_new_samples_set}')
resampled_oversampling_set.append(s.sample(n = size_new_samples_set))
elif self.c_perc_oversampling[0]>2.0:
c_perc_frac, c_perc_int = math.modf(self.c_perc_oversampling[0])
print(f'c_perc_int, c_perc_frac =={c_perc_int, c_perc_frac}')
if c_perc_frac > 0.0:
size_frac_new_samples_set = round(len(s)*c_perc_frac)
resampled_oversampling_set.append(s.sample(n=size_frac_new_samples_set))
ss = s.loc[s.index.repeat(int(c_perc_int)-1)]
resampled_oversampling_set.append(ss)
elif len_c_perc_oversampling == len_bumps_oversampling:
for i in range(len(self.bumps_oversampling)):
print(f'len(self.c_perc) > 1 loop i={i}')
c_perc_bump = self.c_perc_oversampling[i]
print(f'process_percentage(): oversample_perc={c_perc_bump}')
if c_perc_bump>1.0 and c_perc_bump<2.0:
size_new_samples_set = round(len(self.bumps_oversampling[i])*(c_perc_bump-1))
print(f'size_new_samples_set={size_new_samples_set}')
resampled_oversampling_set.append(self.bumps_oversampling[i].sample(n = size_new_samples_set))
elif c_perc_bump>2.0:
c_perc_frac, c_perc_int = math.modf(c_perc_bump)
print(f'c_perc_int, c_perc_frac =={c_perc_int, c_perc_frac}')
if c_perc_frac>0.0:
size_frac_new_samples_set = round(len(self.bumps_oversampling[i])*c_perc_frac)
resampled_oversampling_set.append(self.bumps_oversampling[i].sample(n=size_frac_new_samples_set))
ss = self.bumps_oversampling[i].loc[self.bumps_oversampling[i].index.repeat(int(c_perc_int)-1)]
resampled_oversampling_set.append(ss)
else:
print(f'length of c_perc for oversampling {len_c_perc_oversampling} != length of bumps oversampling {len_bumps_oversampling}')
#Combining all undersampling sets and interesting set
undersampling_and_interesting = pd.concat(resampled_sets)
#Combining all new samples
new_samples_set = pd.concat(resampled_oversampling_set)
return undersampling_and_interesting, new_samples_set
def kNN_calc(self, k, sample_as_list, interesting_set_list):
a = np.array(sample_as_list[:-1])
for sample_interesting in interesting_set_list:
b = np.array(sample_interesting[:-1])
dist = np.linalg.norm(a-b)
sample_interesting.append(dist)
kNN_result = sorted(interesting_set_list, key=lambda x:x[-1])[1:(k+1)]
for j in interesting_set_list:
del j[-1]
return kNN_result
def process_balance(self):
new_samples_set = self.preprocess_balance()
reduced_cols = new_samples_set.columns.values.tolist()[:-1]
dups_sample_counts = new_samples_set.pivot_table(index=reduced_cols, aggfunc='size')
interesting_set_list = self.interesting_set.iloc[:,:-1].values.tolist()
#new samples from smote
new_samples_smote = []
for index, value in dups_sample_counts.items():
base_sample = list(index)
#print(f'base_sample={base_sample}')
kNN_result = self.kNN_calc(self.k, base_sample, interesting_set_list)
#Generating new samples
for x in range(value):
idx = randint(0, len(kNN_result) - 1)  # pick a random neighbor among the k nearest
#print(f'x={x},idx={idx}')
nb = kNN_result[idx]
#Generate attribute values
new_sample = []
for y in range(len(base_sample)-1):
diff = abs(base_sample[y]-nb[y])
new_sample.append(base_sample[y]+random()*diff)
#Calc target value
a = np.array(new_sample)
b = np.array(base_sample[:-1])
d1 = np.linalg.norm(a-b)
c = np.array(nb[:-1])
d2 = np.linalg.norm(a-c)
new_target = (d2*base_sample[-1]+d1*nb[-1])/(d1+d2)
new_sample.append(new_target)
#print(f'new_sample={new_sample}')
new_samples_smote.append(new_sample)
print(f'len={len(new_samples_smote)}')
#print(f'{new_samples_smote}')
#Generate final result
data = self.getData()
data.drop('yPhi',axis=1,inplace=True )
df_new_samples_smote = pd.DataFrame(new_samples_smote)
df_new_samples_smote.columns = reduced_cols
frames = [data, df_new_samples_smote]
result = pd.concat(frames)
return result
def preprocess_balance(self):
new_samples_per_bump = round(len(self.uninteresting_set) / len(self.bumps_oversampling))
print(f'process_balance(): resample_size per bump={new_samples_per_bump}')
resampled_oversampling_set = []
for s in self.bumps_oversampling:
ratio = new_samples_per_bump / len(s)
print(f'ratio={ratio}')
if ratio>1.0 and ratio<2.0:
size_new_samples_set = round(len(s)*(ratio-1))
print(f'size_new_samples_set={size_new_samples_set}')
resampled_oversampling_set.append(s.sample(n = size_new_samples_set))
elif ratio>2.0:
c_perc_frac, c_perc_int = math.modf(ratio)
print(f'c_perc_int, c_perc_frac =={c_perc_int, c_perc_frac}')
if c_perc_frac > 0.0:
size_frac_new_samples_set = round(len(s)*c_perc_frac)
resampled_oversampling_set.append(s.sample(n=size_frac_new_samples_set))
ss = s.loc[s.index.repeat(int(c_perc_int)-1)]
resampled_oversampling_set.append(ss)
#combining new samples
new_samples_set =
|
pd.concat(resampled_oversampling_set)
|
pandas.concat
|
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 26 14:02:03 2019
@author: <NAME>
"""
import pandas as pd
from pandas import ExcelWriter
from fuzzywuzzy import fuzz
from fuzzywuzzy import process
def match2Lists(list1,list2):
"""
Loops over a list and returns fuzzy matches found in a second list.
Inputs:
list1 - list of terms to search for in the master list
list2 - master list that is searched for matches; should be dict-like (e.g. a pandas Series) so that process.extractOne also returns the matched key/row index
"""
TopMatch = []
TopScore = []
TopRowIdx = []
for member in list1:
x=process.extractOne(member, list2)
TopMatch.append(x[0])
TopScore.append(x[1])
TopRowIdx.append(x[2])
return TopMatch, TopScore, TopRowIdx
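# Minimal sketch with made-up IDs: passing the master list as a dict-like object (e.g. a pandas
# Series) makes process.extractOne return (match, score, key) triples, which the x[2] lookup above
# relies on; a plain Python list would only yield (match, score).
_demo_master = pd.Series(['NDAR_INVAAAA1111', 'NDAR_INVBBBB2222'])
_demo_queries = ['NDAR_INVAAAA111', 'NDAR_INVBBB2222']
_demo_match, _demo_score, _demo_idx = match2Lists(_demo_queries, _demo_master)
# _demo_match -> best matching strings, _demo_score -> similarity scores (0-100), _demo_idx -> row indices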
def createRUID_List(rowIdxList, headerStr):
"""
Loops over a series containing row indices and returns a list of RUID strings.
Inputs:
rowIdxList - collection of row index values
headerStr - DataFrame header string value for column containing RUIDs
Outputs:
new list containing RUID strings
"""
RUID_List = []
for aRowIdx in rowIdxList:
workingRUID=df[headerStr].iloc[aRowIdx]
RUID_List.append(workingRUID)
return RUID_List
df = pd.read_excel("DataReleaseMismatches.xlsx",sheet_name = "Sheet3")
print ('Finished reading in input file.')
#blackList=['NDAR_INV']
#for pattern in blackList:
# df['pGUID_Rutgers'] = df['pGUID_Rutgers'].replace(pattern, '')
#datasets
DR_DAIC_pGUIDs = df['pGUID_DAIC'].dropna()
Current_DAIC_pGUIDs = df['abcd.id_redcap'].dropna()
print ('About to start first match2collections.')
BestMatch_DRtoC, BestScore_DRtoC, BestRowIdx_DRtoC = match2Lists(DR_DAIC_pGUIDs, Current_DAIC_pGUIDs)
print ('Just finished first match2collections.')
#print ('About to start second match2collections.')
#BestMatch_RtoD, BestScore_RtoD, BestRowIdx_RtoD = match2Lists(Unique_Rutgers_Invs, AllDAIC_Invs)
#print ('Just finished second match2collections.')
#print ('About to start third match2collections.')
#BestMatch_DtoD, BestScore_DtoD, BestRowIdx_DtoD = match2Lists(Unique_DAIC_IDs, AllDAIC_IDs)
#print ('Just finished third match2collections.')
#print ('About to start fourth match2collections.')
#BestMatch_RtoR, BestScore_RtoR, BestRowIdx_RtoR = match2Lists(Unique_Rutgers_IDs, AllRutgersIDs)
#print ('Just finished fourth match2collections.')
df['BestMatchdf_DRtoC']=
|
pd.Series(BestMatch_DRtoC)
|
pandas.Series
|
import pandas as pd
from .datastore import merge_postcodes
from .types import ErrorDefinition
from .utils import add_col_to_tables_CONTINUOUSLY_LOOKED_AFTER as add_CLA_column # Check 'Episodes' present before use!
def validate_165():
error = ErrorDefinition(
code = '165',
description = 'Data entry for mother status is invalid.',
affected_fields = ['MOTHER', 'SEX', 'ACTIV', 'ACCOM', 'IN_TOUCH', 'DECOM']
)
def _validate(dfs):
if 'Header' not in dfs or 'Episodes' not in dfs or 'OC3' not in dfs:
return {}
else:
header = dfs['Header']
episodes = dfs['Episodes']
oc3 = dfs['OC3']
collection_start = dfs['metadata']['collection_start']
collection_end = dfs['metadata']['collection_end']
valid_values = ['0','1']
# prepare to merge
oc3.reset_index(inplace=True)
header.reset_index(inplace=True)
episodes.reset_index(inplace=True)
collection_start = pd.to_datetime(collection_start, format='%d/%m/%Y', errors='coerce')
collection_end = pd.to_datetime(collection_end, format='%d/%m/%Y', errors='coerce')
episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce')
episodes['EPS'] = (episodes['DECOM']>=collection_start) & (episodes['DECOM']<=collection_end)
episodes['EPS_COUNT'] = episodes.groupby('CHILD')['EPS'].transform('sum')
merged = episodes.merge(header, on='CHILD', how='left', suffixes=['_eps', '_er']).merge(oc3, on='CHILD', how='left')
# Raise error if provided <MOTHER> is not a valid value.
value_validity = merged['MOTHER'].notna() & (~merged['MOTHER'].isin(valid_values))
# If not provided
female = (merged['SEX']=='1')
eps_in_year = (merged['EPS_COUNT']>0)
none_provided = (merged['ACTIV'].isna()& merged['ACCOM'].isna()& merged['IN_TOUCH'].isna())
# If provided <MOTHER> must be a valid value. If not provided <MOTHER> then either <GENDER> is male or no episode record for current year and any of <IN_TOUCH>, <ACTIV> or <ACCOM> have been provided
mask = value_validity | (merged['MOTHER'].isna() & (female & (eps_in_year | none_provided)))
# That is, if value not provided and child is a female with eps in current year or no values of IN_TOUCH, ACTIV and ACCOM, then raise error.
error_locs_eps = merged.loc[mask, 'index_eps']
error_locs_header = merged.loc[mask, 'index_er']
error_locs_oc3 = merged.loc[mask, 'index']
return {'Header':error_locs_header.dropna().unique().tolist(),
'OC3':error_locs_oc3.dropna().unique().tolist()}
return error, _validate
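# Usage sketch (hypothetical in-memory frames; real callers pass the full set of loaded tables
# plus collection metadata in the dfs dict):
# error, _validate = validate_165()
# issues = _validate({'Header': header_df, 'Episodes': episodes_df, 'OC3': oc3_df,
#                     'metadata': {'collection_start': '01/04/2020', 'collection_end': '31/03/2021'}})
# issues -> {'Header': [...], 'OC3': [...]} row indices that fail the check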
def validate_1014():
error = ErrorDefinition(
code='1014',
description='UASC information is not required for care leavers',
affected_fields=['ACTIV', 'ACCOM', 'IN_TOUCH', 'DECOM']
)
def _validate(dfs):
if 'UASC' not in dfs or 'Episodes' not in dfs or 'OC3' not in dfs:
return {}
else:
uasc = dfs['UASC']
episodes = dfs['Episodes']
oc3 = dfs['OC3']
collection_start = dfs['metadata']['collection_start']
collection_end = dfs['metadata']['collection_end']
# prepare to merge
oc3.reset_index(inplace=True)
uasc.reset_index(inplace=True)
episodes.reset_index(inplace=True)
collection_start = pd.to_datetime(collection_start, format='%d/%m/%Y', errors='coerce')
collection_end = pd.to_datetime(collection_end, format='%d/%m/%Y', errors='coerce')
episodes['DECOM'] = pd.to_datetime(episodes['DECOM'], format='%d/%m/%Y', errors='coerce')
episodes['DEC'] = pd.to_datetime(episodes['DEC'], format='%d/%m/%Y', errors='coerce')
date_check = (
((episodes['DECOM'] >= collection_start) & (episodes['DECOM'] <= collection_end))
| ((episodes['DEC'] >= collection_start) & (episodes['DEC'] <= collection_end))
| ((episodes['DECOM'] <= collection_start) & episodes['DEC'].isna())
)
episodes['EPS'] = date_check
episodes['EPS_COUNT'] = episodes.groupby('CHILD')['EPS'].transform('sum')
# inner merge to take only episodes of children which are also found on the uasc table
merged = episodes.merge(uasc, on='CHILD', how='inner', suffixes=['_eps', '_sc']).merge(oc3, on='CHILD',
how='left')
# adding suffixes with the secondary merge here does not go so well yet.
some_provided = (merged['ACTIV'].notna() | merged['ACCOM'].notna() | merged['IN_TOUCH'].notna())
mask = (merged['EPS_COUNT'] == 0) & some_provided
error_locs_uasc = merged.loc[mask, 'index_sc']
error_locs_oc3 = merged.loc[mask, 'index']
return {'UASC': error_locs_uasc.unique().tolist(), 'OC3': error_locs_oc3.unique().tolist()}
return error, _validate
# !# not sure what this rule is actually supposed to be getting at - description is confusing
def validate_197B():
error = ErrorDefinition(
code='197B',
description="SDQ score or reason for no SDQ should be reported for 4- or 17-year-olds.",
affected_fields=['SDQ_REASON', 'DOB'],
)
def _validate(dfs):
if 'OC2' not in dfs or 'Episodes' not in dfs:
return {}
oc2 = add_CLA_column(dfs, 'OC2')
start = pd.to_datetime(dfs['metadata']['collection_start'], format='%d/%m/%Y', errors='coerce')
endo =
|
pd.to_datetime(dfs['metadata']['collection_end'], format='%d/%m/%Y', errors='coerce')
|
pandas.to_datetime
|